diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Descargar Discografia Completa Richard Clayderman Torrent Reljate con las Melodas Suaves y Emotivas del Piano.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Descargar Discografia Completa Richard Clayderman Torrent Reljate con las Melodas Suaves y Emotivas del Piano.md deleted file mode 100644 index 21b2414d1413eeff60965c28ab38a2074a9e348e..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Descargar Discografia Completa Richard Clayderman Torrent Reljate con las Melodas Suaves y Emotivas del Piano.md +++ /dev/null @@ -1,72 +0,0 @@ - -

Descargar Discografia Completa Richard Clayderman Torrent

-

If you are a fan of piano music, you have probably heard of Richard Clayderman, one of the most popular and successful pianists in the world. He has released more than 200 albums, sold over 150 million records, and performed in more than 80 countries. His music is soothing, elegant, and captivating, and it appeals to people of all ages and backgrounds.

-

But did you know that you can download his complete discography for free using torrent sites? Yes, you read that right. You can enjoy all of his albums, from his debut in 1977 to his latest releases in 2020, without spending a dime. All you need is a good internet connection, a torrent client, and some patience.

-

Descargar Discografia Completa Richard Clayderman Torrent


DOWNLOAD »»» https://byltly.com/2uKxqn



-

In this article, we will tell you everything you need to know about Richard Clayderman, why you should download his complete discography, and how to do it safely and easily. Let's get started!

-

Who is Richard Clayderman?

-

Richard Clayderman is the stage name of Philippe Pagès, a French pianist who was born on December 28, 1953 in Paris. He started playing the piano at a young age, following in the footsteps of his father, who was an accordion teacher. He entered the Conservatoire de Paris at the age of 12, where he won many awards and accolades.

-

However, his classical career was cut short by financial difficulties caused by his father's illness. He had to work as a bank clerk and an accompanist for pop singers to make ends meet. His big break came in 1976, when he was chosen by music producer Olivier Toussaint to record a piano ballad called "Ballade pour Adeline", composed by Paul de Senneville.

-

Descargar todos los álbumes de Richard Clayderman en Torrent
-Cómo bajar la discografía completa de Richard Clayderman gratis
-Richard Clayderman: el pianista más famoso del mundo - Descarga su música por Torrent
-Descargar Discografia Completa Richard Clayderman Mega
-Richard Clayderman: sus mejores canciones en Torrent - Descarga rápida y segura
-Descargar Discografia Completa Richard Clayderman 320 kbps
-Richard Clayderman: el rey del piano romántico - Descarga su discografía por Torrent
-Descargar Discografia Completa Richard Clayderman FLAC
-Richard Clayderman: una leyenda de la música instrumental - Descarga sus álbumes en Torrent
-Descargar Discografia Completa Richard Clayderman MP3
-Richard Clayderman: el mago del teclado - Descarga su colección completa por Torrent
-Descargar Discografia Completa Richard Clayderman ZIP
-Richard Clayderman: el maestro de la melodía - Descarga su obra completa en Torrent
-Descargar Discografia Completa Richard Clayderman RAR
-Richard Clayderman: el genio del piano francés - Descarga su discografía en Torrent
-Descargar Discografia Completa Richard Clayderman Online
-Richard Clayderman: el artista más vendido de la música clásica - Descarga su música por Torrent
-Descargar Discografia Completa Richard Clayderman Gratis
-Richard Clayderman: el compositor más prolífico del siglo XX - Descarga sus álbumes por Torrent
-Descargar Discografia Completa Richard Clayderman Full HD
-Richard Clayderman: el ídolo de millones de fans - Descarga su discografía por Torrent
-Descargar Discografia Completa Richard Clayderman Sin Registrarse
-Richard Clayderman: el creador de un estilo único - Descarga su música por Torrent
-Descargar Discografia Completa Richard Clayderman Sin Virus
-Richard Clayderman: el músico más versátil del mundo - Descarga sus álbumes por Torrent
-Descargar Discografia Completa Richard Clayderman Sin Publicidad
-Richard Clayderman: el embajador de la cultura francesa - Descarga su discografía por Torrent
-Descargar Discografia Completa Richard Clayderman Sin Límites
-Richard Clayderman: el virtuoso del piano moderno - Descarga su música por Torrent
-Descargar Discografia Completa Richard Clayderman Con Subtítulos
-Richard Clayderman: el innovador de la música instrumental - Descarga sus álbumes por Torrent
-Descargar Discografia Completa Richard Clayderman Con Carátulas
-Richard Clayderman: el fenómeno musical del siglo XXI - Descarga su discografía por Torrent
-Descargar Discografia Completa Richard Clayderman Con Extras
-Richard Clayderman: el maestro de la armonía - Descarga su música por Torrent
-Descargar Discografia Completa Richard Clayderman Con Bonus Tracks
-Richard Clayderman: el pionero de la música new age - Descarga sus álbumes por Torrent
-Descargar Discografia Completa Richard Clayderman Con Calidad Garantizada
-Richard Clayderman: el inspirador de generaciones de pianistas - Descarga su discografía por Torrent
-Descargar Discografia Completa Richard Clayderman Con Comentarios Del Artista
-Richard Clayderman: el referente de la música romántica - Descarga su música por Torrent
-Descargar Discografia Completa Richard Clayderman Con Letras De Las Canciones
-Richard Clayderman: el autor de más de 1000 temas originales - Descarga sus álbumes por Torrent
-Descargar Discografia Completa Richard Clayderman Con Notas Musicales
-Richard Clayderman: el intérprete de los grandes clásicos - Descarga su discografía por Torrent
-Descargar Discografia Completa Richard Clayderman Con Partituras Para Piano
-Richard Clayderman: el colaborador de grandes artistas - Descarga su música por Torrent
-Descargar Discografia Completa Richard Clayderman Con Videos Musicales

-

The song was an instant hit, selling over 22 million copies worldwide. It launched Clayderman's international career, and he adopted his stage name, taken from his great-grandmother's last name. Since then, he has recorded hundreds of albums with original compositions by Toussaint and de Senneville, as well as instrumental versions of popular songs, movie soundtracks, ethnic music, and classical pieces.

-

A French pianist with a prolific discography

-

Richard Clayderman has one of the most extensive discographies in the music industry. He has released more than 200 albums in different languages and formats, including CDs, LPs, cassettes, DVDs, and digital downloads. He has also collaborated with other artists and orchestras, such as James Last, Francis Goya, The Royal Philharmonic Orchestra, and The London Symphony Orchestra.

-

Some of his most famous albums are:

- -

His albums cover a wide range of genres and themes, such as love songs, Christmas songs, movie themes, Broadway musicals, Chinese music, Latin music, rock music, jazz music, and more. He has also recorded tribute albums to artists like ABBA, The Beatles, and The Carpenters.

-
-
\ No newline at end of file diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Interlok Driver Auto-tune Software and Learn from the Antares Tech Learning Center.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Interlok Driver Auto-tune Software and Learn from the Antares Tech Learning Center.md deleted file mode 100644 index b28e1b924b81376d6eb466b81eb34e2ada1cf461..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Interlok Driver Auto-tune Software and Learn from the Antares Tech Learning Center.md +++ /dev/null @@ -1,136 +0,0 @@ -
-

Download Interlok Driver Auto-tune Software: What You Need to Know

-

If you are looking for a tool that can run your code faster and more efficiently, you might want to check out Interlok Driver Auto-tune Software. This software is designed to optimize the performance of your hardware and software by automatically tuning the driver settings. In this article, we will explain what Interlok Driver Auto-tune Software is, how it works, and how you can download, install, and use it for your projects. We will also answer some frequently asked questions about this software and provide some tips and tricks for troubleshooting common problems.

-

What is Interlok Driver Auto-tune Software?

-

Interlok Driver Auto-tune Software is a tool that runs third-party applications faster and more efficiently by loading and running them with optimized driver settings. The software can analyze your hardware, software, and other potential issues that could slow down your workflows and provide solutions to improve them. The software can also identify, create, and diagnose issues within the factory settings of your hardware that runs a particular software. For example, you can use Interlok Driver Auto-tune Software to check whether your hardware running a driver can function properly or not.

-

Download Interlok Driver Auto-tune Software


Download Zip · https://byltly.com/2uKz7h



-

A brief introduction to the software and its features

-

Interlok Driver Auto-tune Software was developed by Antares Tech, a company that specializes in vocal processing and pitch correction software. The software is based on their flagship product, Auto-Tune, which is widely used by musicians and producers to correct vocal pitch and create vocal effects. Interlok Driver Auto-tune Software uses the same technology as Auto-Tune to tune your driver settings according to your needs.

-

Some of the features of Interlok Driver Auto-tune Software are:

- -

How does it work and what are the benefits?

-

To update the software:

  • Click on the "Download and Install" button and follow the instructions on the screen.
  • You may need to restart your computer to complete the update.

    To uninstall Interlok Driver Auto-tune Software, you need to follow these steps:

    -
      -
1. Go to the "Control Panel" on your computer and select "Programs and Features".
2. Find and select "Interlok Driver Auto-tune Software" from the list of programs.
3. Click on the "Uninstall" button and follow the instructions on the screen.
4. You may need to restart your computer to complete the uninstallation.

    How to use Interlok Driver Auto-tune Software for your projects?

    -

    Interlok Driver Auto-tune Software can help you run your code faster and more efficiently for various applications, such as audio and video editing, gaming, design, etc. Here are some examples of how you can use Interlok Driver Auto-tune Software for your projects:

    -

    Examples of applications that can benefit from the software

    - -

    How to configure and customize the software settings

    -

    Interlok Driver Auto-tune Software allows you to configure and customize the software settings according to your preferences and needs. Here are some steps to configure and customize the software settings:

    -

    How to install Interlok driver for Auto-tune
    -Interlok driver setup guide for Auto-tune software
    -Where to find Interlok driver download link for Auto-tune
    -Interlok driver compatibility issues with Auto-tune software
    -Troubleshooting Interlok driver errors in Auto-tune
    -Best practices for using Interlok driver with Auto-tune software
    -Benefits of Interlok driver for Auto-tune performance and quality
    -Interlok driver update and maintenance for Auto-tune software
    -Interlok driver alternatives for Auto-tune software
    -Interlok driver reviews and ratings for Auto-tune software
    -Interlok driver features and specifications for Auto-tune software
    -Interlok driver license and activation for Auto-tune software
    -Interlok driver support and customer service for Auto-tune software
    -Interlok driver FAQs and tips for Auto-tune software
    -Interlok driver tutorials and videos for Auto-tune software
    -How to uninstall Interlok driver from Auto-tune software
    -How to fix Interlok driver not working with Auto-tune software
    -How to optimize Interlok driver settings for Auto-tune software
    -How to use Interlok driver with other audio software besides Auto-tune
    -How to transfer Interlok driver to a new computer with Auto-tune software
    -How to download Interlok driver for free with Auto-tune software
    -How to get Interlok driver discount or coupon code for Auto-tune software
    -How to buy Interlok driver online for Auto-tune software
    -How to contact Interlok driver developer or manufacturer for Auto-tune software
    -How to solve Interlok driver security or privacy issues with Auto-tune software
    -How to integrate Interlok driver with other plugins or tools for Auto-tune software
    -How to customize Interlok driver preferences or options for Auto-tune software
    -How to troubleshoot Interlok driver installation or download problems with Auto-tune software
    -How to compare Interlok driver with other drivers or software for Auto-tune
    -How to backup or restore Interlok driver data or files for Auto-tune software
    -How to upgrade or downgrade Interlok driver version for Auto-tune software
    -How to test or evaluate Interlok driver functionality or quality for Auto-tune software
    -How to access or manage Interlok driver account or profile for Auto-tune software
    -How to share or export Interlok driver results or outputs for Auto-tune software
    -How to import or load Interlok driver inputs or sources for Auto-tune software
    -How to enable or disable Interlok driver features or functions for Auto-tune software
    -How to monitor or track Interlok driver performance or usage for Auto-tune software
    -How to learn or master Interlok driver skills or techniques for Auto-tune software
    -How to improve or enhance Interlok driver effects or outcomes for Auto-tune software
    -How to avoid or prevent Interlok driver issues or errors for Auto-tune software
    -How to recover or repair Interlok driver damage or corruption for Auto-tune software
    -How to verify or validate Interlok driver authenticity or legitimacy for Auto-tune software
    -How to change or modify Interlok driver parameters or values for Auto-tune software
    -How to configure or adjust Interlok driver modes or levels for Auto-tune software
    -How to add or remove Interlok driver components or elements for Auto-tune software
    -How to create or generate Interlok driver reports or logs for Auto-tune software
    -How to edit or modify Interlok driver content or format for Auto-tune software
    -How to apply or use Interlok driver presets or templates for Auto-tune software
    -How to convert or transform Interlok driver formats or types for Auto-tune software
    -How to sync or connect Interlok driver devices or systems for Auto-tune software

    -
      -
1. Launch Interlok Driver Auto-tune Software from your desktop or start menu.
2. Click on the "Settings" menu and select "Preferences".
3. You will see a window with various tabs and options that you can adjust.
4. You can change the language, theme, update frequency, notification settings, etc. of the software.
5. You can also create and manage different driver profiles for different applications. You can name, save, load, edit, or delete your driver profiles as you wish.
6. You can also enable or disable certain driver features or settings that may affect the performance or functionality of your applications. For example, you can enable or disable hardware acceleration, anti-aliasing, vertical sync, etc.
7. Once you are done with configuring and customizing the software settings, click on the "OK" button to save your changes.

    How to run and analyze your code with the software

    -

    To run and analyze your code with Interlok Driver Auto-tune Software, you need to follow these steps:

    -
      -
1. Launch Interlok Driver Auto-tune Software from your desktop or start menu.
2. Click on the "File" menu and select "Open".
3. Select the application that you want to run and optimize. For example, you can select the Maya.exe file from your computer.
4. Click on the "Open" button and wait for the software to load and run the application.
5. You will see a window with the application running and a toolbar with various options and information.
6. You can use the toolbar to monitor and control the driver settings and the performance of the application. For example, you can see the CPU usage, GPU usage, memory usage, FPS, etc. of the application.
7. You can also use the toolbar to switch between different driver profiles, enable or disable certain driver features or settings, or run a stress test or a benchmark test on the application.
8. You can also use the toolbar to take screenshots, record videos, or save logs of the application.
9. Once you are done with running and analyzing your code with Interlok Driver Auto-tune Software, you can close the window and exit the software.

    Conclusion

    -

    Interlok Driver Auto-tune Software is a powerful and useful tool that can help you run your code faster and more efficiently for various applications. It can also help you improve the performance, stability, compatibility, and security of your hardware and software. It is easy to download, install, and use Interlok Driver Auto-tune Software for your projects. You can also configure and customize the software settings according to your preferences and needs. You can also run and analyze your code with Interlok Driver Auto-tune Software and get valuable insights and feedback on your workflows. If you are looking for a tool that can optimize your driver settings and enhance your productivity and creativity, you should definitely try Interlok Driver Auto-tune Software.

    -

    FAQs

    -

    What is the difference between Interlok Driver Auto-tune Software and other similar tools?

    -

    Interlok Driver Auto-tune Software is different from other similar tools in several ways. First, Interlok Driver Auto-tune Software is based on the technology of Auto-Tune, which is a renowned vocal processing and pitch correction software. This means that Interlok Driver Auto-tune Software can tune your driver settings with high accuracy and quality. Second, Interlok Driver Auto-tune Software supports various types of hardware and software, unlike some tools that only support specific devices or applications. Third, Interlok Driver Auto-tune Software provides a user-friendly interface that allows you to easily monitor and control the driver settings and the performance of your applications.

    -

    Is Interlok Driver Auto-tune Software safe and legal to use?

    -

    Yes, Interlok Driver Auto-tune Software is safe and legal to use. Interlok Driver Auto-tune Software is developed by Antares Tech, a reputable company that has been in the industry for over 20 years. Interlok Driver Auto-tune Software does not contain any viruses, malware, spyware, or other harmful components that may damage your computer or compromise your privacy. Interlok Driver Auto-tune Software is also licensed by PACE Anti-Piracy, which is a leading provider of anti-piracy solutions for software developers. This means that Interlok Driver Auto-tune Software is authorized and protected by PACE Anti-Piracy and does not violate any intellectual property rights or laws.

    -

    How much does Interlok Driver Auto-tune Software cost and where can I buy it?

    -

    Interlok Driver Auto-tune Software costs $99.00 USD for a single-user license. You can buy it from the official website of Antares Tech or from authorized resellers or distributors. You can also get a free trial version of Interlok Driver Auto-tune Software from the official website of Antares Tech. The free trial version allows you to use Interlok Driver Auto-tune Software for 14 days with full functionality.

    -

    How can I contact the support team if I have any questions or issues?

    -

    If you have any questions or issues regarding Interlok Driver Auto-tune Software, you can contact the support team of Antares Tech by filling out a support ticket on their website or by sending an email to support@antarestech.com. You can also visit their learning center or community forum to find helpful resources and tips on how to use Interlok Driver Auto-tune Software.

    -

    What are some alternative or complementary software that I can use with Interlok Driver Auto-tune Software?

    -

    Some alternative or complementary software that you can use with Interlok Driver Auto-tune Software are:

    - -

    -
    -
    \ No newline at end of file diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Canalis Hydra Urbano.md b/spaces/1gistliPinn/ChatGPT4/Examples/Canalis Hydra Urbano.md deleted file mode 100644 index f6a7ffe3985fa5c77c3f1954708aaea9f321cd28..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Canalis Hydra Urbano.md +++ /dev/null @@ -1,9 +0,0 @@ -
    -

A section of the drawing can also be saved (the only type of section that can be saved in Urbano) by pressing the menu item “Save section”. The saved section is automatically linked with a pipe in the drawing; it is not possible to save the section by itself.

    -

    -

    Canalis hydra urbano


    DOWNLOAD ✶✶✶ https://imgfil.com/2uy08U



    -

The drawing 00 tutorial initial.dwg should be open in Civil 3D with the Urbano 7 profile (after installation you should have the appropriate icon on your desktop). The system should be set to show multiple pipes in the drawing (the show_multiple_pipes option in the Urbano configurator). Define the first pipe, go to the drawing, and save the section of the drawing by pressing the menu item “Save section”. It will be saved to the last pipe in the drawing. Go to the drawing and create the next pipe, show it in the drawing, and save the section of the drawing by pressing the menu item “Save section”. It will be saved to the next pipe in the drawing. You can repeat this process until you have defined all pipes.

    -

In Urbano, it is possible to view, define, and edit the properties of pipes and sections. In the drawing, you can select pipes and sections by clicking on them, and you can change the pipe and section parameters in the same way. To edit, it is enough to select them and press the menu item “Edit”.

    -

Urbano Hydra is a powerful tool for EPANET hydraulic calculation. It generates EPANET hydraulic results in seconds without requiring EPANET to be installed on your computer. EPANET results can be reviewed in Urbano Hydra, which is fully integrated into the AutoCAD environment. Urbano Hydra is free for non-commercial users.

    -
    -
    \ No newline at end of file diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Efi Colorproof Xf 4.5 Download Crack Software.md b/spaces/1gistliPinn/ChatGPT4/Examples/Efi Colorproof Xf 4.5 Download Crack Software.md deleted file mode 100644 index a868877d16c66b95b6478dda639d420dd908a0a0..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Efi Colorproof Xf 4.5 Download Crack Software.md +++ /dev/null @@ -1,110 +0,0 @@ - -

    Efi Colorproof Xf 4.5 Download Crack Software: A Complete Review

    - -

    If you are looking for a professional-level RIP software that can handle color management and proofing for your wide-format or superwide-format printer, you might want to consider Efi Colorproof Xf 4.5. This software is designed to help you produce accurate and consistent color across different devices and media types, as well as comply with industry standards such as ISO 12647-7 / 8 and G7. But what are the features and benefits of Efi Colorproof Xf 4.5, and how can you download it for free? In this article, we will give you a comprehensive review of Efi Colorproof Xf 4.5 download crack software, and show you how to get it without paying a dime.

    -

    efi colorproof xf 4.5 download crack software


    Download File ••• https://imgfil.com/2uy1If



    - -

    What is Efi Colorproof Xf 4.5?

    - -

Efi Colorproof Xf 4.5 is a software solution developed by EFI, a leading company in the field of digital printing and imaging. It is part of the EFI Fiery XF family of products, which also includes Fiery XF Server and Fiery XF Client. Efi Colorproof Xf 4.5 is RIP (raster image processor) software, which means it converts digital files into printable formats that can be understood by printers. Efi Colorproof Xf 4.5 is specifically designed for wide-format and superwide-format printers, such as inkjet, laser, and LED printers.

    - -

    Efi Colorproof Xf 4.5 enables you to manage the color supply chain from design to production to output, by providing you with all the tools to produce the best color possible. It allows you to create profiles for different devices and media types, calibrate your printer and monitor, optimize your output quality and consistency, and perform validation printing and contract proofing. It also supports a wide range of file formats, such as PDF, TIFF, JPEG, PSD, EPS, DCS2, PSB, and more.

    - -

    What are the benefits of Efi Colorproof Xf 4.5?

    - -

    Efi Colorproof Xf 4.5 offers many benefits for users who want to achieve high-quality color printing and proofing. Some of the main benefits are:

    - - - -

    How to download Efi Colorproof Xf 4.5 crack software for free?

    - -

    If you are interested in trying out Efi Colorproof Xf 4.5, you might be wondering how to download it for free. The official website of EFI offers a free trial version of Efi Colorproof Xf 4.5 that you can download and use for 30 days. However, if you want to use it beyond the trial period, you will need to purchase a license key that can cost thousands of dollars.

    - -

    Fortunately, there is a way to download Efi Colorproof Xf 4.5 crack software for free, without paying anything or risking any viruses or malware. All you need to do is follow these simple steps:

    -

    - -
      -
1. Go to this link and click on the "Download" button.
2. Create a free account on Zedload.com by entering your email address and choosing a password.
3. Log in to your account and click on the "Download" button again.
4. Select one of the available download servers and wait for the file to be downloaded.
5. Extract the file using WinRAR or any other extraction tool.
6. Run the setup file and follow the instructions to install Efi Colorproof Xf 4.5 on your computer.
7. Copy the crack file from the crack folder and paste it into the installation directory of Efi Colorproof Xf 4.5.
8. Launch Efi Colorproof Xf 4.5 and enjoy using it for free!

    Conclusion

    - -

    Efi Colorproof Xf 4.5 is a powerful and versatile software solution for professional color management and proofing for wide-format and superwide-format printers. It offers many features and benefits that can help you improve your printing quality and consistency, as well as comply with industry standards and customer expectations. If you want to try out Efi Colorproof Xf 4.5 without spending any money, you can download it for free from Zedload.com using the crack software method described above. However, we recommend that you use this method only for testing purposes, and not for commercial use. If you like Efi Colorproof Xf 4.5 and want to support its development, you should buy a license key from EFI or its authorized resellers.

    -

    Some of the drawbacks and risks of using Efi Colorproof Xf 4.5 crack software are:

    - - - -

    What are the alternatives to Efi Colorproof Xf 4.5 crack software?

    - -

    If you want to avoid the drawbacks and risks of using Efi Colorproof Xf 4.5 crack software, you have some alternatives that you can consider. Some of the alternatives are:

    - - - -

    Conclusion

    - -

    Efi Colorproof Xf 4.5 is a powerful and versatile RIP software solution for professional color management and proofing for wide-format and superwide-format printers. It offers many features and benefits that can help you improve your printing quality and consistency, as well as comply with industry standards and customer expectations. However, if you want to download Efi Colorproof Xf 4.5 crack software for free, you should be aware of the drawbacks and risks that come with it, such as legal, ethical, security, reliability, support, and quality issues. You should also consider some alternatives that can provide you with similar or better results without compromising your integrity or professionalism.

    -

    What are the reviews of Efi Colorproof Xf 4.5?

    - -

    Efi Colorproof Xf 4.5 has received many positive reviews from users who have used it for their color printing and proofing projects. Some of the common praises that users have given to Efi Colorproof Xf 4.5 are:

    - - - -

    However, Efi Colorproof Xf 4.5 also has some negative reviews from users who have encountered some problems or limitations with the software. Some of the common complaints that users have given to Efi Colorproof Xf 4.5 are:

    - - - -

    Overall, Efi Colorproof Xf 4.5 is a highly rated software solution that can meet the needs and expectations of most print professionals who want to achieve high-quality color printing and proofing. However, it also has some drawbacks and challenges that users should be aware of before using it.

    -

    What are some tips and tricks for using Efi Colorproof Xf 4.5?

    - -

    Efi Colorproof Xf 4.5 is a powerful and versatile software solution that can help you achieve high-quality color printing and proofing. However, to get the most out of it, you need to know some tips and tricks that can help you optimize your workflow and output. Here are some of them:

    - - - -

    Conclusion

    - -

    Efi Colorproof Xf 4.5 is a software solution that can help you manage color supply chain from design to production to output by providing you with all the tools to produce the best color possible. It allows you to produce ISO 12647-7 / 8 compliant validation printing and contract proofing and G7 compliant proofs on inkjet, laser and LED printers. However, if you want to download Efi Colorproof Xf 4.5 crack software for free, you should be aware of the drawbacks and risks that come with it, such as legal, ethical, security, reliability, support, and quality issues. You should also consider some alternatives that can provide you with similar or better results without compromising your integrity or professionalism. Finally, you should also know some tips and tricks that can help you optimize your workflow and output using Efi Colorproof Xf 4.5.

    -

    -
    -
    \ No newline at end of file diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Bus Simulator Ultimate Mod APK and Enjoy All Bus Unlocked.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Bus Simulator Ultimate Mod APK and Enjoy All Bus Unlocked.md deleted file mode 100644 index d462891a04c6777a8f1c89cebcdcfcc3cea978be..0000000000000000000000000000000000000000 --- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Bus Simulator Ultimate Mod APK and Enjoy All Bus Unlocked.md +++ /dev/null @@ -1,113 +0,0 @@ - -

    Bus Simulator Ultimate Mod APK Unlocked All Bus: A Review

    -

    If you are a fan of simulation games, especially bus driving games, you might have heard of Bus Simulator Ultimate. This is one of the most popular and realistic bus simulator games on the market, with over 100 million downloads on Google Play Store. In this game, you can experience what it is like to be a bus driver, from driving on different roads and cities, to managing your own bus company and hiring other drivers. You can also customize your buses, create your own routes, and compete with other players online. But what if you want to enjoy the game without any limitations or restrictions? That's where Bus Simulator Ultimate Mod APK comes in handy. This is a modified version of the original game that gives you access to unlimited money, removed ads, and unlocked all buses and skins. In this article, we will review Bus Simulator Ultimate Mod APK and show you how to download and install it on your device.

    -

    bus simulator ultimate mod apk unlocked all bus


    Download Zip ––– https://urlin.us/2uSU5n



    -

    What is Bus Simulator Ultimate?

    -

    Bus Simulator Ultimate is a 3D bus simulation game developed by Zuuks Games, a Turkish game studio that specializes in simulation and racing games. The game was released in 2019 and has since received many updates and improvements. The game is available for both Android and iOS devices, as well as Windows PC.

    -

    In Bus Simulator Ultimate, you can choose from over 30 different buses, each with their own features and specifications. You can drive on realistic roads and environments, such as Germany, Turkey, Italy, France, Spain, USA, Brazil, Azerbaijan, Russia, China, Japan, Canada, Netherlands, and more. You can also create your own routes and share them with other players. You can manage your own bus company and hire other drivers to work for you. You can earn money by completing missions and challenges, as well as by transporting passengers safely and comfortably. You can use the money to buy new buses, upgrade your existing ones, or expand your business.

    -

    The game also features online multiplayer mode, where you can join or create a bus convoy with other players from around the world. You can chat with them, cooperate with them, or compete with them on the leaderboards. You can also join events and tournaments to win prizes and rewards.

    -

    bus simulator ultimate mod apk unlimited money and gold
    -bus simulator ultimate mod apk free download latest version
    -bus simulator ultimate mod apk all buses unlocked and upgraded
    -bus simulator ultimate mod apk offline play without internet
    -bus simulator ultimate mod apk hack cheats no root
    -bus simulator ultimate mod apk realistic graphics and sounds
    -bus simulator ultimate mod apk multiplayer mode with friends
    -bus simulator ultimate mod apk new maps and routes added
    -bus simulator ultimate mod apk premium features unlocked
    -bus simulator ultimate mod apk no ads and in-app purchases
    -bus simulator ultimate mod apk android 11 support
    -bus simulator ultimate mod apk obb data file download
    -bus simulator ultimate mod apk high fps and smooth gameplay
    -bus simulator ultimate mod apk custom skins and liveries
    -bus simulator ultimate mod apk easy controls and settings
    -bus simulator ultimate mod apk different weather and traffic conditions
    -bus simulator ultimate mod apk fun missions and challenges
    -bus simulator ultimate mod apk best simulation game of 2023
    -bus simulator ultimate mod apk create your own company and logo
    -bus simulator ultimate mod apk earn rewards and bonuses
    -bus simulator ultimate mod apk drive various types of buses
    -bus simulator ultimate mod apk support for gamepad and steering wheel
    -bus simulator ultimate mod apk realistic physics and damage system
    -bus simulator ultimate mod apk online leaderboards and achievements
    -bus simulator ultimate mod apk original soundtrack and radio stations
    -bus simulator ultimate mod apk low mb size and fast installation
    -bus simulator ultimate mod apk compatible with all devices and android versions
    -bus simulator ultimate mod apk unlimited fuel and maintenance
    -bus simulator ultimate mod apk travel across different countries and cities
    -bus simulator ultimate mod apk dynamic day and night cycle
    -bus simulator ultimate mod apk passenger mode and camera views
    -bus simulator ultimate mod apk improve your driving skills and ratings
    -bus simulator ultimate mod apk regular updates and bug fixes
    -bus simulator ultimate mod apk user-friendly interface and design
    -bus simulator ultimate mod apk full unlocked everything no verification
    -bus simulator ultimate mod apk unlimited gems and coins generator
    -bus simulator ultimate mod apk download from google play store or apkpure.com[^1^]
    -bus simulator ultimate mod apk enjoy the most realistic and immersive simulation experience ever

    -

    Features of Bus Simulator Ultimate

    -

    Bus Simulator Ultimate has many features that make it one of the best bus simulator games on the market. Here are some of them:

    -

    Realistic bus driving experience

    -

    The game offers a realistic bus driving experience with high-quality graphics, sound effects, and physics. You can feel the weight and size of the bus as you drive on different roads and terrains. You can also interact with various elements inside and outside the bus, such as the steering wheel, pedals, mirrors, doors, windows, lights, indicators, wipers, horn, radio, GPS, etc. You can also adjust the camera angle to suit your preference.

    -

    Multiple game modes and challenges

    -

    The game has multiple game modes and challenges for you to enjoy. You can play in free mode, where you can drive anywhere you want without any time limit or objective. You can also play in career mode, where you have to complete missions and tasks to earn money and reputation. You can also play in multiplayer mode, where you can join or create a bus convoy with other players online. You can also participate in events and tournaments to win prizes and rewards.

    -

    Customizable buses and routes

    -

    The game allows you to customize your buses and routes according to your liking. You can choose from over 30 different buses, each with their own features and specifications. You can also change the color, skin, license plate, logo, interior, and accessories of your buses. You can also create your own routes and share them with other players. You can select the starting and ending points, the stops, the traffic, the weather, the time, and the difficulty of your routes. You can also edit the existing routes and make them more challenging or fun.

    -

    Online multiplayer and leaderboards

    -

    The game also features online multiplayer mode, where you can join or create a bus convoy with other players from around the world. You can chat with them, cooperate with them, or compete with them on the leaderboards. You can also join events and tournaments to win prizes and rewards. You can also see the statistics and rankings of other players and compare your performance with them.

    -

    How to download and install Bus Simulator Ultimate Mod APK?

    -

    If you want to download and install Bus Simulator Ultimate Mod APK on your device, you need to follow some simple steps. Here they are:

    -

    Requirements and compatibility

    -

    Before you download and install Bus Simulator Ultimate Mod APK, you need to make sure that your device meets the minimum requirements and is compatible with the game. The requirements are as follows:

    - -

    The game is compatible with most Android devices, but some models may not work properly or may experience some glitches. If you encounter any problems, you can contact the developer or try a different device.

    -

    Steps to download and install

    -

    After you have checked the requirements and compatibility, you can proceed to download and install Bus Simulator Ultimate Mod APK on your device. The steps are as follows:

    -
      -
1. Go to a trusted website that provides a Bus Simulator Ultimate Mod APK download link, such as [APKPure] or [APKDone].
2. Click on the download button and wait for the file to be downloaded on your device.
3. Once the file is downloaded, go to your device's settings and enable the installation of apps from unknown sources. This will allow you to install Bus Simulator Ultimate Mod APK without any issues.
4. Locate the downloaded file on your device and tap on it to start the installation process.
5. Follow the instructions on the screen and wait for the installation to be completed.
6. Launch the game and enjoy Bus Simulator Ultimate Mod APK features.

    Permissions and security

    -

    When you install Bus Simulator Ultimate Mod APK on your device, you may need to grant some permissions to the app. These permissions are necessary for the app to function properly and access some features of your device. The permissions are as follows:

    - -

    You can revoke these permissions at any time by going to your device's settings and selecting the app. However, this may affect some features of the game and cause some errors.

    -

    As for security, you don't have to worry about Bus Simulator Ultimate Mod APK being harmful or malicious. The app is safe and virus-free, as long as you download it from a trusted website. However, you should always be careful when downloading any modded apps from unknown sources, as they may contain malware or spyware that can harm your device or steal your personal information. You should also avoid using Bus Simulator Ultimate Mod APK on public or unsecured networks, as they may expose your data to hackers or cybercriminals.

    -

    What are the benefits of Bus Simulator Ultimate Mod APK?

    -

    Bus Simulator Ultimate Mod APK has many benefits that make it better than the original game. Here are some of them:

    -

    Unlimited money and resources

    -

    The most obvious benefit of Bus Simulator Ultimate Mod APK is that it gives you unlimited money and resources in the game. You don't have to worry about running out of money or resources when buying new buses, upgrading your existing ones, expanding your business, etc. You can also use the money and resources to unlock premium features that are otherwise unavailable in the original game.

    -

    Removed ads

    Removed ads and pop-ups

    -

    Another benefit of Bus Simulator Ultimate Mod APK is that it removes all the annoying ads and pop-ups that interrupt your gameplay and ruin your immersion. You don't have to watch any ads to get extra rewards or bonuses, or to access some features of the game. You can also avoid any unwanted redirects or downloads that may harm your device or waste your data. You can enjoy the game without any distractions or interruptions.

    -

    Unlocked all buses and skins

    -

    The last but not least benefit of Bus Simulator Ultimate Mod APK is that it unlocks all the buses and skins in the game. You don't have to spend any money or resources to buy new buses or upgrade your existing ones. You can also change the appearance of your buses with different skins, colors, logos, etc. You can choose from over 30 different buses, each with their own features and specifications. You can also access some exclusive buses and skins that are only available in the modded version of the game.

    -

    Conclusion

    -

    Bus Simulator Ultimate is one of the best bus simulator games on the market, with realistic graphics, sound effects, physics, and gameplay. You can drive on different roads and environments, customize your buses and routes, manage your own bus company, and compete with other players online. However, if you want to enjoy the game without any limitations or restrictions, you should try Bus Simulator Ultimate Mod APK. This is a modified version of the original game that gives you unlimited money, removed ads, and unlocked all buses and skins. You can download and install Bus Simulator Ultimate Mod APK on your device by following some simple steps. However, you should always be careful when downloading any modded apps from unknown sources, as they may contain malware or spyware that can harm your device or steal your personal information. You should also avoid using Bus Simulator Ultimate Mod APK on public or unsecured networks, as they may expose your data to hackers or cybercriminals.

    -

    We hope this article has helped you learn more about Bus Simulator Ultimate Mod APK and how to download and install it on your device. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!

    -

    FAQs

    -

    Here are some frequently asked questions about Bus Simulator Ultimate Mod APK:

    -

    -
    -
    \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Dislyte APK OBB Tips and Tricks to Master the Deep Strategic Gameplay.md b/spaces/1phancelerku/anime-remove-background/Dislyte APK OBB Tips and Tricks to Master the Deep Strategic Gameplay.md deleted file mode 100644 index e1a5c5f4db88fe51c1fca7ceee078eb5ff5f4b02..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Dislyte APK OBB Tips and Tricks to Master the Deep Strategic Gameplay.md +++ /dev/null @@ -1,164 +0,0 @@ - -

    Dislyte APK OBB: How to Download and Install the Stylish Urban Mythological Game

    -

    If you are looking for a new and exciting game to play on your Android device, you might want to check out Dislyte. Dislyte is a visually stunning story of heroes set in the near future with a stylish, urban mythological theme. In this article, we will tell you what Dislyte is, why you need Dislyte APK OBB, and how to download and install it on your device.

    -

    dislyte apk obb


Download File: https://jinyurl.com/2uNUFD



    -

    What is Dislyte?

    -

    Dislyte is a game developed by Lilith Games, the creators of popular titles like AFK Arena and Rise of Kingdoms. Dislyte is a role-playing game (RPG) that combines action, strategy, and adventure elements. Here are some of the features that make Dislyte stand out:

    -

    A brief introduction to the game and its features

    - -

    The main characters and their abilities

    -

    Dislyte has a rich cast of characters that you can recruit and upgrade. Each character has their own personality, backstory, and role in the game. Here are some of the main characters and their abilities:

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Name | Role | Ability |
| --- | --- | --- |
| Alex | Leader | A charismatic leader who can inspire his allies and boost their morale. |
| Luna | Sniper | A sharpshooter who can deal massive damage from a distance and pierce through enemies' defenses. |
| Kira | Hacker | A genius hacker who can hack into enemies' systems and disrupt their operations. |
| Ruby | Brawler | A fierce brawler who can charge into enemies and knock them out with her fists. |
| Zack | Medic | A skilled medic who can heal his allies and revive them when they fall. |
    -

    The gameplay modes and challenges

    -

    Dislyte has various gameplay modes and challenges that will test your skills and strategy. You can choose from:

    - -

    Why do you need Dislyte APK OBB?

    -

    Dislyte is a game that requires a lot of storage space and data to run smoothly. The game has two main files: the APK file and the OBB file. The APK file is the application package that contains the game's code and resources. The OBB file is the expansion file that contains the game's data and assets. Here are some of the reasons why you need Dislyte APK OBB:

    -

    The benefits of downloading the APK OBB files

    - -

    The requirements and compatibility of the APK OBB files

    -

    Before you download and install Dislyte APK OBB, you need to make sure that your device meets the following requirements and compatibility:

    - -

    The risks and precautions of downloading the APK OBB files

    -

    While downloading and installing Dislyte APK OBB has many benefits, it also has some risks and precautions that you need to be aware of:

    -

    How to download and install Dislyte APK OBB?

    -

    Now that you know what Dislyte is and why you need Dislyte APK OBB, you might be wondering how to download and install it on your device. Don't worry, we have got you covered. Here are the steps you need to follow:

    -

    The steps to download the APK OBB files from a reliable source

    -
      -
1. Go to a reliable and trusted source, such as [Dislyte Official Website] or [APKPure].
2. Find the Dislyte APK OBB files and click on the download button.
3. Wait for the download to complete. You will need about 2 GB of data to download the files.
4. Locate the downloaded files in your device's file manager or download folder.

    The steps to install the APK OBB files on your device

    -
      -
1. Tap on the Dislyte APK file and follow the instructions to install it.
2. Do not open the game yet. You need to copy the OBB file to the right folder first.
3. Tap and hold on the Dislyte OBB file and select copy or move.
4. Navigate to the Android/OBB folder in your device's internal storage.
5. Create a new folder named com.lilithgame.dislyte and paste the OBB file inside it (a command-line sketch of this step follows the list).
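If you prefer to do the installation from a computer instead of directly on the phone, the same folder layout can be reached over adb. This is only a rough sketch, not an official procedure: the APK and OBB file names below are placeholders for whatever your downloaded files are actually called, and the package folder name comes from the steps above.

```sh
# Sketch only: dislyte.apk and dislyte.obb are placeholder names for the downloaded files.
adb install dislyte.apk

# Create the package folder the game expects, then copy the OBB into it.
adb shell mkdir -p /sdcard/Android/obb/com.lilithgame.dislyte
adb push dislyte.obb /sdcard/Android/obb/com.lilithgame.dislyte/
```

Note that many games also expect the OBB to follow Android's main.<version>.<package>.obb naming convention, so it is safest to keep whatever file name the download already has.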

    The steps to verify and launch the game

    -
      -
1. Go back to your device's home screen or app drawer and find the Dislyte icon.
2. Tap on the icon and wait for the game to load.
3. Verify your age and accept the terms of service.
4. Choose your preferred language and server.
5. Create or log in to your account using a social media platform, such as Facebook or Google.
6. Enjoy playing Dislyte!

    Conclusion

    -

    Dislyte is a stylish urban mythological game that offers an immersive storyline, stunning graphics, smooth gameplay, rich characters, and various modes and challenges. You can download and install Dislyte APK OBB on your Android device by following the steps we have provided in this article. However, you need to be careful about the source, compatibility, and security of the APK OBB files. We hope you found this article helpful and informative. If you have any questions or feedback, please feel free to leave a comment below. Happy gaming!

    -

    FAQs

    -

    Here are some of the frequently asked questions about Dislyte APK OBB:

    -

    dislyte mod apk unlimited money
    -dislyte apk obb download for android
    -dislyte game apk obb offline
    -dislyte apk obb latest version
    -dislyte mod menu apk obb
    -dislyte apk obb free download
    -dislyte hack apk obb
    -dislyte apk obb full unlocked
    -dislyte apk obb highly compressed
    -dislyte apk obb no verification
    -dislyte apk obb update 2023
    -dislyte apk obb gameplay
    -dislyte apk obb installation guide
    -dislyte apk obb file size
    -dislyte apk obb requirements
    -dislyte apk obb modded by apkbounce[^1^]
    -dislyte apk obb best settings
    -dislyte apk obb cheats codes
    -dislyte apk obb english version
    -dislyte apk obb review
    -dislyte apk obb features
    -dislyte apk obb tips and tricks
    -dislyte apk obb new characters
    -dislyte apk obb how to play
    -dislyte apk obb compatible devices
    -dislyte apk obb error fix
    -dislyte apk obb graphics quality
    -dislyte apk obb sound effects
    -dislyte apk obb storyline
    -dislyte apk obb screenshots
    -dislyte apk obb trailer
    -dislyte apk obb official website
    -dislyte apk obb fan art
    -dislyte apk obb wallpapers
    -dislyte apk obb memes
    -dislyte apk obb reddit community
    -dislyte apk obb discord server
    -dislyte apk obb youtube videos
    -dislyte apk obb facebook page
    -dislyte apk obb twitter account
    -dislyte apk obb instagram profile
    -dislyte apk obb tiktok clips
    -dislyte apk obb merchandise store
    -dislyte apk obb developer contact
    -dislyte apk obb customer support
    -dislyte apk obb ratings and feedbacks
    -dislyte apk obb alternatives and similar games
    -dislyte apk obb frequently asked questions (FAQs)

    -

    -
    -
    \ No newline at end of file diff --git a/spaces/1toTree/lora_test/ppdiffusers/schedulers/scheduling_sde_vp.py b/spaces/1toTree/lora_test/ppdiffusers/schedulers/scheduling_sde_vp.py deleted file mode 100644 index 04158a4288b6f40849d43e3c57864335cf2030f7..0000000000000000000000000000000000000000 --- a/spaces/1toTree/lora_test/ppdiffusers/schedulers/scheduling_sde_vp.py +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. -# Copyright 2022 Google Brain and The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch - -import math - -import paddle - -from ..configuration_utils import ConfigMixin, register_to_config -from .scheduling_utils import SchedulerMixin - - -class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin): - """ - The variance preserving stochastic differential equation (SDE) scheduler. - - [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` - function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. - [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and - [`~SchedulerMixin.from_pretrained`] functions. 
- - For more information, see the original paper: https://arxiv.org/abs/2011.13456 - - UNDER CONSTRUCTION - - """ - - order = 1 - - @register_to_config - def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3): - self.sigmas = None - self.discrete_sigmas = None - self.timesteps = None - - def set_timesteps(self, num_inference_steps): - self.timesteps = paddle.linspace(1, self.config.sampling_eps, num_inference_steps) - - def step_pred(self, score, x, t, generator=None): - if self.timesteps is None: - raise ValueError( - "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" - ) - - # TODO(Patrick) better comments + non-Paddle - # postprocess model score - log_mean_coeff = ( - -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min - ) - std = paddle.sqrt(1.0 - paddle.exp(2.0 * log_mean_coeff)) - std = std.flatten() - while len(std.shape) < len(score.shape): - std = std.unsqueeze(-1) - score = -score / std - - # compute - dt = -1.0 / len(self.timesteps) - - beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min) - beta_t = beta_t.flatten() - while len(beta_t.shape) < len(x.shape): - beta_t = beta_t.unsqueeze(-1) - drift = -0.5 * beta_t * x - - diffusion = paddle.sqrt(beta_t) - drift = drift - diffusion**2 * score - x_mean = x + drift * dt - - # add noise - noise = paddle.randn(x.shape, generator=generator) - x = x_mean + diffusion * math.sqrt(-dt) * noise - - return x, x_mean - - def __len__(self): - return self.config.num_train_timesteps diff --git a/spaces/4Taps/SadTalker/src/face3d/data/base_dataset.py b/spaces/4Taps/SadTalker/src/face3d/data/base_dataset.py deleted file mode 100644 index 1bd57d082d519f512d7114b4f867b6695fb7de06..0000000000000000000000000000000000000000 --- a/spaces/4Taps/SadTalker/src/face3d/data/base_dataset.py +++ /dev/null @@ -1,125 +0,0 @@ -"""This module implements an abstract base class (ABC) 'BaseDataset' for datasets. - -It also includes common transformation functions (e.g., get_transform, __scale_width), which can be later used in subclasses. -""" -import random -import numpy as np -import torch.utils.data as data -from PIL import Image -import torchvision.transforms as transforms -from abc import ABC, abstractmethod - - -class BaseDataset(data.Dataset, ABC): - """This class is an abstract base class (ABC) for datasets. - - To create a subclass, you need to implement the following four functions: - -- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt). - -- <__len__>: return the size of dataset. - -- <__getitem__>: get a data point. - -- : (optionally) add dataset-specific options and set default options. - """ - - def __init__(self, opt): - """Initialize the class; save the options in the class - - Parameters: - opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions - """ - self.opt = opt - # self.root = opt.dataroot - self.current_epoch = 0 - - @staticmethod - def modify_commandline_options(parser, is_train): - """Add new dataset-specific options, and rewrite default values for existing options. - - Parameters: - parser -- original option parser - is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options. - - Returns: - the modified parser. 
- """ - return parser - - @abstractmethod - def __len__(self): - """Return the total number of images in the dataset.""" - return 0 - - @abstractmethod - def __getitem__(self, index): - """Return a data point and its metadata information. - - Parameters: - index - - a random integer for data indexing - - Returns: - a dictionary of data with their names. It ususally contains the data itself and its metadata information. - """ - pass - - -def get_transform(grayscale=False): - transform_list = [] - if grayscale: - transform_list.append(transforms.Grayscale(1)) - transform_list += [transforms.ToTensor()] - return transforms.Compose(transform_list) - -def get_affine_mat(opt, size): - shift_x, shift_y, scale, rot_angle, flip = 0., 0., 1., 0., False - w, h = size - - if 'shift' in opt.preprocess: - shift_pixs = int(opt.shift_pixs) - shift_x = random.randint(-shift_pixs, shift_pixs) - shift_y = random.randint(-shift_pixs, shift_pixs) - if 'scale' in opt.preprocess: - scale = 1 + opt.scale_delta * (2 * random.random() - 1) - if 'rot' in opt.preprocess: - rot_angle = opt.rot_angle * (2 * random.random() - 1) - rot_rad = -rot_angle * np.pi/180 - if 'flip' in opt.preprocess: - flip = random.random() > 0.5 - - shift_to_origin = np.array([1, 0, -w//2, 0, 1, -h//2, 0, 0, 1]).reshape([3, 3]) - flip_mat = np.array([-1 if flip else 1, 0, 0, 0, 1, 0, 0, 0, 1]).reshape([3, 3]) - shift_mat = np.array([1, 0, shift_x, 0, 1, shift_y, 0, 0, 1]).reshape([3, 3]) - rot_mat = np.array([np.cos(rot_rad), np.sin(rot_rad), 0, -np.sin(rot_rad), np.cos(rot_rad), 0, 0, 0, 1]).reshape([3, 3]) - scale_mat = np.array([scale, 0, 0, 0, scale, 0, 0, 0, 1]).reshape([3, 3]) - shift_to_center = np.array([1, 0, w//2, 0, 1, h//2, 0, 0, 1]).reshape([3, 3]) - - affine = shift_to_center @ scale_mat @ rot_mat @ shift_mat @ flip_mat @ shift_to_origin - affine_inv = np.linalg.inv(affine) - return affine, affine_inv, flip - -def apply_img_affine(img, affine_inv, method=Image.BICUBIC): - return img.transform(img.size, Image.AFFINE, data=affine_inv.flatten()[:6], resample=Image.BICUBIC) - -def apply_lm_affine(landmark, affine, flip, size): - _, h = size - lm = landmark.copy() - lm[:, 1] = h - 1 - lm[:, 1] - lm = np.concatenate((lm, np.ones([lm.shape[0], 1])), -1) - lm = lm @ np.transpose(affine) - lm[:, :2] = lm[:, :2] / lm[:, 2:] - lm = lm[:, :2] - lm[:, 1] = h - 1 - lm[:, 1] - if flip: - lm_ = lm.copy() - lm_[:17] = lm[16::-1] - lm_[17:22] = lm[26:21:-1] - lm_[22:27] = lm[21:16:-1] - lm_[31:36] = lm[35:30:-1] - lm_[36:40] = lm[45:41:-1] - lm_[40:42] = lm[47:45:-1] - lm_[42:46] = lm[39:35:-1] - lm_[46:48] = lm[41:39:-1] - lm_[48:55] = lm[54:47:-1] - lm_[55:60] = lm[59:54:-1] - lm_[60:65] = lm[64:59:-1] - lm_[65:68] = lm[67:64:-1] - lm = lm_ - return lm diff --git a/spaces/A00001/bingothoo/src/components/theme-toggle.tsx b/spaces/A00001/bingothoo/src/components/theme-toggle.tsx deleted file mode 100644 index 67d3f1a2c163ccbeb52c40a7e42f107190237154..0000000000000000000000000000000000000000 --- a/spaces/A00001/bingothoo/src/components/theme-toggle.tsx +++ /dev/null @@ -1,31 +0,0 @@ -'use client' - -import * as React from 'react' -import { useTheme } from 'next-themes' - -import { Button } from '@/components/ui/button' -import { IconMoon, IconSun } from '@/components/ui/icons' - -export function ThemeToggle() { - const { setTheme, theme } = useTheme() - const [_, startTransition] = React.useTransition() - - return ( - - ) -} diff --git a/spaces/AB-TW/team-ai/chains.py b/spaces/AB-TW/team-ai/chains.py deleted file mode 100644 index 
4cb14d1a2de81c4c7cf7f2825e46fecf569dbaef..0000000000000000000000000000000000000000 --- a/spaces/AB-TW/team-ai/chains.py +++ /dev/null @@ -1,42 +0,0 @@ -from typing import Any, Optional -from langchain.chains import LLMChain -from langchain.base_language import BaseLanguageModel -from langchain.prompts import PromptTemplate -from langchain.memory.chat_memory import BaseMemory -from models import llm - -from promopts import CONTENT_RE_WRIGHT_PROMPT, FEEDBACK_PROMPT - - -class HumanFeedBackChain(LLMChain): - """Chain to run queries against LLMs.""" - - memory: Optional[BaseMemory] = None - - def __init__(self, verbose=True, llm: BaseLanguageModel = llm(temperature=0.7), memory: Optional[BaseMemory] = None, prompt: PromptTemplate = FEEDBACK_PROMPT): - super().__init__(llm=llm, prompt=prompt, memory=memory, verbose=verbose) - - def run(self, *args: Any, **kwargs: Any) -> str: - """Run the chain as text in, text out or multiple variables, text out.""" - if len(self.output_keys) != 1: - raise ValueError( - f"`run` not supported when there is not exactly " - f"one output key. Got {self.output_keys}." - ) - - if args and not kwargs: - if len(args) != 1: - raise ValueError( - "`run` supports only one positional argument.") - return self("Answer:" + args[0])[self.output_keys[0]] - - if kwargs and not args: - return self(kwargs)[self.output_keys[0]] - - raise ValueError( - f"`run` supported with either positional arguments or keyword arguments" - f" but not both. Got args: {args} and kwargs: {kwargs}." - ) - - -contextRewriteChain = LLMChain(llm=llm(temperature=0.7), prompt=CONTENT_RE_WRIGHT_PROMPT) \ No newline at end of file diff --git a/spaces/AIOSML/README/README.md b/spaces/AIOSML/README/README.md deleted file mode 100644 index ebccba50833e0940f4e3d2e9d6851aa81d8a3cd2..0000000000000000000000000000000000000000 --- a/spaces/AIOSML/README/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: README -emoji: 👀 -colorFrom: red -colorTo: gray -sdk: gradio -pinned: false -license: bsd ---- - -Edit this `README.md` markdown file to author your organization card 🔥 -AIOSML a noble attempt to bridge local machine learning with linux system administration and access control lists \ No newline at end of file diff --git a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/custom_dataset/yolov7_l_syncbn_fast_8x16b-300e_coco.py b/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/custom_dataset/yolov7_l_syncbn_fast_8x16b-300e_coco.py deleted file mode 100644 index c3c11c5ea042db35e7a73032a9b945522e3d7b21..0000000000000000000000000000000000000000 --- a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/custom_dataset/yolov7_l_syncbn_fast_8x16b-300e_coco.py +++ /dev/null @@ -1,472 +0,0 @@ -_base_ = ['../_base_/default_runtime.py', '../_base_/det_p5_tta.py'] - -data_root = './data-df2/' -train_ann_file = 'annotations/instances_train2017.json' -train_data_prefix = 'train2017/' -val_ann_file = 'annotations/instances_val2017.json' -val_data_prefix = 'val2017/' -num_classes = 13 -train_batch_size_per_gpu = 16 -train_num_workers = 8 -persistent_workers = True - -vis_backends = [ - dict(type='LocalVisBackend'), -] -visualizer = dict( - type='mmdet.DetLocalVisualizer', - vis_backends=[ - dict(type='LocalVisBackend'), - dict(type='WandbVisBackend') - ], - name='visualizer') -log_processor = dict(type='LogProcessor', window_size=50, by_epoch=True) -log_level = 'INFO' -load_from = None -resume = False - -anchors = [ - 
[(12, 16), (19, 36), (40, 28)], # P3/8 - [(36, 75), (76, 55), (72, 146)], # P4/16 - [(142, 110), (192, 243), (459, 401)] # P5/32 -] - -base_lr = 0.01 -max_epochs = 100 - -num_epoch_stage2 = 10 # The last 10 epochs switch evaluation interval -val_interval_stage2 = 1 - -model_test_cfg = dict( - multi_label=True, - nms_pre=30000, - score_thr=0.001, - nms=dict(type='nms', iou_threshold=0.65), - max_per_img=300) - -img_scale = (640, 640) -dataset_type = 'YOLOv5CocoDataset' -val_batch_size_per_gpu = 1 -val_num_workers = 2 -batch_shapes_cfg = dict( - type='BatchShapePolicy', - batch_size=val_batch_size_per_gpu, - img_size=img_scale[0], - size_divisor=32, - extra_pad_ratio=0.5) -strides = [8, 16, 32] # Strides of multi-scale prior box -num_det_layers = 3 -norm_cfg = dict(type='BN', momentum=0.03, eps=0.001) - -# Data augmentation -max_translate_ratio = 0.2 # YOLOv5RandomAffine -scaling_ratio_range = (0.1, 2.0) # YOLOv5RandomAffine -mixup_prob = 0.15 # YOLOv5MixUp -randchoice_mosaic_prob = [0.8, 0.2] -mixup_alpha = 8.0 # YOLOv5MixUp -mixup_beta = 8.0 # YOLOv5MixUp - -# -----train val related----- -loss_cls_weight = 0.3 -loss_bbox_weight = 0.05 -loss_obj_weight = 0.7 -# BatchYOLOv7Assigner params -simota_candidate_topk = 10 -simota_iou_weight = 3.0 -simota_cls_weight = 1.0 -prior_match_thr = 4. # Priori box matching threshold -obj_level_weights = [4., 1., - 0.4] # The obj loss weights of the three output layers - -lr_factor = 0.1 # Learning rate scaling factor -weight_decay = 0.0005 -save_epoch_intervals = 2 -max_keep_ckpts = 5 - -env_cfg = dict( - cudnn_benchmark=True, - mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), - dist_cfg=dict(backend='nccl')) - -# ===============================Unmodified in most cases==================== -model = dict( - type='YOLODetector', - data_preprocessor=dict( - type='YOLOv5DetDataPreprocessor', - mean=[0., 0., 0.], - std=[255., 255., 255.], - bgr_to_rgb=True), - backbone=dict( - type='YOLOv7Backbone', - arch='L', - norm_cfg=norm_cfg, - act_cfg=dict(type='SiLU', inplace=True)), - neck=dict( - type='YOLOv7PAFPN', - block_cfg=dict( - type='ELANBlock', - middle_ratio=0.5, - block_ratio=0.25, - num_blocks=4, - num_convs_in_block=1), - upsample_feats_cat_first=False, - in_channels=[512, 1024, 1024], - # The real output channel will be multiplied by 2 - out_channels=[128, 256, 512], - norm_cfg=norm_cfg, - act_cfg=dict(type='SiLU', inplace=True)), - bbox_head=dict( - type='YOLOv7Head', - head_module=dict( - type='YOLOv7HeadModule', - num_classes=num_classes, - in_channels=[256, 512, 1024], - featmap_strides=strides, - num_base_priors=3), - prior_generator=dict( - type='mmdet.YOLOAnchorGenerator', - base_sizes=anchors, - strides=strides), - # scaled based on number of detection layers - loss_cls=dict( - type='mmdet.CrossEntropyLoss', - use_sigmoid=True, - reduction='mean', - loss_weight=loss_cls_weight * - (num_classes / 80 * 3 / num_det_layers)), - loss_bbox=dict( - type='IoULoss', - iou_mode='ciou', - bbox_format='xywh', - reduction='mean', - loss_weight=loss_bbox_weight * (3 / num_det_layers), - return_iou=True), - loss_obj=dict( - type='mmdet.CrossEntropyLoss', - use_sigmoid=True, - reduction='mean', - loss_weight=loss_obj_weight * - ((img_scale[0] / 640)**2 * 3 / num_det_layers)), - prior_match_thr=prior_match_thr, - obj_level_weights=obj_level_weights, - # BatchYOLOv7Assigner params - simota_candidate_topk=simota_candidate_topk, - simota_iou_weight=simota_iou_weight, - simota_cls_weight=simota_cls_weight), - test_cfg=model_test_cfg) - -pre_transform = [ - 
dict(type='LoadImageFromFile', file_client_args=_base_.file_client_args), - dict(type='LoadAnnotations', with_bbox=True) -] - -mosiac4_pipeline = [ - dict( - type='Mosaic', - img_scale=img_scale, - pad_val=114.0, - pre_transform=pre_transform), - dict( - type='YOLOv5RandomAffine', - max_rotate_degree=0.0, - max_shear_degree=0.0, - max_translate_ratio=max_translate_ratio, # note - scaling_ratio_range=scaling_ratio_range, # note - # img_scale is (width, height) - border=(-img_scale[0] // 2, -img_scale[1] // 2), - border_val=(114, 114, 114)), -] - -mosiac9_pipeline = [ - dict( - type='Mosaic9', - img_scale=img_scale, - pad_val=114.0, - pre_transform=pre_transform), - dict( - type='YOLOv5RandomAffine', - max_rotate_degree=0.0, - max_shear_degree=0.0, - max_translate_ratio=max_translate_ratio, # note - scaling_ratio_range=scaling_ratio_range, # note - # img_scale is (width, height) - border=(-img_scale[0] // 2, -img_scale[1] // 2), - border_val=(114, 114, 114)), -] - -randchoice_mosaic_pipeline = dict( - type='RandomChoice', - transforms=[mosiac4_pipeline, mosiac9_pipeline], - prob=randchoice_mosaic_prob) - -train_pipeline = [ - *pre_transform, - randchoice_mosaic_pipeline, - dict( - type='YOLOv5MixUp', - alpha=mixup_alpha, # note - beta=mixup_beta, # note - prob=mixup_prob, - pre_transform=[*pre_transform, randchoice_mosaic_pipeline]), - dict(type='YOLOv5HSVRandomAug'), - dict(type='mmdet.RandomFlip', prob=0.5), - dict( - type='mmdet.PackDetInputs', - meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', - 'flip_direction')) -] - -train_dataloader = dict( - batch_size=train_batch_size_per_gpu, - num_workers=train_num_workers, - persistent_workers=persistent_workers, - pin_memory=True, - sampler=dict(type='DefaultSampler', shuffle=True), - collate_fn=dict(type='yolov5_collate'), # FASTER - dataset=dict( - type=dataset_type, - data_root=data_root, - ann_file=train_ann_file, - data_prefix=dict(img=train_data_prefix), - filter_cfg=dict(filter_empty_gt=False, min_size=32), - pipeline=train_pipeline)) - -test_pipeline = [ - dict(type='LoadImageFromFile', file_client_args=_base_.file_client_args), - dict(type='YOLOv5KeepRatioResize', scale=img_scale), - dict( - type='LetterResize', - scale=img_scale, - allow_scale_up=False, - pad_val=dict(img=114)), - dict(type='LoadAnnotations', with_bbox=True, _scope_='mmdet'), - dict( - type='mmdet.PackDetInputs', - meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', - 'scale_factor', 'pad_param')) -] - -val_dataloader = dict( - batch_size=val_batch_size_per_gpu, - num_workers=val_num_workers, - persistent_workers=persistent_workers, - pin_memory=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False), - dataset=dict( - type=dataset_type, - data_root=data_root, - test_mode=True, - data_prefix=dict(img=val_data_prefix), - ann_file=val_ann_file, - pipeline=test_pipeline, - batch_shapes_cfg=batch_shapes_cfg)) - -test_dataloader = val_dataloader - -param_scheduler = None -optim_wrapper = dict( - type='OptimWrapper', - optimizer=dict( - type='SGD', - lr=base_lr, - momentum=0.937, - weight_decay=weight_decay, - nesterov=True, - batch_size_per_gpu=train_batch_size_per_gpu), - constructor='YOLOv7OptimWrapperConstructor') - -default_scope = 'mmyolo' -default_hooks = dict( - timer=dict(type='IterTimerHook'), - logger=dict(type='LoggerHook', interval=2), - param_scheduler=dict( - type='YOLOv5ParamSchedulerHook', - scheduler_type='cosine', - lr_factor=lr_factor, # note - max_epochs=max_epochs), - checkpoint=dict( - 
type='CheckpointHook', - save_param_scheduler=False, - interval=save_epoch_intervals, - save_best='auto', - max_keep_ckpts=max_keep_ckpts), - sampler_seed=dict(type='DistSamplerSeedHook'), - visualization=dict(type='mmdet.DetVisualizationHook')) - -custom_hooks = [ - dict( - type='EMAHook', - ema_type='ExpMomentumEMA', - momentum=0.0001, - update_buffers=True, - strict_load=False, - priority=49) -] - -val_evaluator = dict( - type='mmdet.CocoMetric', - proposal_nums=(100, 1, 10), # Can be accelerated - ann_file=data_root + val_ann_file, - metric='bbox') -test_evaluator = val_evaluator - -train_cfg = dict( - type='EpochBasedTrainLoop', - max_epochs=max_epochs, - val_interval=save_epoch_intervals, - dynamic_intervals=[(max_epochs - num_epoch_stage2, val_interval_stage2)]) -val_cfg = dict(type='ValLoop') -test_cfg = dict(type='TestLoop') - -# ============================ - -file_client_args = dict(backend='disk') -_file_client_args = dict(backend='disk') -tta_model = dict( - type='mmdet.DetTTAModel', - tta_cfg=dict(nms=dict(type='nms', iou_threshold=0.65), max_per_img=300)) -img_scales = [ - ( - 640, - 640, - ), - ( - 320, - 320, - ), - ( - 960, - 960, - ), -] -_multiscale_resize_transforms = [ - dict( - type='Compose', - transforms=[ - dict(type='YOLOv5KeepRatioResize', scale=( - 640, - 640, - )), - dict( - type='LetterResize', - scale=( - 640, - 640, - ), - allow_scale_up=False, - pad_val=dict(img=114)), - ]), - dict( - type='Compose', - transforms=[ - dict(type='YOLOv5KeepRatioResize', scale=( - 320, - 320, - )), - dict( - type='LetterResize', - scale=( - 320, - 320, - ), - allow_scale_up=False, - pad_val=dict(img=114)), - ]), - dict( - type='Compose', - transforms=[ - dict(type='YOLOv5KeepRatioResize', scale=( - 960, - 960, - )), - dict( - type='LetterResize', - scale=( - 960, - 960, - ), - allow_scale_up=False, - pad_val=dict(img=114)), - ]), -] -tta_pipeline = [ - dict(type='LoadImageFromFile', file_client_args=dict(backend='disk')), - dict( - type='TestTimeAug', - transforms=[ - [ - dict( - type='Compose', - transforms=[ - dict(type='YOLOv5KeepRatioResize', scale=( - 640, - 640, - )), - dict( - type='LetterResize', - scale=( - 640, - 640, - ), - allow_scale_up=False, - pad_val=dict(img=114)), - ]), - dict( - type='Compose', - transforms=[ - dict(type='YOLOv5KeepRatioResize', scale=( - 320, - 320, - )), - dict( - type='LetterResize', - scale=( - 320, - 320, - ), - allow_scale_up=False, - pad_val=dict(img=114)), - ]), - dict( - type='Compose', - transforms=[ - dict(type='YOLOv5KeepRatioResize', scale=( - 960, - 960, - )), - dict( - type='LetterResize', - scale=( - 960, - 960, - ), - allow_scale_up=False, - pad_val=dict(img=114)), - ]), - ], - [ - dict(type='mmdet.RandomFlip', prob=1.0), - dict(type='mmdet.RandomFlip', prob=0.0), - ], - [ - dict(type='mmdet.LoadAnnotations', with_bbox=True), - ], - [ - dict( - type='mmdet.PackDetInputs', - meta_keys=( - 'img_id', - 'img_path', - 'ori_shape', - 'img_shape', - 'scale_factor', - 'pad_param', - 'flip', - 'flip_direction', - )), - ], - ]), -] - -launcher = 'none' \ No newline at end of file diff --git a/spaces/AchyuthGamer/OpenGPT/g4f/Provider/AiAsk.py b/spaces/AchyuthGamer/OpenGPT/g4f/Provider/AiAsk.py deleted file mode 100644 index 27d3bf15bfe996930f2a6c970454b93f91abb105..0000000000000000000000000000000000000000 --- a/spaces/AchyuthGamer/OpenGPT/g4f/Provider/AiAsk.py +++ /dev/null @@ -1,44 +0,0 @@ -from __future__ import annotations - -from aiohttp import ClientSession -from ..typing import AsyncGenerator -from .base_provider import 
AsyncGeneratorProvider - -class AiAsk(AsyncGeneratorProvider): - url = "https://e.aiask.me" - supports_gpt_35_turbo = True - working = True - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: list[dict[str, str]], - **kwargs - ) -> AsyncGenerator: - headers = { - "accept": "application/json, text/plain, */*", - "origin": cls.url, - "referer": f"{cls.url}/chat", - } - async with ClientSession(headers=headers) as session: - data = { - "continuous": True, - "id": "fRMSQtuHl91A4De9cCvKD", - "list": messages, - "models": "0", - "prompt": "", - "temperature": kwargs.get("temperature", 0.5), - "title": "", - } - buffer = "" - rate_limit = "您的免费额度不够使用这个模型啦,请点击右上角登录继续使用!" - async with session.post(f"{cls.url}/v1/chat/gpt/", json=data) as response: - response.raise_for_status() - async for chunk in response.content.iter_any(): - buffer += chunk.decode() - if not rate_limit.startswith(buffer): - yield buffer - buffer = "" - elif buffer == rate_limit: - raise RuntimeError("Rate limit reached") \ No newline at end of file diff --git a/spaces/AdWeeb/SuMmeet/app.py b/spaces/AdWeeb/SuMmeet/app.py deleted file mode 100644 index c755d4a8020748f1e7de2958d8cf5fd5b4d017b0..0000000000000000000000000000000000000000 --- a/spaces/AdWeeb/SuMmeet/app.py +++ /dev/null @@ -1,109 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Created on Mon Mar 28 01:04:50 2022 - -@author: adeep -""" -from fnmatch import translate -import cv2 as cv -import tempfile -import numpy as np -import pandas as pd -import streamlit as st -import joblib -import os -from moviepy.editor import VideoFileClip -import speech_recognition as sr -from pydub import AudioSegment -from pydub.silence import split_on_silence -import transformers -from transformers import pipeline -import nltk -nltk.download('punkt') -nltk.download('averaged_perceptron_tagger') -import nltk -nltk.download('punkt') -nltk.download('averaged_perceptron_tagger') -from nltk.tokenize import sent_tokenize -import re -from utils import get_translation, welcome, get_large_audio_transcription - -from PIL import Image - -#import stanfordnlp - -def main(): - - - st.title("Summarize Text") - video = st.file_uploader("Choose a file", type=['mp4']) - button = st.button("Summarize") - - max_c = st.sidebar.slider('Select max words', 50, 500, step=10, value=150) - min_c = st.sidebar.slider('Select min words', 10, 450, step=10, value=50) - gen_summ = False - - - - with st.spinner("Running.."): - - if button and video: - tfile = tempfile.NamedTemporaryFile(delete=False) - tfile.write(video.read()) - #st.write(tfile.name) - v = VideoFileClip(tfile.name) - v.audio.write_audiofile("movie.wav") - #st.video(video, format="video/mp4", start_time=0) - #st.audio("movie.wav") - whole_text=get_large_audio_transcription("movie.wav") - #st.write(whole_text) - #summarizer = pipeline("summarization") - #summarizer = pipeline("summarization", model="t5-base", tokenizer="t5-base", framework="pt") - summarizer = pipeline("summarization", model="t5-large", tokenizer="t5-large", framework="pt") - summarized = summarizer(whole_text, min_length=min_c, max_length=max_c) - summ=summarized[0]['summary_text'] - #st.write(summ) - gen_summ = True - #stf_nlp = stanfordnlp.Pipeline(processors='tokenize,mwt,pos') - #doc = stf_nlp(summ) - #l=[w.text.capitalize() if w.upos in ["PROPN","NNS"] else w.text for sent in doc.sentences for w in sent.words] - #text=" ".join(l) - #summ=truecasing_by_sentence_segmentation(summ) - sentences = sent_tokenize(summ, language='english') - # capitalize the sentences - 
sentences_capitalized = [s.capitalize() for s in sentences] - # join the capitalized sentences - summ = re.sub(" (?=[\.,'!?:;])", "", ' '.join(sentences_capitalized)) - - if 'summary' not in st.session_state: - st.session_state.summary=True - st.session_state.summarization = summ - st.session_state.gen_summ = True - - - - translate = st.sidebar.radio('Do you want to translate the text to any different language?', ('No', 'Yes')) - if 'summary' in st.session_state: - summarized_text = st.session_state.summarization - st.write(summarized_text) - gen_summ = st.session_state.gen_summ - - if translate == 'Yes' and gen_summ == True: - lang_list = ['Hindi', 'Marathi', 'Malayalam', 'Kannada', 'Telugu', 'Tamil', 'Oriya', 'Bengali', 'Gujarati', 'Urdu'] - - s_type = st.sidebar.selectbox('Select the Language in which you want to Translate:',lang_list) - st.sidebar.write('You selected:', s_type) - - - translation = get_translation(source='English', dest=s_type, text=summarized_text) - - st.sidebar.write(translation) - elif translate == 'Yes' and gen_summ == False: - st.error("The summary has not been generated yet. Please generate the summary first and then translate") - - else: - st.write('') - -if __name__ == '__main__': - - main() diff --git a/spaces/Adapter/CoAdapter/ldm/data/dataset_coco.py b/spaces/Adapter/CoAdapter/ldm/data/dataset_coco.py deleted file mode 100644 index 0b4aa4facb12be8534522c9240ca6e63ce4a68b5..0000000000000000000000000000000000000000 --- a/spaces/Adapter/CoAdapter/ldm/data/dataset_coco.py +++ /dev/null @@ -1,36 +0,0 @@ -import json -import cv2 -import os -from basicsr.utils import img2tensor - - -class dataset_coco_mask_color(): - def __init__(self, path_json, root_path_im, root_path_mask, image_size): - super(dataset_coco_mask_color, self).__init__() - with open(path_json, 'r', encoding='utf-8') as fp: - data = json.load(fp) - data = data['annotations'] - self.files = [] - self.root_path_im = root_path_im - self.root_path_mask = root_path_mask - for file in data: - name = "%012d.png" % file['image_id'] - self.files.append({'name': name, 'sentence': file['caption']}) - - def __getitem__(self, idx): - file = self.files[idx] - name = file['name'] - # print(os.path.join(self.root_path_im, name)) - im = cv2.imread(os.path.join(self.root_path_im, name.replace('.png', '.jpg'))) - im = cv2.resize(im, (512, 512)) - im = img2tensor(im, bgr2rgb=True, float32=True) / 255. - - mask = cv2.imread(os.path.join(self.root_path_mask, name)) # [:,:,0] - mask = cv2.resize(mask, (512, 512)) - mask = img2tensor(mask, bgr2rgb=True, float32=True) / 255. # [0].unsqueeze(0)#/255. - - sentence = file['sentence'] - return {'im': im, 'mask': mask, 'sentence': sentence} - - def __len__(self): - return len(self.files) diff --git a/spaces/AgentVerse/agentVerse/agentverse/environments/tasksolving_env/rules/executor/base.py b/spaces/AgentVerse/agentVerse/agentverse/environments/tasksolving_env/rules/executor/base.py deleted file mode 100644 index a9cc6e7550c0ca74affd413b3efa0fbb9237aff6..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/agentverse/environments/tasksolving_env/rules/executor/base.py +++ /dev/null @@ -1,100 +0,0 @@ -from __future__ import annotations - -from abc import abstractmethod -from typing import TYPE_CHECKING, List, Tuple, Any - -from pydantic import BaseModel - -from agentverse.agents import ExecutorAgent -from agentverse.message import SolverMessage, ExecutorMessage - -from . 
import executor_registry - - -class BaseExecutor(BaseModel): - """ - The base class of execution. - """ - - def step( - self, - agent: ExecutorAgent, - task_description: str, - solution: List[SolverMessage], - *args, - **kwargs, - ) -> List[ExecutorMessage]: - pass - - async def astep( - self, - agent: ExecutorAgent, - task_description: str, - solution: List[str], - *args, - **kwargs, - ) -> List[ExecutorMessage]: - pass - - def reset(self): - pass - - -@executor_registry.register("none") -class NoneExecutor(BaseExecutor): - """ - The base class of execution. - """ - - def step( - self, - agent: ExecutorAgent, - task_description: str, - solution: List[SolverMessage], - *args, - **kwargs, - ) -> Any: - return [ExecutorMessage(content="")] - - async def astep( - self, - agent: ExecutorAgent, - task_description: str, - solution: List[SolverMessage], - *args, - **kwargs, - ) -> Any: - return [ExecutorMessage(content="")] - - def reset(self): - pass - - -@executor_registry.register("dummy") -class DummyExecutor(BaseExecutor): - """ - The base class of execution. - """ - - def step( - self, - agent: ExecutorAgent, - task_description: str, - solution: List[SolverMessage], - *args, - **kwargs, - ) -> Any: - return [ExecutorMessage(content=s.content) for s in solution] - - async def astep( - self, - agent: ExecutorAgent, - task_description: str, - solution: List[SolverMessage], - *args, - **kwargs, - ) -> Any: - return [ExecutorMessage(content=s.content) for s in solution] - - def reset(self): - pass diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/folder/Folder.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/folder/Folder.js deleted file mode 100644 index a17543c0b68138e984d94ca2c6d71e602af30277..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/folder/Folder.js +++ /dev/null @@ -1,122 +0,0 @@ -import Sizer from '../sizer/Sizer'; -import ChildTransition from './methods/ChildTransition.js'; -import ExpandMethods from './methods/ExpandMethods.js'; -import ClickMethods from '../basesizer/ClickMethods'; -import ConfigurationMethods from './methods/ConfigurationMethods.js'; - -const GetValue = Phaser.Utils.Objects.GetValue; - -class Folder extends Sizer { - constructor(scene, config) { - if (config === undefined) { - config = {}; - } - - if (!config.hasOwnProperty('orientation')) { - config.orientation = 1; - } - - super(scene, config); - this.type = 'rexFolder'; - - this.expanded = undefined; - this.expandDirection = (this.orientation === 1) ? 'y' : 'x'; - - var background = config.background; - var title = config.title; - var child = config.child; - - // background - if (background) { - this.addBackground(background); - } - - // title - var defaultAlign = (this.orientation === 1) ? 
'left' : 'top'; - var align = GetValue(config, 'align.title', defaultAlign); - var expand = GetValue(config, 'expand.title', true); - this.add( - title, - { - proportion: 0, align: align, expand: expand, - } - ); - - var toggleByTarget = GetValue(config, 'toggleByTarget', undefined); - var toggleClickConfig = GetValue(config, 'toggleClickConfig'); - - if (toggleByTarget === undefined) { - toggleByTarget = title; - } - if (toggleByTarget) { - ClickMethods.onClick.call( - toggleByTarget, - function () { - this.toggle(); - }, - this, - toggleClickConfig - ); - } - - // child - this.childTransition = new ChildTransition(child); - - var customOrigin = GetValue(config, 'customChildOrigin', false); - if (!customOrigin) { - var origin = (!this.rtl) ? 0 : 1; - child.setOrigin(origin); - } - - var align = GetValue(config, 'align.child', 'left'); - var expand = GetValue(config, 'expand.child', true); - var proportion = (expand) ? 1 : 0; - this.add( - child, - { - proportion: proportion, align: align, expand: expand, - - } - ); - - this.addChildrenMap('title', title); - this.addChildrenMap('child', child); - this.addChildrenMap('background', background); - - var transitionConfig = config.transition; - this.setTransitionDuration(GetValue(transitionConfig, 'duration', 200)); - this.setExpandCallback(GetValue(transitionConfig, 'expandCallback', undefined)); - this.setCollapseCallback(GetValue(transitionConfig, 'collapseCallback', undefined)); - - this.reLayoutTarget = GetValue(config, 'reLayoutTarget', undefined); - - var onExpandStart = config.onExpandStart; - if (onExpandStart) { - this.on('expand.start', onExpandStart); - } - - var onExpandComplete = config.onExpandComplete; - if (onExpandComplete) { - this.on('expand.complete', onExpandComplete); - } - - var onCollapseStart = config.onCollapseStart; - if (onCollapseStart) { - this.on('collapse.start', onCollapseStart); - } - - var onCollapseComplete = config.onCollapseComplete; - if (onCollapseComplete) { - this.on('collapse.complete', onCollapseComplete); - } - - } -} - -Object.assign( - Folder.prototype, - ExpandMethods, - ConfigurationMethods, -) - -export default Folder; \ No newline at end of file diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/statesroundrectangle/StatesRoundRectangle.d.ts b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/statesroundrectangle/StatesRoundRectangle.d.ts deleted file mode 100644 index 5c7b561603f11da5957364b87458acc5c0d58638..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/statesroundrectangle/StatesRoundRectangle.d.ts +++ /dev/null @@ -1,49 +0,0 @@ -import RoundRectangle from '../roundrectangle/RoundRectangle'; - -export default StatesRoundRectangle; - -declare namespace StatesRoundRectangle { - interface IConfig extends RoundRectangle.IConfig { - 'active.color'?: number, - 'active.alpha'?: number, - 'active.strokeColor'?: number, - 'active.strokeAlpha'?: number, - 'active.strokeWidth'?: number, - 'active.radius'?: number | RoundRectangle.IRadiusConfig | ({ - radius?: (number | RoundRectangle.IRadiusConfig), - iteration?: number - }), - - 'hover.color'?: number, - 'hover.alpha'?: number, - 'hover.strokeColor'?: number, - 'hover.strokeAlpha'?: number, - 'hover.strokeWidth'?: number, - 'hover.radius'?: number | RoundRectangle.IRadiusConfig | ({ - radius?: (number | RoundRectangle.IRadiusConfig), - iteration?: number - }), - - 'disable.color'?: number, - 'disable.alpha'?: number, - 
'disable.strokeColor'?: number, - 'disable.strokeAlpha'?: number, - 'disable.strokeWidth'?: number, - 'disable.radius'?: number | RoundRectangle.IRadiusConfig | ({ - radius?: (number | RoundRectangle.IRadiusConfig), - iteration?: number - }), - - } -} - -declare class StatesRoundRectangle extends RoundRectangle { - constructor( - scene: Phaser.Scene, - config?: StatesRoundRectangle.IConfig - ) - - setActiveState(enable?: boolean): this; - setHoverState(enable?: boolean): this; - setDisableState(enable?: boolean): this; -} \ No newline at end of file diff --git a/spaces/Amrrs/DragGan-Inversion/stylegan_human/torch_utils/ops/grid_sample_gradfix.py b/spaces/Amrrs/DragGan-Inversion/stylegan_human/torch_utils/ops/grid_sample_gradfix.py deleted file mode 100644 index c522ae9b6f36a89203ce62f3d4487514523b5b00..0000000000000000000000000000000000000000 --- a/spaces/Amrrs/DragGan-Inversion/stylegan_human/torch_utils/ops/grid_sample_gradfix.py +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright (c) SenseTime Research. All rights reserved. - -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -"""Custom replacement for `torch.nn.functional.grid_sample` that -supports arbitrarily high order gradients between the input and output. -Only works on 2D images and assumes -`mode='bilinear'`, `padding_mode='zeros'`, `align_corners=False`.""" - -import warnings -import torch - -# pylint: disable=redefined-builtin -# pylint: disable=arguments-differ -# pylint: disable=protected-access - -# ---------------------------------------------------------------------------- - -enabled = False # Enable the custom op by setting this to true. - -# ---------------------------------------------------------------------------- - - -def grid_sample(input, grid): - if _should_use_custom_op(): - return _GridSample2dForward.apply(input, grid) - return torch.nn.functional.grid_sample(input=input, grid=grid, mode='bilinear', padding_mode='zeros', align_corners=False) - -# ---------------------------------------------------------------------------- - - -def _should_use_custom_op(): - if not enabled: - return False - if any(torch.__version__.startswith(x) for x in ['1.7.', '1.8.', '1.9']): - return True - warnings.warn( - f'grid_sample_gradfix not supported on PyTorch {torch.__version__}. 
Falling back to torch.nn.functional.grid_sample().') - return False - -# ---------------------------------------------------------------------------- - - -class _GridSample2dForward(torch.autograd.Function): - @staticmethod - def forward(ctx, input, grid): - assert input.ndim == 4 - assert grid.ndim == 4 - output = torch.nn.functional.grid_sample( - input=input, grid=grid, mode='bilinear', padding_mode='zeros', align_corners=False) - ctx.save_for_backward(input, grid) - return output - - @staticmethod - def backward(ctx, grad_output): - input, grid = ctx.saved_tensors - grad_input, grad_grid = _GridSample2dBackward.apply( - grad_output, input, grid) - return grad_input, grad_grid - -# ---------------------------------------------------------------------------- - - -class _GridSample2dBackward(torch.autograd.Function): - @staticmethod - def forward(ctx, grad_output, input, grid): - op = torch._C._jit_get_operation('aten::grid_sampler_2d_backward') - grad_input, grad_grid = op(grad_output, input, grid, 0, 0, False) - ctx.save_for_backward(grid) - return grad_input, grad_grid - - @staticmethod - def backward(ctx, grad2_grad_input, grad2_grad_grid): - _ = grad2_grad_grid # unused - grid, = ctx.saved_tensors - grad2_grad_output = None - grad2_input = None - grad2_grid = None - - if ctx.needs_input_grad[0]: - grad2_grad_output = _GridSample2dForward.apply( - grad2_grad_input, grid) - - assert not ctx.needs_input_grad[2] - return grad2_grad_output, grad2_input, grad2_grid - -# ---------------------------------------------------------------------------- diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/pix2pix.md b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/pix2pix.md deleted file mode 100644 index 08990048e80bb96ca441abf981be817133a09b81..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/pix2pix.md +++ /dev/null @@ -1,38 +0,0 @@ - - -# InstructPix2Pix - -[InstructPix2Pix: Learning to Follow Image Editing Instructions](https://huggingface.co/papers/2211.09800) is by Tim Brooks, Aleksander Holynski and Alexei A. Efros. - -The abstract from the paper is: - -*We propose a method for editing images from human instructions: given an input image and a written instruction that tells the model what to do, our model follows these instructions to edit the image. To obtain training data for this problem, we combine the knowledge of two large pretrained models -- a language model (GPT-3) and a text-to-image model (Stable Diffusion) -- to generate a large dataset of image editing examples. Our conditional diffusion model, InstructPix2Pix, is trained on our generated data, and generalizes to real images and user-written instructions at inference time. Since it performs edits in the forward pass and does not require per example fine-tuning or inversion, our model edits images quickly, in a matter of seconds. We show compelling editing results for a diverse collection of input images and written instructions.* - -You can find additional information about InstructPix2Pix on the [project page](https://www.timothybrooks.com/instruct-pix2pix), [original codebase](https://github.com/timothybrooks/instruct-pix2pix), and try it out in a [demo](https://huggingface.co/spaces/timbrooks/instruct-pix2pix). 
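As a quick orientation before the API reference below, here is a minimal usage sketch. It assumes the publicly released `timbrooks/instruct-pix2pix` checkpoint; the edit instruction is only a placeholder, and `image_guidance_scale` trades off fidelity to the input image against how strongly the instruction is applied.

```python
import torch
import requests
from io import BytesIO
from PIL import Image
from diffusers import StableDiffusionInstructPix2PixPipeline

# Load the instruction-following editing pipeline (checkpoint name assumed)
pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "timbrooks/instruct-pix2pix", torch_dtype=torch.float16
).to("cuda")

# Any RGB image can be edited; this example image also appears elsewhere in the docs
url = "https://huggingface.co/datasets/diffusers/images/resolve/main/benz.jpg"
image = Image.open(BytesIO(requests.get(url).content)).convert("RGB")

# The written instruction replaces the usual text-to-image prompt
edited = pipe(
    "turn the car red",
    image=image,
    num_inference_steps=20,
    image_guidance_scale=1.5,  # higher values stay closer to the input image
).images[0]
edited.save("edited.png")
```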
- - - -Make sure to check out the Schedulers [guide](/using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](/using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines. - - - -## StableDiffusionInstructPix2PixPipeline -[[autodoc]] StableDiffusionInstructPix2PixPipeline - - __call__ - - all - - load_textual_inversion - - load_lora_weights - - save_lora_weights - -## StableDiffusionPipelineOutput -[[autodoc]] pipelines.stable_diffusion.StableDiffusionPipelineOutput \ No newline at end of file diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/stable_diffusion/stable_diffusion_safe.md b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/stable_diffusion/stable_diffusion_safe.md deleted file mode 100644 index 217434c6b6698462d1bc5db0f7c9f6d8590121b9..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/stable_diffusion/stable_diffusion_safe.md +++ /dev/null @@ -1,61 +0,0 @@ - - -# Safe Stable Diffusion - -Safe Stable Diffusion was proposed in [Safe Latent Diffusion: Mitigating Inappropriate Degeneration in Diffusion Models](https://huggingface.co/papers/2211.05105) and mitigates inappropriate degeneration from Stable Diffusion models because they're trained on unfiltered web-crawled datasets. For instance Stable Diffusion may unexpectedly generate nudity, violence, images depicting self-harm, and otherwise offensive content. Safe Stable Diffusion is an extension of Stable Diffusion that drastically reduces this type of content. - -The abstract from the paper is: - -*Text-conditioned image generation models have recently achieved astonishing results in image quality and text alignment and are consequently employed in a fast-growing number of applications. Since they are highly data-driven, relying on billion-sized datasets randomly scraped from the internet, they also suffer, as we demonstrate, from degenerated and biased human behavior. In turn, they may even reinforce such biases. To help combat these undesired side effects, we present safe latent diffusion (SLD). Specifically, to measure the inappropriate degeneration due to unfiltered and imbalanced training sets, we establish a novel image generation test bed-inappropriate image prompts (I2P)-containing dedicated, real-world image-to-text prompts covering concepts such as nudity and violence. As our exhaustive empirical evaluation demonstrates, the introduced SLD removes and suppresses inappropriate image parts during the diffusion process, with no additional training required and no adverse effect on overall image quality or text alignment.* - -## Tips - -Use the `safety_concept` property of [`StableDiffusionPipelineSafe`] to check and edit the current safety concept: - -```python ->>> from diffusers import StableDiffusionPipelineSafe - ->>> pipeline = StableDiffusionPipelineSafe.from_pretrained("AIML-TUDA/stable-diffusion-safe") ->>> pipeline.safety_concept -'an image showing hate, harassment, violence, suffering, humiliation, harm, suicide, sexual, nudity, bodily fluids, blood, obscene gestures, illegal activity, drug use, theft, vandalism, weapons, child abuse, brutality, cruelty' -``` -For each image generation the active concept is also contained in [`StableDiffusionSafePipelineOutput`]. 
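A minimal sketch of adjusting the concept before generation follows; it assumes `safety_concept` accepts assignment, as the sentence above implies, and the concept string and prompt are only illustrative.

```python
import torch
from diffusers import StableDiffusionPipelineSafe

pipeline = StableDiffusionPipelineSafe.from_pretrained(
    "AIML-TUDA/stable-diffusion-safe", torch_dtype=torch.float16
).to("cuda")

# Narrow the suppressed content to a custom concept string (assignment is
# assumed to be supported, since the property can be checked and edited)
pipeline.safety_concept = "violence, weapons, blood, gore"

image = pipeline(prompt="a medieval battlefield at dawn").images[0]
image.save("battlefield.png")
```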
- -There are 4 configurations (`SafetyConfig.WEAK`, `SafetyConfig.MEDIUM`, `SafetyConfig.STRONG`, and `SafetyConfig.MAX`) that can be applied: - -```python ->>> from diffusers import StableDiffusionPipelineSafe ->>> from diffusers.pipelines.stable_diffusion_safe import SafetyConfig - ->>> pipeline = StableDiffusionPipelineSafe.from_pretrained("AIML-TUDA/stable-diffusion-safe") ->>> prompt = "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c. leyendecker" ->>> out = pipeline(prompt=prompt, **SafetyConfig.MAX) -``` - - - -Make sure to check out the Stable Diffusion [Tips](overview#tips) section to learn how to explore the tradeoff between scheduler speed and quality, and how to reuse pipeline components efficiently! - - - -## StableDiffusionPipelineSafe - -[[autodoc]] StableDiffusionPipelineSafe - - all - - __call__ - -## StableDiffusionSafePipelineOutput - -[[autodoc]] pipelines.stable_diffusion_safe.StableDiffusionSafePipelineOutput - - all - - __call__ diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/ko/training/text2image.md b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/ko/training/text2image.md deleted file mode 100644 index 069388603124bc6f02b3c11f9b2dbe630909f0ec..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/ko/training/text2image.md +++ /dev/null @@ -1,224 +0,0 @@ - - - -# Text-to-image - - - -text-to-image 파인튜닝 스크립트는 experimental 상태입니다. 과적합하기 쉽고 치명적인 망각과 같은 문제에 부딪히기 쉽습니다. 자체 데이터셋에서 최상의 결과를 얻으려면 다양한 하이퍼파라미터를 탐색하는 것이 좋습니다. - - - -Stable Diffusion과 같은 text-to-image 모델은 텍스트 프롬프트에서 이미지를 생성합니다. 이 가이드는 PyTorch 및 Flax를 사용하여 자체 데이터셋에서 [`CompVis/stable-diffusion-v1-4`](https://huggingface.co/CompVis/stable-diffusion-v1-4) 모델로 파인튜닝하는 방법을 보여줍니다. 이 가이드에 사용된 text-to-image 파인튜닝을 위한 모든 학습 스크립트에 관심이 있는 경우 이 [리포지토리](https://github.com/huggingface/diffusers/tree/main/examples/text_to_image)에서 자세히 찾을 수 있습니다. - -스크립트를 실행하기 전에, 라이브러리의 학습 dependency들을 설치해야 합니다: - -```bash -pip install git+https://github.com/huggingface/diffusers.git -pip install -U -r requirements.txt -``` - -그리고 [🤗Accelerate](https://github.com/huggingface/accelerate/) 환경을 초기화합니다: - -```bash -accelerate config -``` - -리포지토리를 이미 복제한 경우, 이 단계를 수행할 필요가 없습니다. 대신, 로컬 체크아웃 경로를 학습 스크립트에 명시할 수 있으며 거기에서 로드됩니다. - -### 하드웨어 요구 사항 - -`gradient_checkpointing` 및 `mixed_precision`을 사용하면 단일 24GB GPU에서 모델을 파인튜닝할 수 있습니다. 더 높은 `batch_size`와 더 빠른 훈련을 위해서는 GPU 메모리가 30GB 이상인 GPU를 사용하는 것이 좋습니다. TPU 또는 GPU에서 파인튜닝을 위해 JAX나 Flax를 사용할 수도 있습니다. 자세한 내용은 [아래](#flax-jax-finetuning)를 참조하세요. - -xFormers로 memory efficient attention을 활성화하여 메모리 사용량 훨씬 더 줄일 수 있습니다. [xFormers가 설치](./optimization/xformers)되어 있는지 확인하고 `--enable_xformers_memory_efficient_attention`를 학습 스크립트에 명시합니다. - -xFormers는 Flax에 사용할 수 없습니다. - -## Hub에 모델 업로드하기 - -학습 스크립트에 다음 인수를 추가하여 모델을 허브에 저장합니다: - -```bash - --push_to_hub -``` - - -## 체크포인트 저장 및 불러오기 - -학습 중 발생할 수 있는 일에 대비하여 정기적으로 체크포인트를 저장해 두는 것이 좋습니다. 체크포인트를 저장하려면 학습 스크립트에 다음 인수를 명시합니다. - -```bash - --checkpointing_steps=500 -``` - -500스텝마다 전체 학습 state가 'output_dir'의 하위 폴더에 저장됩니다. 체크포인트는 'checkpoint-'에 지금까지 학습된 step 수입니다. 예를 들어 'checkpoint-1500'은 1500 학습 step 후에 저장된 체크포인트입니다. - -학습을 재개하기 위해 체크포인트를 불러오려면 '--resume_from_checkpoint' 인수를 학습 스크립트에 명시하고 재개할 체크포인트를 지정하십시오. 예를 들어 다음 인수는 1500개의 학습 step 후에 저장된 체크포인트에서부터 훈련을 재개합니다. 
- -```bash - --resume_from_checkpoint="checkpoint-1500" -``` - -## 파인튜닝 - - - -다음과 같이 [Pokémon BLIP 캡션](https://huggingface.co/datasets/lambdalabs/pokemon-blip-captions) 데이터셋에서 파인튜닝 실행을 위해 [PyTorch 학습 스크립트](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image.py)를 실행합니다: - - -```bash -export MODEL_NAME="CompVis/stable-diffusion-v1-4" -export dataset_name="lambdalabs/pokemon-blip-captions" - -accelerate launch train_text_to_image.py \ - --pretrained_model_name_or_path=$MODEL_NAME \ - --dataset_name=$dataset_name \ - --use_ema \ - --resolution=512 --center_crop --random_flip \ - --train_batch_size=1 \ - --gradient_accumulation_steps=4 \ - --gradient_checkpointing \ - --mixed_precision="fp16" \ - --max_train_steps=15000 \ - --learning_rate=1e-05 \ - --max_grad_norm=1 \ - --lr_scheduler="constant" --lr_warmup_steps=0 \ - --output_dir="sd-pokemon-model" -``` - -자체 데이터셋으로 파인튜닝하려면 🤗 [Datasets](https://huggingface.co/docs/datasets/index)에서 요구하는 형식에 따라 데이터셋을 준비하세요. [데이터셋을 허브에 업로드](https://huggingface.co/docs/datasets/image_dataset#upload-dataset-to-the-hub)하거나 [파일들이 있는 로컬 폴더를 준비](https ://huggingface.co/docs/datasets/image_dataset#imagefolder)할 수 있습니다. - -사용자 커스텀 loading logic을 사용하려면 스크립트를 수정하십시오. 도움이 되도록 코드의 적절한 위치에 포인터를 남겼습니다. 🤗 아래 예제 스크립트는 `TRAIN_DIR`의 로컬 데이터셋으로를 파인튜닝하는 방법과 `OUTPUT_DIR`에서 모델을 저장할 위치를 보여줍니다: - - -```bash -export MODEL_NAME="CompVis/stable-diffusion-v1-4" -export TRAIN_DIR="path_to_your_dataset" -export OUTPUT_DIR="path_to_save_model" - -accelerate launch train_text_to_image.py \ - --pretrained_model_name_or_path=$MODEL_NAME \ - --train_data_dir=$TRAIN_DIR \ - --use_ema \ - --resolution=512 --center_crop --random_flip \ - --train_batch_size=1 \ - --gradient_accumulation_steps=4 \ - --gradient_checkpointing \ - --mixed_precision="fp16" \ - --max_train_steps=15000 \ - --learning_rate=1e-05 \ - --max_grad_norm=1 \ - --lr_scheduler="constant" --lr_warmup_steps=0 \ - --output_dir=${OUTPUT_DIR} -``` - - - -[@duongna211](https://github.com/duongna21)의 기여로, Flax를 사용해 TPU 및 GPU에서 Stable Diffusion 모델을 더 빠르게 학습할 수 있습니다. 이는 TPU 하드웨어에서 매우 효율적이지만 GPU에서도 훌륭하게 작동합니다. Flax 학습 스크립트는 gradient checkpointing나 gradient accumulation과 같은 기능을 아직 지원하지 않으므로 메모리가 30GB 이상인 GPU 또는 TPU v3가 필요합니다. - -스크립트를 실행하기 전에 요구 사항이 설치되어 있는지 확인하십시오: - -```bash -pip install -U -r requirements_flax.txt -``` - -그러면 다음과 같이 [Flax 학습 스크립트](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image_flax.py)를 실행할 수 있습니다. - -```bash -export MODEL_NAME="runwayml/stable-diffusion-v1-5" -export dataset_name="lambdalabs/pokemon-blip-captions" - -python train_text_to_image_flax.py \ - --pretrained_model_name_or_path=$MODEL_NAME \ - --dataset_name=$dataset_name \ - --resolution=512 --center_crop --random_flip \ - --train_batch_size=1 \ - --max_train_steps=15000 \ - --learning_rate=1e-05 \ - --max_grad_norm=1 \ - --output_dir="sd-pokemon-model" -``` - -자체 데이터셋으로 파인튜닝하려면 🤗 [Datasets](https://huggingface.co/docs/datasets/index)에서 요구하는 형식에 따라 데이터셋을 준비하세요. [데이터셋을 허브에 업로드](https://huggingface.co/docs/datasets/image_dataset#upload-dataset-to-the-hub)하거나 [파일들이 있는 로컬 폴더를 준비](https ://huggingface.co/docs/datasets/image_dataset#imagefolder)할 수 있습니다. - -사용자 커스텀 loading logic을 사용하려면 스크립트를 수정하십시오. 도움이 되도록 코드의 적절한 위치에 포인터를 남겼습니다. 
🤗 아래 예제 스크립트는 `TRAIN_DIR`의 로컬 데이터셋으로를 파인튜닝하는 방법을 보여줍니다: - -```bash -export MODEL_NAME="duongna/stable-diffusion-v1-4-flax" -export TRAIN_DIR="path_to_your_dataset" - -python train_text_to_image_flax.py \ - --pretrained_model_name_or_path=$MODEL_NAME \ - --train_data_dir=$TRAIN_DIR \ - --resolution=512 --center_crop --random_flip \ - --train_batch_size=1 \ - --mixed_precision="fp16" \ - --max_train_steps=15000 \ - --learning_rate=1e-05 \ - --max_grad_norm=1 \ - --output_dir="sd-pokemon-model" -``` - - - -## LoRA - -Text-to-image 모델 파인튜닝을 위해, 대규모 모델 학습을 가속화하기 위한 파인튜닝 기술인 LoRA(Low-Rank Adaptation of Large Language Models)를 사용할 수 있습니다. 자세한 내용은 [LoRA 학습](lora#text-to-image) 가이드를 참조하세요. - -## 추론 - -허브의 모델 경로 또는 모델 이름을 [`StableDiffusionPipeline`]에 전달하여 추론을 위해 파인 튜닝된 모델을 불러올 수 있습니다: - - - -```python -from diffusers import StableDiffusionPipeline - -model_path = "path_to_saved_model" -pipe = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=torch.float16) -pipe.to("cuda") - -image = pipe(prompt="yoda").images[0] -image.save("yoda-pokemon.png") -``` - - -```python -import jax -import numpy as np -from flax.jax_utils import replicate -from flax.training.common_utils import shard -from diffusers import FlaxStableDiffusionPipeline - -model_path = "path_to_saved_model" -pipe, params = FlaxStableDiffusionPipeline.from_pretrained(model_path, dtype=jax.numpy.bfloat16) - -prompt = "yoda pokemon" -prng_seed = jax.random.PRNGKey(0) -num_inference_steps = 50 - -num_samples = jax.device_count() -prompt = num_samples * [prompt] -prompt_ids = pipeline.prepare_inputs(prompt) - -# shard inputs and rng -params = replicate(params) -prng_seed = jax.random.split(prng_seed, jax.device_count()) -prompt_ids = shard(prompt_ids) - -images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images -images = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:]))) -image.save("yoda-pokemon.png") -``` - - \ No newline at end of file diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion.py deleted file mode 100644 index 68c720ab2ad0e65486d911ce63bd5a2ce5361405..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion.py +++ /dev/null @@ -1,421 +0,0 @@ -import inspect -from typing import Callable, List, Optional, Union - -import PIL.Image -import torch -from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModel - -from ...models import AutoencoderKL, UNet2DConditionModel -from ...schedulers import KarrasDiffusionSchedulers -from ...utils import logging -from ..pipeline_utils import DiffusionPipeline -from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline -from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline -from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline - - -logger = logging.get_logger(__name__) # pylint: disable=invalid-name - - -class VersatileDiffusionPipeline(DiffusionPipeline): - r""" - Pipeline for text-to-image generation using Stable Diffusion. - - This model inherits from [`DiffusionPipeline`]. 
Check the superclass documentation for the generic methods - implemented for all pipelines (downloading, saving, running on a particular device, etc.). - - Args: - vae ([`AutoencoderKL`]): - Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. - text_encoder ([`~transformers.CLIPTextModel`]): - Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). - tokenizer ([`~transformers.CLIPTokenizer`]): - A `CLIPTokenizer` to tokenize text. - unet ([`UNet2DConditionModel`]): - A `UNet2DConditionModel` to denoise the encoded image latents. - scheduler ([`SchedulerMixin`]): - A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of - [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. - safety_checker ([`StableDiffusionSafetyChecker`]): - Classification module that estimates whether generated images could be considered offensive or harmful. - Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details - about a model's potential harms. - feature_extractor ([`~transformers.CLIPImageProcessor`]): - A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. - """ - - tokenizer: CLIPTokenizer - image_feature_extractor: CLIPImageProcessor - text_encoder: CLIPTextModel - image_encoder: CLIPVisionModel - image_unet: UNet2DConditionModel - text_unet: UNet2DConditionModel - vae: AutoencoderKL - scheduler: KarrasDiffusionSchedulers - - def __init__( - self, - tokenizer: CLIPTokenizer, - image_feature_extractor: CLIPImageProcessor, - text_encoder: CLIPTextModel, - image_encoder: CLIPVisionModel, - image_unet: UNet2DConditionModel, - text_unet: UNet2DConditionModel, - vae: AutoencoderKL, - scheduler: KarrasDiffusionSchedulers, - ): - super().__init__() - - self.register_modules( - tokenizer=tokenizer, - image_feature_extractor=image_feature_extractor, - text_encoder=text_encoder, - image_encoder=image_encoder, - image_unet=image_unet, - text_unet=text_unet, - vae=vae, - scheduler=scheduler, - ) - self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) - - @torch.no_grad() - def image_variation( - self, - image: Union[torch.FloatTensor, PIL.Image.Image], - height: Optional[int] = None, - width: Optional[int] = None, - num_inference_steps: int = 50, - guidance_scale: float = 7.5, - negative_prompt: Optional[Union[str, List[str]]] = None, - num_images_per_prompt: Optional[int] = 1, - eta: float = 0.0, - generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, - latents: Optional[torch.FloatTensor] = None, - output_type: Optional[str] = "pil", - return_dict: bool = True, - callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, - callback_steps: int = 1, - ): - r""" - The call function to the pipeline for generation. - - Args: - image (`PIL.Image.Image`, `List[PIL.Image.Image]` or `torch.Tensor`): - The image prompt or prompts to guide the image generation. - height (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`): - The height in pixels of the generated image. - width (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`): - The width in pixels of the generated image. - num_inference_steps (`int`, *optional*, defaults to 50): - The number of denoising steps. 
More denoising steps usually lead to a higher quality image at the - expense of slower inference. - guidance_scale (`float`, *optional*, defaults to 7.5): - A higher guidance scale value encourages the model to generate images closely linked to the text - `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. - negative_prompt (`str` or `List[str]`, *optional*): - The prompt or prompts to guide what to not include in image generation. If not defined, you need to - pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). - num_images_per_prompt (`int`, *optional*, defaults to 1): - The number of images to generate per prompt. - eta (`float`, *optional*, defaults to 0.0): - Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies - to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. - generator (`torch.Generator`, *optional*): - A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make - generation deterministic. - latents (`torch.FloatTensor`, *optional*): - Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image - generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor is generated by sampling using the supplied random `generator`. - output_type (`str`, *optional*, defaults to `"pil"`): - The output format of the generated image. Choose between `PIL.Image` or `np.array`. - return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a - plain tuple. - callback (`Callable`, *optional*): - A function that calls every `callback_steps` steps during inference. The function is called with the - following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. - callback_steps (`int`, *optional*, defaults to 1): - The frequency at which the `callback` function is called. If not specified, the callback is called at - every step. - - Examples: - - ```py - >>> from diffusers import VersatileDiffusionPipeline - >>> import torch - >>> import requests - >>> from io import BytesIO - >>> from PIL import Image - - >>> # let's download an initial image - >>> url = "https://huggingface.co/datasets/diffusers/images/resolve/main/benz.jpg" - - >>> response = requests.get(url) - >>> image = Image.open(BytesIO(response.content)).convert("RGB") - - >>> pipe = VersatileDiffusionPipeline.from_pretrained( - ... "shi-labs/versatile-diffusion", torch_dtype=torch.float16 - ... ) - >>> pipe = pipe.to("cuda") - - >>> generator = torch.Generator(device="cuda").manual_seed(0) - >>> image = pipe.image_variation(image, generator=generator).images[0] - >>> image.save("./car_variation.png") - ``` - - Returns: - [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: - If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, - otherwise a `tuple` is returned where the first element is a list with the generated images and the - second element is a list of `bool`s indicating whether the corresponding generated image contains - "not-safe-for-work" (nsfw) content. 
- """ - expected_components = inspect.signature(VersatileDiffusionImageVariationPipeline.__init__).parameters.keys() - components = {name: component for name, component in self.components.items() if name in expected_components} - return VersatileDiffusionImageVariationPipeline(**components)( - image=image, - height=height, - width=width, - num_inference_steps=num_inference_steps, - guidance_scale=guidance_scale, - negative_prompt=negative_prompt, - num_images_per_prompt=num_images_per_prompt, - eta=eta, - generator=generator, - latents=latents, - output_type=output_type, - return_dict=return_dict, - callback=callback, - callback_steps=callback_steps, - ) - - @torch.no_grad() - def text_to_image( - self, - prompt: Union[str, List[str]], - height: Optional[int] = None, - width: Optional[int] = None, - num_inference_steps: int = 50, - guidance_scale: float = 7.5, - negative_prompt: Optional[Union[str, List[str]]] = None, - num_images_per_prompt: Optional[int] = 1, - eta: float = 0.0, - generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, - latents: Optional[torch.FloatTensor] = None, - output_type: Optional[str] = "pil", - return_dict: bool = True, - callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, - callback_steps: int = 1, - ): - r""" - The call function to the pipeline for generation. - - Args: - prompt (`str` or `List[str]`): - The prompt or prompts to guide image generation. - height (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`): - The height in pixels of the generated image. - width (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`): - The width in pixels of the generated image. - num_inference_steps (`int`, *optional*, defaults to 50): - The number of denoising steps. More denoising steps usually lead to a higher quality image at the - expense of slower inference. - guidance_scale (`float`, *optional*, defaults to 7.5): - A higher guidance scale value encourages the model to generate images closely linked to the text - `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. - negative_prompt (`str` or `List[str]`, *optional*): - The prompt or prompts to guide what to not include in image generation. If not defined, you need to - pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). - num_images_per_prompt (`int`, *optional*, defaults to 1): - The number of images to generate per prompt. - eta (`float`, *optional*, defaults to 0.0): - Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies - to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. - generator (`torch.Generator`, *optional*): - A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make - generation deterministic. - latents (`torch.FloatTensor`, *optional*): - Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image - generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor is generated by sampling using the supplied random `generator`. - output_type (`str`, *optional*, defaults to `"pil"`): - The output format of the generated image. Choose between `PIL.Image` or `np.array`. 
- return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a - plain tuple. - callback (`Callable`, *optional*): - A function that calls every `callback_steps` steps during inference. The function is called with the - following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. - callback_steps (`int`, *optional*, defaults to 1): - The frequency at which the `callback` function is called. If not specified, the callback is called at - every step. - - Examples: - - ```py - >>> from diffusers import VersatileDiffusionPipeline - >>> import torch - - >>> pipe = VersatileDiffusionPipeline.from_pretrained( - ... "shi-labs/versatile-diffusion", torch_dtype=torch.float16 - ... ) - >>> pipe = pipe.to("cuda") - - >>> generator = torch.Generator(device="cuda").manual_seed(0) - >>> image = pipe.text_to_image("an astronaut riding on a horse on mars", generator=generator).images[0] - >>> image.save("./astronaut.png") - ``` - - Returns: - [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: - If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, - otherwise a `tuple` is returned where the first element is a list with the generated images and the - second element is a list of `bool`s indicating whether the corresponding generated image contains - "not-safe-for-work" (nsfw) content. - """ - expected_components = inspect.signature(VersatileDiffusionTextToImagePipeline.__init__).parameters.keys() - components = {name: component for name, component in self.components.items() if name in expected_components} - temp_pipeline = VersatileDiffusionTextToImagePipeline(**components) - output = temp_pipeline( - prompt=prompt, - height=height, - width=width, - num_inference_steps=num_inference_steps, - guidance_scale=guidance_scale, - negative_prompt=negative_prompt, - num_images_per_prompt=num_images_per_prompt, - eta=eta, - generator=generator, - latents=latents, - output_type=output_type, - return_dict=return_dict, - callback=callback, - callback_steps=callback_steps, - ) - # swap the attention blocks back to the original state - temp_pipeline._swap_unet_attention_blocks() - - return output - - @torch.no_grad() - def dual_guided( - self, - prompt: Union[PIL.Image.Image, List[PIL.Image.Image]], - image: Union[str, List[str]], - text_to_image_strength: float = 0.5, - height: Optional[int] = None, - width: Optional[int] = None, - num_inference_steps: int = 50, - guidance_scale: float = 7.5, - num_images_per_prompt: Optional[int] = 1, - eta: float = 0.0, - generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, - latents: Optional[torch.FloatTensor] = None, - output_type: Optional[str] = "pil", - return_dict: bool = True, - callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, - callback_steps: int = 1, - ): - r""" - The call function to the pipeline for generation. - - Args: - prompt (`str` or `List[str]`): - The prompt or prompts to guide image generation. - height (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`): - The height in pixels of the generated image. - width (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`): - The width in pixels of the generated image. - num_inference_steps (`int`, *optional*, defaults to 50): - The number of denoising steps. 
More denoising steps usually lead to a higher quality image at the - expense of slower inference. - guidance_scale (`float`, *optional*, defaults to 7.5): - A higher guidance scale value encourages the model to generate images closely linked to the text - `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. - negative_prompt (`str` or `List[str]`, *optional*): - The prompt or prompts to guide what to not include in image generation. If not defined, you need to - pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). - num_images_per_prompt (`int`, *optional*, defaults to 1): - The number of images to generate per prompt. - eta (`float`, *optional*, defaults to 0.0): - Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies - to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. - generator (`torch.Generator` or `List[torch.Generator]`, *optional*): - A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make - generation deterministic. - latents (`torch.FloatTensor`, *optional*): - Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image - generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor is generated by sampling using the supplied random `generator`. - output_type (`str`, *optional*, defaults to `"pil"`): - The output format of the generated image. Choose between `PIL.Image` or `np.array`. - return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a - plain tuple. - callback (`Callable`, *optional*): - A function that calls every `callback_steps` steps during inference. The function is called with the - following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. - callback_steps (`int`, *optional*, defaults to 1): - The frequency at which the `callback` function is called. If not specified, the callback is called at - every step. - - Examples: - - ```py - >>> from diffusers import VersatileDiffusionPipeline - >>> import torch - >>> import requests - >>> from io import BytesIO - >>> from PIL import Image - - >>> # let's download an initial image - >>> url = "https://huggingface.co/datasets/diffusers/images/resolve/main/benz.jpg" - - >>> response = requests.get(url) - >>> image = Image.open(BytesIO(response.content)).convert("RGB") - >>> text = "a red car in the sun" - - >>> pipe = VersatileDiffusionPipeline.from_pretrained( - ... "shi-labs/versatile-diffusion", torch_dtype=torch.float16 - ... ) - >>> pipe = pipe.to("cuda") - - >>> generator = torch.Generator(device="cuda").manual_seed(0) - >>> text_to_image_strength = 0.75 - - >>> image = pipe.dual_guided( - ... prompt=text, image=image, text_to_image_strength=text_to_image_strength, generator=generator - ... ).images[0] - >>> image.save("./car_variation.png") - ``` - - Returns: - [`~pipelines.ImagePipelineOutput`] or `tuple`: - If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is - returned where the first element is a list with the generated images. 
- """ - - expected_components = inspect.signature(VersatileDiffusionDualGuidedPipeline.__init__).parameters.keys() - components = {name: component for name, component in self.components.items() if name in expected_components} - temp_pipeline = VersatileDiffusionDualGuidedPipeline(**components) - output = temp_pipeline( - prompt=prompt, - image=image, - text_to_image_strength=text_to_image_strength, - height=height, - width=width, - num_inference_steps=num_inference_steps, - guidance_scale=guidance_scale, - num_images_per_prompt=num_images_per_prompt, - eta=eta, - generator=generator, - latents=latents, - output_type=output_type, - return_dict=return_dict, - callback=callback, - callback_steps=callback_steps, - ) - temp_pipeline._revert_dual_attention() - - return output diff --git a/spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/CLIP/clip/clip.py b/spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/CLIP/clip/clip.py deleted file mode 100644 index fe4d86470fd52f0461ab1bb403777d5b86d3eab9..0000000000000000000000000000000000000000 --- a/spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/CLIP/clip/clip.py +++ /dev/null @@ -1,225 +0,0 @@ -import hashlib -import os -import urllib -import warnings -from typing import Any, Union, List - -import torch -from PIL import Image -from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize -from tqdm import tqdm - -from .model import build_model -from .simple_tokenizer import SimpleTokenizer as _Tokenizer - -try: - from torchvision.transforms import InterpolationMode - BICUBIC = InterpolationMode.BICUBIC -except ImportError: - BICUBIC = Image.BICUBIC - - -if torch.__version__.split(".") < ["1", "7", "1"]: - warnings.warn("PyTorch version 1.7.1 or higher is recommended") - - -__all__ = ["available_models", "load", "tokenize"] -_tokenizer = _Tokenizer() - -_MODELS = { - "RN50": "https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt", - "RN101": "https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt", - "RN50x4": "https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt", - "RN50x16": "https://openaipublic.azureedge.net/clip/models/52378b407f34354e150460fe41077663dd5b39c54cd0bfd2b27167a4a06ec9aa/RN50x16.pt", - "ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt", - "ViT-B/16": "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt", -} - - -def _download(url: str, root: str): - os.makedirs(root, exist_ok=True) - filename = os.path.basename(url) - - expected_sha256 = url.split("/")[-2] - download_target = os.path.join(root, filename) - - if os.path.exists(download_target) and not os.path.isfile(download_target): - raise RuntimeError(f"{download_target} exists and is not a regular file") - - if os.path.isfile(download_target): - if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256: - return download_target - else: - warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file") - import pdb - pdb.set_trace() - with urllib.request.urlopen(url) as source, open(download_target, "wb") as output: - with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True, 
unit_divisor=1024) as loop: - while True: - buffer = source.read(8192) - if not buffer: - break - - output.write(buffer) - loop.update(len(buffer)) - - if hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256: - raise RuntimeError(f"Model has been downloaded but the SHA256 checksum does not not match") - - return download_target - - -def _transform(n_px): - return Compose([ - Resize(n_px, interpolation=BICUBIC), - CenterCrop(n_px), - lambda image: image.convert("RGB"), - ToTensor(), - Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)), - ]) - - -def available_models() -> List[str]: - """Returns the names of available CLIP models""" - return list(_MODELS.keys()) - - -def load(name: str, device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu", jit: bool = False, download_root: str = None): - """Load a CLIP model - - Parameters - ---------- - name : str - A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict - - device : Union[str, torch.device] - The device to put the loaded model - - jit : bool - Whether to load the optimized JIT model or more hackable non-JIT model (default). - - download_root: str - path to download the model files; by default, it uses "~/.cache/clip" - - Returns - ------- - model : torch.nn.Module - The CLIP model - - preprocess : Callable[[PIL.Image], torch.Tensor] - A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input - """ - if name in _MODELS: - model_path = _download(_MODELS[name], download_root or os.path.expanduser("~/.cache/clip")) - elif os.path.isfile(name): - model_path = name - else: - raise RuntimeError(f"Model {name} not found; available models = {available_models()}") - - try: - # loading JIT archive - model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval() - state_dict = None - except RuntimeError: - # loading saved state dict - if jit: - warnings.warn(f"File {model_path} is not a JIT archive. 
Loading as a state dict instead") - jit = False - state_dict = torch.load(model_path, map_location="cpu") - - if not jit: - model = build_model(state_dict or model.state_dict()).to(device) - if str(device) == "cpu": - model.float() - return model, _transform(model.visual.input_resolution) - - # patch the device names - device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[]) - device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1] - - def patch_device(module): - try: - graphs = [module.graph] if hasattr(module, "graph") else [] - except RuntimeError: - graphs = [] - - if hasattr(module, "forward1"): - graphs.append(module.forward1.graph) - - for graph in graphs: - for node in graph.findAllNodes("prim::Constant"): - if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"): - node.copyAttributes(device_node) - - model.apply(patch_device) - patch_device(model.encode_image) - patch_device(model.encode_text) - - # patch dtype to float32 on CPU - if str(device) == "cpu": - float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[]) - float_input = list(float_holder.graph.findNode("aten::to").inputs())[1] - float_node = float_input.node() - - def patch_float(module): - try: - graphs = [module.graph] if hasattr(module, "graph") else [] - except RuntimeError: - graphs = [] - - if hasattr(module, "forward1"): - graphs.append(module.forward1.graph) - - for graph in graphs: - for node in graph.findAllNodes("aten::to"): - inputs = list(node.inputs()) - for i in [1, 2]: # dtype can be the second or third argument to aten::to() - if inputs[i].node()["value"] == 5: - inputs[i].node().copyAttributes(float_node) - - model.apply(patch_float) - patch_float(model.encode_image) - patch_float(model.encode_text) - - model.float() - - return model, _transform(model.input_resolution.item()) - - -def tokenize(texts: Union[str, List[str]], context_length: int = 77, truncate: bool = False) -> torch.LongTensor: - """ - Returns the tokenized representation of given input string(s) - - Parameters - ---------- - texts : Union[str, List[str]] - An input string or a list of input strings to tokenize - - context_length : int - The context length to use; all CLIP models use 77 as the context length - - truncate: bool - Whether to truncate the text in case its encoding is longer than the context length - - Returns - ------- - A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length] - """ - if isinstance(texts, str): - texts = [texts] - - sot_token = _tokenizer.encoder["<|startoftext|>"] - eot_token = _tokenizer.encoder["<|endoftext|>"] - all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts] - result = torch.zeros(len(all_tokens), context_length, dtype=torch.long) - - for i, tokens in enumerate(all_tokens): - if len(tokens) > context_length: - if truncate: - tokens = tokens[:context_length] - tokens[-1] = eot_token - else: - raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}") - result[i, :len(tokens)] = torch.tensor(tokens) - - return result diff --git a/spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/guided_diffusion/scripts/super_res_sample.py b/spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/guided_diffusion/scripts/super_res_sample.py deleted file mode 100644 index 93400c4a8f66aa808595c126fe2930a7d554781c..0000000000000000000000000000000000000000 --- 
a/spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/guided_diffusion/scripts/super_res_sample.py +++ /dev/null @@ -1,119 +0,0 @@ -""" -Generate a large batch of samples from a super resolution model, given a batch -of samples from a regular model from image_sample.py. -""" - -import argparse -import os - -import blobfile as bf -import numpy as np -import torch as th -import torch.distributed as dist - -from guided_diffusion import dist_util, logger -from guided_diffusion.script_util import ( - sr_model_and_diffusion_defaults, - sr_create_model_and_diffusion, - args_to_dict, - add_dict_to_argparser, -) - - -def main(): - args = create_argparser().parse_args() - - dist_util.setup_dist() - logger.configure() - - logger.log("creating model...") - model, diffusion = sr_create_model_and_diffusion( - **args_to_dict(args, sr_model_and_diffusion_defaults().keys()) - ) - model.load_state_dict( - dist_util.load_state_dict(args.model_path, map_location="cpu") - ) - model.to(dist_util.dev()) - if args.use_fp16: - model.convert_to_fp16() - model.eval() - - logger.log("loading data...") - data = load_data_for_worker(args.base_samples, args.batch_size, args.class_cond) - - logger.log("creating samples...") - all_images = [] - while len(all_images) * args.batch_size < args.num_samples: - model_kwargs = next(data) - model_kwargs = {k: v.to(dist_util.dev()) for k, v in model_kwargs.items()} - sample = diffusion.p_sample_loop( - model, - (args.batch_size, 3, args.large_size, args.large_size), - clip_denoised=args.clip_denoised, - model_kwargs=model_kwargs, - ) - sample = ((sample + 1) * 127.5).clamp(0, 255).to(th.uint8) - sample = sample.permute(0, 2, 3, 1) - sample = sample.contiguous() - - all_samples = [th.zeros_like(sample) for _ in range(dist.get_world_size())] - dist.all_gather(all_samples, sample) # gather not supported with NCCL - for sample in all_samples: - all_images.append(sample.cpu().numpy()) - logger.log(f"created {len(all_images) * args.batch_size} samples") - - arr = np.concatenate(all_images, axis=0) - arr = arr[: args.num_samples] - if dist.get_rank() == 0: - shape_str = "x".join([str(x) for x in arr.shape]) - out_path = os.path.join(logger.get_dir(), f"samples_{shape_str}.npz") - logger.log(f"saving to {out_path}") - np.savez(out_path, arr) - - dist.barrier() - logger.log("sampling complete") - - -def load_data_for_worker(base_samples, batch_size, class_cond): - with bf.BlobFile(base_samples, "rb") as f: - obj = np.load(f) - image_arr = obj["arr_0"] - if class_cond: - label_arr = obj["arr_1"] - rank = dist.get_rank() - num_ranks = dist.get_world_size() - buffer = [] - label_buffer = [] - while True: - for i in range(rank, len(image_arr), num_ranks): - buffer.append(image_arr[i]) - if class_cond: - label_buffer.append(label_arr[i]) - if len(buffer) == batch_size: - batch = th.from_numpy(np.stack(buffer)).float() - batch = batch / 127.5 - 1.0 - batch = batch.permute(0, 3, 1, 2) - res = dict(low_res=batch) - if class_cond: - res["y"] = th.from_numpy(np.stack(label_buffer)) - yield res - buffer, label_buffer = [], [] - - -def create_argparser(): - defaults = dict( - clip_denoised=True, - num_samples=10000, - batch_size=16, - use_ddim=False, - base_samples="", - model_path="", - ) - defaults.update(sr_model_and_diffusion_defaults()) - parser = argparse.ArgumentParser() - add_dict_to_argparser(parser, defaults) - return parser - - -if __name__ == "__main__": - main() diff --git a/spaces/AnthonyTruchetPoC/persistent-docker/tests/conftest.py 
b/spaces/AnthonyTruchetPoC/persistent-docker/tests/conftest.py deleted file mode 100644 index 741008eb8d261a0f16b3d191be8a6c94eb70a9e2..0000000000000000000000000000000000000000 --- a/spaces/AnthonyTruchetPoC/persistent-docker/tests/conftest.py +++ /dev/null @@ -1,11 +0,0 @@ -# conftest.py -import pytest - - -@pytest.fixture -def unstub(): - """Ensure calls patched by mockito are cleared after each test""" - from mockito import unstub - - yield - unstub() diff --git a/spaces/Ashrafb/translate/tokenization_small100.py b/spaces/Ashrafb/translate/tokenization_small100.py deleted file mode 100644 index e3b71e1f12c1c98bf545b60025c40ccf9ff76955..0000000000000000000000000000000000000000 --- a/spaces/Ashrafb/translate/tokenization_small100.py +++ /dev/null @@ -1,364 +0,0 @@ -# Copyright (c) 2022 Idiap Research Institute, http://www.idiap.ch/ -# Written by Alireza Mohammadshahi -# This is a modified version of https://github.com/huggingface/transformers/blob/main/src/transformers/models/m2m_100/tokenization_m2m_100.py -# which owns by Fariseq Authors and The HuggingFace Inc. team. -# -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Tokenization classes for SMALL100.""" -import json -import os -from pathlib import Path -from shutil import copyfile -from typing import Any, Dict, List, Optional, Tuple, Union - -import sentencepiece - -from transformers.tokenization_utils import BatchEncoding, PreTrainedTokenizer -from transformers.utils import logging - - -logger = logging.get_logger(__name__) - -SPIECE_UNDERLINE = "▁" - -VOCAB_FILES_NAMES = { - "vocab_file": "vocab.json", - "spm_file": "sentencepiece.bpe.model", - "tokenizer_config_file": "tokenizer_config.json", -} - -PRETRAINED_VOCAB_FILES_MAP = { - "vocab_file": { - "alirezamsh/small100": "https://huggingface.co/alirezamsh/small100/resolve/main/vocab.json", - }, - "spm_file": { - "alirezamsh/small100": "https://huggingface.co/alirezamsh/small100/resolve/main/sentencepiece.bpe.model", - }, - "tokenizer_config_file": { - "alirezamsh/small100": "https://huggingface.co/alirezamsh/small100/resolve/main/tokenizer_config.json", - }, -} - -PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { - "alirezamsh/small100": 1024, -} - -# fmt: off -FAIRSEQ_LANGUAGE_CODES = { - "m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"] -} -# fmt: on - - -class SMALL100Tokenizer(PreTrainedTokenizer): - """ - Construct an SMALL100 tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece). 
- This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to - this superclass for more information regarding those methods. - Args: - vocab_file (`str`): - Path to the vocabulary file. - spm_file (`str`): - Path to [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .spm extension) that - contains the vocabulary. - tgt_lang (`str`, *optional*): - A string representing the target language. - eos_token (`str`, *optional*, defaults to `""`): - The end of sequence token. - sep_token (`str`, *optional*, defaults to `""`): - The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for - sequence classification or for a text and a question for question answering. It is also used as the last - token of a sequence built with special tokens. - unk_token (`str`, *optional*, defaults to `""`): - The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this - token instead. - pad_token (`str`, *optional*, defaults to `""`): - The token used for padding, for example when batching sequences of different lengths. - language_codes (`str`, *optional*): - What language codes to use. Should be `"m2m100"`. - sp_model_kwargs (`dict`, *optional*): - Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for - SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things, - to set: - - `enable_sampling`: Enable subword regularization. - - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout. - - `nbest_size = {0,1}`: No sampling is performed. - - `nbest_size > 1`: samples from the nbest_size results. - - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) - using forward-filtering-and-backward-sampling algorithm. - - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for - BPE-dropout. 
- Examples: - ```python - >>> from tokenization_small100 import SMALL100Tokenizer - >>> tokenizer = SMALL100Tokenizer.from_pretrained("alirezamsh/small100", tgt_lang="ro") - >>> src_text = " UN Chief Says There Is No Military Solution in Syria" - >>> tgt_text = "Şeful ONU declară că nu există o soluţie militară în Siria" - >>> model_inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt") - >>> model(**model_inputs) # should work - ```""" - - vocab_files_names = VOCAB_FILES_NAMES - max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES - pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP - model_input_names = ["input_ids", "attention_mask"] - - prefix_tokens: List[int] = [] - suffix_tokens: List[int] = [] - - def __init__( - self, - vocab_file, - spm_file, - tgt_lang=None, - bos_token="", - eos_token="", - sep_token="", - pad_token="", - unk_token="", - language_codes="m2m100", - sp_model_kwargs: Optional[Dict[str, Any]] = None, - num_madeup_words=8, - **kwargs, - ) -> None: - self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs - - self.language_codes = language_codes - fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes] - self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code} - - kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", []) - kwargs["additional_special_tokens"] += [ - self.get_lang_token(lang_code) - for lang_code in fairseq_language_code - if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"] - ] - - super().__init__( - tgt_lang=tgt_lang, - bos_token=bos_token, - eos_token=eos_token, - sep_token=sep_token, - unk_token=unk_token, - pad_token=pad_token, - language_codes=language_codes, - sp_model_kwargs=self.sp_model_kwargs, - num_madeup_words=num_madeup_words, - **kwargs, - ) - - self.vocab_file = vocab_file - self.encoder = load_json(vocab_file) - self.decoder = {v: k for k, v in self.encoder.items()} - self.spm_file = spm_file - self.sp_model = load_spm(spm_file, self.sp_model_kwargs) - - self.encoder_size = len(self.encoder) - - self.lang_token_to_id = { - self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code) - } - self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)} - self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()} - - self._tgt_lang = tgt_lang if tgt_lang is not None else "en" - self.cur_lang_id = self.get_lang_id(self._tgt_lang) - self.set_lang_special_tokens(self._tgt_lang) - - self.num_madeup_words = num_madeup_words - - @property - def vocab_size(self) -> int: - return len(self.encoder) + len(self.lang_token_to_id) + self.num_madeup_words - - @property - def tgt_lang(self) -> str: - return self._tgt_lang - - @tgt_lang.setter - def tgt_lang(self, new_tgt_lang: str) -> None: - self._tgt_lang = new_tgt_lang - self.set_lang_special_tokens(self._tgt_lang) - - def _tokenize(self, text: str) -> List[str]: - return self.sp_model.encode(text, out_type=str) - - def _convert_token_to_id(self, token): - if token in self.lang_token_to_id: - return self.lang_token_to_id[token] - return self.encoder.get(token, self.encoder[self.unk_token]) - - def _convert_id_to_token(self, index: int) -> str: - """Converts an index (integer) in a token (str) using the decoder.""" - if index in self.id_to_lang_token: - return self.id_to_lang_token[index] - return self.decoder.get(index, self.unk_token) - - def 
convert_tokens_to_string(self, tokens: List[str]) -> str: - """Converts a sequence of tokens (strings for sub-words) in a single string.""" - return self.sp_model.decode(tokens) - - def get_special_tokens_mask( - self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False - ) -> List[int]: - """ - Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding - special tokens using the tokenizer `prepare_for_model` method. - Args: - token_ids_0 (`List[int]`): - List of IDs. - token_ids_1 (`List[int]`, *optional*): - Optional second list of IDs for sequence pairs. - already_has_special_tokens (`bool`, *optional*, defaults to `False`): - Whether or not the token list is already formatted with special tokens for the model. - Returns: - `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. - """ - - if already_has_special_tokens: - return super().get_special_tokens_mask( - token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True - ) - - prefix_ones = [1] * len(self.prefix_tokens) - suffix_ones = [1] * len(self.suffix_tokens) - if token_ids_1 is None: - return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones - return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones - - def build_inputs_with_special_tokens( - self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None - ) -> List[int]: - """ - Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and - adding special tokens. An MBART sequence has the following format, where `X` represents the sequence: - - `input_ids` (for encoder) `X [eos, src_lang_code]` - - `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]` - BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a - separator. - Args: - token_ids_0 (`List[int]`): - List of IDs to which the special tokens will be added. - token_ids_1 (`List[int]`, *optional*): - Optional second list of IDs for sequence pairs. - Returns: - `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. 
- """ - if token_ids_1 is None: - if self.prefix_tokens is None: - return token_ids_0 + self.suffix_tokens - else: - return self.prefix_tokens + token_ids_0 + self.suffix_tokens - # We don't expect to process pairs, but leave the pair logic for API consistency - if self.prefix_tokens is None: - return token_ids_0 + token_ids_1 + self.suffix_tokens - else: - return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens - - def get_vocab(self) -> Dict: - vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} - vocab.update(self.added_tokens_encoder) - return vocab - - def __getstate__(self) -> Dict: - state = self.__dict__.copy() - state["sp_model"] = None - return state - - def __setstate__(self, d: Dict) -> None: - self.__dict__ = d - - # for backward compatibility - if not hasattr(self, "sp_model_kwargs"): - self.sp_model_kwargs = {} - - self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs) - - def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: - save_dir = Path(save_directory) - if not save_dir.is_dir(): - raise OSError(f"{save_directory} should be a directory") - vocab_save_path = save_dir / ( - (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"] - ) - spm_save_path = save_dir / ( - (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"] - ) - - save_json(self.encoder, vocab_save_path) - - if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file): - copyfile(self.spm_file, spm_save_path) - elif not os.path.isfile(self.spm_file): - with open(spm_save_path, "wb") as fi: - content_spiece_model = self.sp_model.serialized_model_proto() - fi.write(content_spiece_model) - - return (str(vocab_save_path), str(spm_save_path)) - - def prepare_seq2seq_batch( - self, - src_texts: List[str], - tgt_texts: Optional[List[str]] = None, - tgt_lang: str = "ro", - **kwargs, - ) -> BatchEncoding: - self.tgt_lang = tgt_lang - self.set_lang_special_tokens(self.tgt_lang) - return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs) - - def _build_translation_inputs(self, raw_inputs, tgt_lang: Optional[str], **extra_kwargs): - """Used by translation pipeline, to prepare inputs for the generate function""" - if tgt_lang is None: - raise ValueError("Translation requires a `tgt_lang` for this model") - self.tgt_lang = tgt_lang - inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs) - return inputs - - def _switch_to_input_mode(self): - self.set_lang_special_tokens(self.tgt_lang) - - def _switch_to_target_mode(self): - self.prefix_tokens = None - self.suffix_tokens = [self.eos_token_id] - - def set_lang_special_tokens(self, src_lang: str) -> None: - """Reset the special tokens to the tgt lang setting. 
No prefix and suffix=[eos, tgt_lang_code].""" - lang_token = self.get_lang_token(src_lang) - self.cur_lang_id = self.lang_token_to_id[lang_token] - self.prefix_tokens = [self.cur_lang_id] - self.suffix_tokens = [self.eos_token_id] - - def get_lang_token(self, lang: str) -> str: - return self.lang_code_to_token[lang] - - def get_lang_id(self, lang: str) -> int: - lang_token = self.get_lang_token(lang) - return self.lang_token_to_id[lang_token] - - -def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor: - spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs) - spm.Load(str(path)) - return spm - - -def load_json(path: str) -> Union[Dict, List]: - with open(path, "r") as f: - return json.load(f) - - -def save_json(data, path: str) -> None: - with open(path, "w") as f: - json.dump(data, f, indent=2) diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/.github/ISSUE_TEMPLATE/documentation.md b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/.github/ISSUE_TEMPLATE/documentation.md deleted file mode 100644 index 88214d62e5228639491e019c78bb4171d535cdd1..0000000000000000000000000000000000000000 --- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/.github/ISSUE_TEMPLATE/documentation.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -name: "\U0001F4DA Documentation Issue" -about: Report a problem about existing documentation, comments, website or tutorials. -labels: documentation - ---- - -## 📚 Documentation Issue - -This issue category is for problems about existing documentation, not for asking how-to questions. - -* Provide a link to an existing documentation/comment/tutorial: - -* How should the above documentation/comment/tutorial improve: diff --git a/spaces/Banbri/zcvzcv/src/lib/cropImage.ts b/spaces/Banbri/zcvzcv/src/lib/cropImage.ts deleted file mode 100644 index 2d6b7e1f8c112564f372ab1da3af76a337b7f35b..0000000000000000000000000000000000000000 --- a/spaces/Banbri/zcvzcv/src/lib/cropImage.ts +++ /dev/null @@ -1,53 +0,0 @@ -async function cropImage(inputImage: string): Promise<{ croppedImage: string; x: number; y: number; width: number; height: number }> { - return new Promise((resolve, reject) => { - const img = new Image(); - img.src = inputImage; - img.onload = () => { - const canvas = document.createElement('canvas'); - const context = canvas.getContext('2d'); - if (!context) { - reject("Context is null"); - return; - } - canvas.width = img.width; - canvas.height = img.height; - context.drawImage(img, 0, 0, img.width, img.height); - const imageData = context.getImageData(0, 0, img.width, img.height); - const data = imageData.data; - let minX = img.width, minY = img.height, maxX = 0, maxY = 0; - - for (let y = 0; y < img.height; y++) { - for (let x = 0; x < img.width; x++) { - const i = (y * 4) * img.width + x * 4; - const avg = (data[i] + data[i + 1] + data[i + 2]) / 3; - if (avg < 255) { - minX = Math.min(minX, x); - minY = Math.min(minY, y); - maxX = Math.max(maxX, x); - maxY = Math.max(maxY, y); - } - } - } - - const width = maxX - minX; - const height = maxY - minY; - const croppedCanvas = document.createElement('canvas'); - croppedCanvas.width = width; - croppedCanvas.height = height; - const croppedCtx = croppedCanvas.getContext('2d'); - if (!croppedCtx) { - reject("croppedCtx is null"); - return; - } - croppedCtx.drawImage(canvas, minX, minY, width, height, 0, 0, width, height); - resolve({ - croppedImage: croppedCanvas.toDataURL(), - x: minX, - y: minY, - width, - height - 
}); - }; - img.onerror = reject; - }); -} \ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Descargar Ataque En Titan Mvil Apk.md b/spaces/Benson/text-generation/Examples/Descargar Ataque En Titan Mvil Apk.md deleted file mode 100644 index fa2e4c09cb5b42f2f981bf5c3b5c4da3d5bec8b8..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Descargar Ataque En Titan Mvil Apk.md +++ /dev/null @@ -1,80 +0,0 @@ -
    -

Download Attack on Titan Mobile APK for Android: A Guide for Fans

If you are a fan of the popular anime and manga series Attack on Titan, you may be interested in playing a game based on it. However, there is no official game for Android devices yet, so you may have to turn to unofficial, fan-made games. One of them is Attack on Titan Mobile APK, a free game that lets you experience the thrill of fighting titans. In this article we will tell you everything you need to know about this game, including what it is, how to download and install it, and what the risks and alternatives are. Let's get started!

download attack on titan mobile apk

Download File 🆓 https://bltlly.com/2v6LJ6

What is Attack on Titan?

Before we dive into the game, let's take a quick look at what Attack on Titan is. Attack on Titan, also known as Shingeki no Kyojin in Japanese, is a manga series written and illustrated by Hajime Isayama. It began in 2009 and has been serialized in Kodansha's Bessatsu Shonen magazine. It has also been adapted into an anime series by Wit Studio and MAPPA, which has four seasons so far. The series has gained a huge fan base around the world thanks to its gripping story, stunning animation, and memorable characters.

A brief introduction to the anime and manga series

The story of Attack on Titan is set in a world where humanity lives inside three concentric walls that protect it from giant humanoid creatures called titans. These titans have no intelligence or reason and exist only to devour humans. The story follows Eren Yeager, a young man who dreams of joining the Survey Corps, an elite military branch that ventures beyond the walls to fight the titans. Together with his friends Mikasa Ackerman and Armin Arlert, he witnesses the fall of his hometown when a colossal titan breaches the outer wall. He vows to kill every titan and uncover the secrets behind their origin and existence.

The main characters and factions

Some of the main characters are:

• Eren Yeager: The protagonist of the series, who has the ability to transform into a titan. He is determined, impulsive, and passionate about his goal.
• Mikasa Ackerman: Eren's childhood friend and adoptive sister, a skilled fighter and loyal protector. She is calm, stoic, and strong-willed.
• Armin Arlert: Eren's best friend and a brilliant strategist. He is shy, kind, and intelligent.
• Levi Ackerman: Captain of the Survey Corps' Special Operations Squad, widely regarded as humanity's strongest soldier. He is cold, ruthless, and disciplined.
• Hange Zoe: Commander of the Survey Corps' Fourth Squad, obsessed with studying the titans. She is eccentric, enthusiastic, and curious.

The series also features several factions with different agendas and motives. Some of them are:

• The Survey Corps: The military branch that explores beyond the walls and fights the titans. They are brave, adventurous, and idealistic.
• The Military Police: The branch of the military that keeps order inside the walls and serves the king. They are corrupt, lazy, and selfish.
• The Garrison: The branch of the military that guards and maintains the walls. They are pragmatic, cautious, and loyal.
• The Marleyans: The nation that rules most of the world and oppresses the Eldians, the race that can turn into titans. They are imperialist, ruthless, and ambitious.
• The Eldian Restorationists: A rebel group that seeks to overthrow the Marleyan regime and restore the Eldian Empire. They are patriotic, rebellious, and hopeful.

The plot and themes

What is Attack on Titan Mobile APK?

Now that you have a basic understanding of what Attack on Titan is, let's talk about the game we are here for. Attack on Titan Mobile APK is an unofficial, fan-made game based on the series. It is not affiliated with or endorsed by the original creators or publishers of Attack on Titan. It is a free game that you can download and play on your Android device.

A fan-made game based on the series

Attack on Titan Mobile APK was created by fans who love the series and wanted to make their own version of it. The game is inspired by the anime and manga, but it does not follow the exact story or canon. It features original characters, settings, and missions that differ from the source material, and it also includes elements that are not present in the series, such as magic, fantasy, and romance.

Features and gameplay

Attack on Titan Mobile APK combines action, adventure, and role-playing. The game lets you create your own character and customize their appearance, skills, and equipment. You can choose between different classes, such as soldier, scout, mage, or engineer, and join different factions, such as the Survey Corps, the Military Police, or the Marleyans.

The game lets you explore various locations from the series, such as Shiganshina District, Trost District, Wall Rose, Wall Maria, and Marley. You can interact with other characters, both friendly and hostile, and take on missions and quests that will test your skills and courage. You can fight different types of titans, such as normal titans, abnormal titans, shifters, or colossal titans, and use different weapons and gear, such as swords, guns, cannons, or the omni-directional mobility gear (ODM gear), which lets you move around using grappling hooks.

Requirements and compatibility

Attack on Titan Mobile APK requires a fair amount of resources and storage space to run smoothly. The game has high-quality graphics and sound effects that make it immersive and realistic, but this also means it may not run well on low-end devices or older models. The game also requires a stable internet connection to play online.

The game is compatible with most Android devices running Android 4.4 or later. However, some devices may not be able to run it due to hardware or software limitations, and the game may not be available in some regions or countries due to legal or licensing issues.

How to download and install Attack on Titan Mobile APK?

If you are interested in playing Attack on Titan Mobile APK on your Android device, you will need to follow a few steps to download and install it. Here are the steps to follow:

The steps to follow

1. First, enable the installation of apps from unknown sources on your device. To do this, go to your device's settings, then security, then toggle the option that says "allow installation of apps from unknown sources". This lets you install apps that do not come from the Google Play Store.
2. Next, find a reliable and safe source to download the Attack on Titan Mobile APK file. You can search online for websites that offer the file, but beware of fake or malicious links that could harm your device or steal your data. You can also use a QR code scanner app to scan the code below, which will take you to a trusted source we have verified.
3. Once you have found the source, tap the download button and wait for the file to download to your device. The file is about 300 MB, so it may take some time depending on your internet speed and connection.
4. Open the downloaded APK file and confirm the installation when prompted.
5. When the installation has finished, you will see a message that says "app installed". Tap "open" to launch the game and enjoy!

If you want to double-check the file you downloaded, see the short checksum-verification sketch at the end of this article.

Precautions and risks

While downloading and installing Attack on Titan Mobile APK may seem easy and fun, there are some precautions and risks you should be aware of. Here are some of them:

• The game is not an official product of Attack on Titan or its creators or publishers. It is a fan-made game that may have bugs, errors, or glitches that could affect your gameplay or your device. It may not be updated regularly, or at all, so you could miss out on new features or improvements.
• The game is not available on the Google Play Store, which means it has not been verified or approved by Google or any other authority. It could contain viruses, malware, spyware, or other harmful software that could damage your device or compromise your security or privacy. It might also access personal information such as your contacts, photos, location, or messages without your permission or knowledge.
• The game may violate the intellectual property rights of Attack on Titan or its creators or publishers. This means it could be illegal or infringing in some countries or regions, and you could face legal consequences or penalties for downloading or playing it. It could also be taken down or removed by the authorities at any time, without notice or warning.

Alternatives and sources

If you are not comfortable downloading and installing Attack on Titan Mobile APK, or if you run into problems with it, there are some alternatives and sources you can try instead. Here are some of them:

• You can watch the Attack on Titan anime series online or offline using various streaming services or platforms, such as Netflix, Hulu, Crunchyroll, Funimation, or Amazon Prime Video. These services offer legal and safe access to all episodes and seasons of Attack on Titan, as well as other anime shows and movies. You can also read the Attack on Titan manga series online or offline using websites or apps such as Kodansha Comics, Manga Rock, Manga Plus, or Comixology, which offer legal and safe access to all chapters and volumes of Attack on Titan, as well as other manga titles and genres.

Conclusion

In conclusion, Attack on Titan Mobile APK is a fan-made game based on the popular anime and manga series Attack on Titan. It is a free game that you can download and play on your Android device. However, it is not an official Attack on Titan product: it may have bugs, errors, or glitches, it could contain harmful software, and it may violate intellectual property rights. Therefore, you should be careful and cautious when downloading and installing it, and you should also consider the alternatives and sources we have mentioned. We hope this article has helped you learn more about Attack on Titan Mobile APK and how to download and install it. If you have any questions or comments, feel free to leave a comment below. Thanks for reading, and have fun playing!

Frequently asked questions

Here are some frequently asked questions you might have about Attack on Titan Mobile APK:

1. Q: Is Attack on Titan Mobile APK safe to download and play?
A: Not necessarily. The game has not been verified by Google or any other authority and could contain harmful software, so you should only download it from a source you trust and at your own risk.
2. Q: Is Attack on Titan Mobile APK legal to download and play?
A: Attack on Titan Mobile APK is not a legal game to download and play, as it may violate the intellectual property rights of Attack on Titan or its creators or publishers. The game could also be illegal or infringing in some countries or regions, and you could face legal consequences or penalties for downloading or playing it. Therefore, you should only download and play it if you are sure it is allowed in your area and that you are not breaking any laws.
3. Q: Is Attack on Titan Mobile APK updated regularly?
A: No. Attack on Titan Mobile APK is a fan-made game that is not affiliated with or endorsed by the original creators or publishers of Attack on Titan. It may not receive new features or improvements, and it could stop working or be removed at any time, without notice or warning. You should not expect updates or support from the game's developers.
4. Q: How can I contact the developers of Attack on Titan Mobile APK?
A: You can contact the developers of Attack on Titan Mobile APK by visiting their official website or social media accounts, which are linked below. However, you should not expect any response or assistance from them, as they are not obliged to provide customer service or technical support for the game.
5. Q: Where can I find more information about Attack on Titan Mobile APK?
A: You can find more information about Attack on Titan Mobile APK by visiting its official website or social media accounts, which are linked below. However, you should not trust everything they say or show, as it could be biased or inaccurate. You should do your own research and verification before downloading or playing the game.
    -
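The guide above describes a manual download-and-install flow and warns that files from unverified sources can be tampered with. As a purely illustrative sketch that is not part of the original article, the following Python snippet shows one way to compare a downloaded file's SHA-256 digest against a value published by a source you trust; the file name and expected hash used here are placeholders, not real values.

```python
import hashlib
from pathlib import Path


def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Return the SHA-256 hex digest of a file, reading it in chunks."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()


if __name__ == "__main__":
    apk_path = "attack_on_titan_mobile.apk"  # placeholder: whatever file you actually downloaded
    expected = "replace-with-the-checksum-published-by-your-source"  # placeholder value
    if not Path(apk_path).is_file():
        raise SystemExit(f"{apk_path} not found")
    actual = sha256_of(apk_path)
    print("checksum matches" if actual == expected else f"checksum MISMATCH: {actual}")
```

Only install the file if the printed digest matches the one your source published; if they differ, discard the download.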
    -
    \ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Descargar Fondos De Escritorio Jdm Coches.md b/spaces/Benson/text-generation/Examples/Descargar Fondos De Escritorio Jdm Coches.md deleted file mode 100644 index 007b07ba93ea9a46c24cfeb2ecbc61fce0f83d22..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Descargar Fondos De Escritorio Jdm Coches.md +++ /dev/null @@ -1,146 +0,0 @@ - -

    Descargar fondo de pantalla Coches JDM: Una guía para los entusiastas del coche

    -

    Si usted es un fan de los coches, especialmente los coches japoneses, es posible que haya oído hablar del término JDM. JDM significa Mercado Nacional Japonés, y se refiere a los coches que se fabrican y venden en Japón. Los coches JDM son conocidos por su alto rendimiento, fiabilidad, innovación y estilo. Tienen un seguimiento leal entre los entusiastas del automóvil de todo el mundo, que admiran su historia, cultura y estética.

    -

    descargar fondos de escritorio jdm coches


    Downloadhttps://bltlly.com/2v6Jcj



    -

    En este artículo, vamos a explorar el mundo de los coches JDM y le mostrará cómo descargar los coches JDM fondo de pantalla para sus dispositivos. Ya sea que desee decorar su escritorio, computadora portátil, tableta o teléfono inteligente con impresionantes imágenes de sus autos JDM favoritos, lo tenemos cubierto. También compartiremos algunos de los beneficios de tener coches JDM fondo de pantalla y responder a algunas preguntas comunes que usted puede tener. Así que, vamos a empezar!

    -

    The History of JDM Cars

    -

    JDM cars have a long and rich history that goes back to the early 20th century. Japan was one of the first countries to adopt the automobile as a mode of transportation, and by the 1930s it had several domestic car manufacturers such as Toyota, Nissan, Honda, Mazda, Mitsubishi, and Subaru. However, after World War II, the Japanese car industry suffered a major setback due to the devastation caused by the war and the occupation by the Allied forces.

    -

    It was not until the 1950s that Japan's car industry began to recover and grow again. Japanese manufacturers focused on producing small, affordable, fuel-efficient cars that met the needs of the domestic market. They also invested heavily in research and development, quality control, and customer service. By the 1960s and 1970s, the Japanese car industry had become a global force, competing with American and European car makers in terms of sales, innovation, and reputation.

    - -

    These enthusiasts were also responsible for popularizing the term JDM, which originally referred to the parts and accessories made specifically for the Japanese market. These parts and accessories were often superior in quality, performance, or design to those made for other markets. They were also rare and exclusive, which made them more desirable and valuable. Eventually, the term JDM expanded to include not only the parts and accessories but also the cars themselves.

    -

    -

    The Characteristics of JDM Cars

    -

    So, what makes a JDM car? There is no definitive answer to this question, as different people may have different opinions or preferences. However, there are some common characteristics that most JDM cars share. These include:

    -
      -
    • Performance: JDM cars are designed to deliver high performance in terms of speed, power, handling, and efficiency. They often feature advanced engines, transmissions, suspensions, brakes, and other components that improve their performance. They also have lightweight bodies, aerodynamic shapes, and low centers of gravity that reduce drag and improve stability.
    • -
    • Reliability: JDM cars are built to last and to withstand a wide range of conditions and situations. They are held to high standards of quality and durability that ensure their longevity and safety. They also require minimal maintenance and repairs, which makes them cost-effective and convenient.
    • -
    • Innovation: JDM cars are constantly evolving and improving, thanks to the creativity and ingenuity of their manufacturers and enthusiasts. They often feature cutting-edge technologies, features, or designs that set them apart from other cars. They also adapt to the changing needs and preferences of the market, offering new models, variants, or options that suit different tastes and budgets.
    • - -
    -

    Of course, these characteristics are not exclusive to JDM cars, as other cars can also have some or all of them. However, JDM cars have a certain charm and appeal that makes them stand out from the crowd and capture the hearts and minds of car enthusiasts.

    -

    The Top 20 JDM Cars of All Time

    -

    With so many JDM cars to choose from, it can be hard to pick the best ones. However, based on popularity, influence, and reputation, here are some of the top 20 JDM cars of all time:

    <table>
    <tr><th>Name</th><th>Description</th></tr>
    <tr><td>Nissan Skyline GT-R</td><td>One of the most iconic and legendary JDM cars ever made, the Nissan Skyline GT-R is a high-performance sports car that debuted in 1969. It has gone through several generations and versions, each with its own improvements and innovations. It is known for its powerful engine, all-wheel-drive system, advanced technology, and sleek design. It is also famous for its appearances in various media such as movies, video games, anime, and manga.</td></tr>
    <tr><td>Honda Civic Type R</td><td>The Honda Civic Type R is a high-performance version of the Honda Civic, a compact car that debuted in 1972. The Type R variant was introduced in 1997 and has been produced in several generations since then. It is known for its lightweight body, powerful engine, responsive handling, and sporty design. It is also popular among tuners and racers who modify it for better performance or looks.</td></tr>
    <tr><td>Mazda RX-7</td><td></td></tr>
    <tr><td>Toyota Supra</td><td>The Toyota Supra is a sports car that debuted in 1978 and was produced until 2002. It is a successor to the Toyota Celica, a smaller and less powerful car. The Supra is known for its large turbocharged engine, rear-wheel-drive layout, sophisticated technology, and sleek design. It is also famous for its appearances in various media such as movies, video games, anime, and manga.</td></tr>
    <tr><td>Honda NSX</td><td>The Honda NSX is a sports car that debuted in 1990 and was produced until 2005. It is also known as the Acura NSX in North America and Hong Kong. It is one of the first cars to use an all-aluminium body, which makes it lighter and stronger than steel. It is also one of the first cars to feature a mid-engine layout, which improves the car's balance and handling. It is known for its refined engine, agile handling, and elegant design. It is also famous for having been developed with input from Formula One legend Ayrton Senna.</td></tr>
    <tr><td>Subaru Impreza WRX STI</td><td></td></tr>
    <tr><td>Mitsubishi Lancer Evolution</td><td>The Mitsubishi Lancer Evolution is a high-performance version of the Mitsubishi Lancer, a compact car that debuted in 1973. The Evolution variant was introduced in 1992 and has been produced in ten generations since then. It is known for its turbocharged engine, all-wheel-drive system, rally-inspired technology, and sporty design. It is also popular among tuners and racers who modify it for better performance or looks.</td></tr>
    <tr><td>Nissan 350Z</td><td>The Nissan 350Z is a sports car that debuted in 2002 and was produced until 2009. It is also known as the Nissan Fairlady Z in Japan. It is a successor to the Nissan 300ZX, an earlier generation of the Z-car series. The 350Z is known for its V6 engine, rear-wheel-drive layout, modern technology, and attractive design. It is also famous for its appearances in various media such as movies, video games, anime, and manga.</td></tr>
    <tr><td>Toyota AE86</td><td></td></tr>
    <tr><td>Honda S2000</td><td>The Honda S2000 is a roadster that debuted in 1999 and was produced until 2009. It is one of the few cars to use a naturally aspirated engine, meaning it does not rely on a turbocharger or supercharger to increase its power. It is known for its high-revving engine, rear-wheel-drive layout, balanced handling, and convertible design. It is also popular among tuners and racers who modify it for better performance or looks.</td></tr>
    <tr><td>Mazda MX-5 Miata</td><td>The Mazda MX-5 Miata is a roadster that debuted in 1989 and is still in production today. It is also known as the Mazda Roadster or the Mazda Eunos Roadster in Japan. It is one of the best-selling sports cars of all time, with more than a million units sold worldwide. It is known for its lightweight body, fun-to-drive handling, and affordable price. It is also famous for its appearances in various media such as movies, video games, anime, and manga.</td></tr>
    <tr><td>Lexus LFA</td><td></td></tr>
    <tr><td>Nissan Silvia</td><td>The Nissan Silvia is a sports car that debuted in 1964 and was produced until 2002. It is also known as the Nissan 180SX or the Nissan 240SX in North America. It is one of the most popular cars for drifting, thanks to its rear-wheel-drive layout, powerful engine, and easy-to-modify body. It is known for its performance, reliability, and style. It is also famous for its appearances in various media such as movies, video games, anime, and manga.</td></tr>
    <tr><td>Honda Integra Type R</td><td>The Honda Integra Type R is a high-performance version of the Honda Integra. It is also famous for its appearances in various media such as movies, video games, anime, and manga.</td></tr>
    <tr><td>Toyota MR2</td><td></td></tr>
    <tr><td>Mazda RX-8</td><td>The Mazda RX-8 is a sports car that debuted in 2003 and was produced until 2012. It is a successor to the Mazda RX-7, another rotary-engined car. The RX-8 is known for its unique design, which features four doors, four seats, and a triangular-shaped rotary engine. It is also known for its performance, handling, and sound. It is also popular among tuners and racers who modify it for better performance or looks.</td></tr>
    <tr><td>Toyota Celica</td><td></td></tr>
    </table>
 <h2>How to Download JDM Cars Wallpaper</h2>
-<p>Now that you have learned about some of the best JDM cars of all time, you may be wondering how to download JDM cars wallpaper for your devices. Well, it is not that hard, as long as you follow these simple steps:</p>
-<ol>
-<li>Find a website that offers JDM cars wallpaper. There are many websites that offer free or paid JDM cars wallpaper to download. Some of the most popular ones are WallpaperAccess and WallpapersWide. You can also use WhatIsMyScreenResolution.com to find out your screen resolution, so you can pick a wallpaper that fits your display. (A minimal download-script sketch appears after the FAQ list below.)</li>
  • Where can I find more JDM cars wallpaper?
    -You can find more JDM cars wallpaper by visiting other websites that offer it. You can also use search engines such as Google or Bing to find more websites that suit your preferences. You can also use social media platforms such as Pinterest, Instagram, or Facebook to find more JDM cars wallpaper that other users have posted or shared.
  • -
  • How can I make my own JDM cars wallpaper?
    -You can make your own JDM cars wallpaper by using photo-editing software such as Photoshop, GIMP, or Paint.NET. You can also use online tools such as Canva, Fotor, or PicMonkey to create your own JDM cars wallpaper. You can use your own photos of JDM cars or download images from the internet. You can also add text, filters, effects, or other elements to make your JDM cars wallpaper more unique and personal. (A small image-resizing sketch appears after this list.)
  • -
  • How can I share my JDM cars wallpaper with others?
    - -
  • How can I change my JDM cars wallpaper?
    -You can change your JDM cars wallpaper by following the same steps you used to set it as your background. You can also use apps or software that let you change your wallpaper automatically or periodically. For example, you can use Wallpaper Changer on Android or Wallpaper Engine on Steam to rotate your JDM cars wallpaper according to your preferences.
  • -
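    The download step described above boils down to fetching an image file over HTTP and saving it to disk. Below is a minimal sketch of that step in Python; the wallpaper URL and output filename are placeholders rather than links from any particular gallery, and it assumes the third-party requests package is installed.

```python
# Minimal wallpaper download sketch. The URL below is a placeholder, not a real
# wallpaper link; replace it with an image URL from whichever gallery you use.
import requests

WALLPAPER_URL = "https://example.com/jdm-wallpaper-1920x1080.jpg"  # placeholder


def download_wallpaper(url: str, filename: str) -> None:
    """Fetch an image over HTTP and write it to disk in binary mode."""
    response = requests.get(url, timeout=30)
    response.raise_for_status()  # stop here instead of saving an HTML error page
    with open(filename, "wb") as out:
        out.write(response.content)


if __name__ == "__main__":
    download_wallpaper(WALLPAPER_URL, "jdm-wallpaper.jpg")
```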

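    For the do-it-yourself route mentioned in the FAQ, a full photo editor is not strictly required: a few lines of Python with the Pillow library can crop and resize one of your own car photos to your screen resolution. This is only a sketch under the assumption that Pillow is installed; the filenames and the 1920x1080 target are placeholders you would swap for your own photo and display size.

```python
# Turn a car photo into a wallpaper sized exactly for the screen.
# Assumes the Pillow library (pip install Pillow); filenames are placeholders.
from PIL import Image, ImageOps


def make_wallpaper(src: str, dst: str, size=(1920, 1080)) -> None:
    """Scale and center-crop `src` to exactly `size`, preserving aspect ratio."""
    with Image.open(src) as img:
        wallpaper = ImageOps.fit(img, size)  # fit = resize + center crop, no distortion
        wallpaper.save(dst)


if __name__ == "__main__":
    make_wallpaper("my_jdm_photo.jpg", "my_jdm_wallpaper.jpg")
```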
    -
    -
    \ No newline at end of file diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/_fileno.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/_fileno.py deleted file mode 100644 index b17ee6511742d7a8d5950bf0ee57ced4d5fd45c2..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/_fileno.py +++ /dev/null @@ -1,24 +0,0 @@ -from __future__ import annotations - -from typing import IO, Callable - - -def get_fileno(file_like: IO[str]) -> int | None: - """Get fileno() from a file, accounting for poorly implemented file-like objects. - - Args: - file_like (IO): A file-like object. - - Returns: - int | None: The result of fileno if available, or None if operation failed. - """ - fileno: Callable[[], int] | None = getattr(file_like, "fileno", None) - if fileno is not None: - try: - return fileno() - except Exception: - # `fileno` is documented as potentially raising a OSError - # Alas, from the issues, there are so many poorly implemented file-like objects, - # that `fileno()` can raise just about anything. - return None - return None diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/pretty.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/pretty.py deleted file mode 100644 index 2bd9eb0073d3e0a6c56311b42097ff322f75dcdd..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/pretty.py +++ /dev/null @@ -1,994 +0,0 @@ -import builtins -import collections -import dataclasses -import inspect -import os -import sys -from array import array -from collections import Counter, UserDict, UserList, defaultdict, deque -from dataclasses import dataclass, fields, is_dataclass -from inspect import isclass -from itertools import islice -from types import MappingProxyType -from typing import ( - TYPE_CHECKING, - Any, - Callable, - DefaultDict, - Dict, - Iterable, - List, - Optional, - Sequence, - Set, - Tuple, - Union, -) - -from pip._vendor.rich.repr import RichReprResult - -try: - import attr as _attr_module - - _has_attrs = hasattr(_attr_module, "ib") -except ImportError: # pragma: no cover - _has_attrs = False - -from . import get_console -from ._loop import loop_last -from ._pick import pick_bool -from .abc import RichRenderable -from .cells import cell_len -from .highlighter import ReprHighlighter -from .jupyter import JupyterMixin, JupyterRenderable -from .measure import Measurement -from .text import Text - -if TYPE_CHECKING: - from .console import ( - Console, - ConsoleOptions, - HighlighterType, - JustifyMethod, - OverflowMethod, - RenderResult, - ) - - -def _is_attr_object(obj: Any) -> bool: - """Check if an object was created with attrs module.""" - return _has_attrs and _attr_module.has(type(obj)) - - -def _get_attr_fields(obj: Any) -> Sequence["_attr_module.Attribute[Any]"]: - """Get fields for an attrs object.""" - return _attr_module.fields(type(obj)) if _has_attrs else [] - - -def _is_dataclass_repr(obj: object) -> bool: - """Check if an instance of a dataclass contains the default repr. - - Args: - obj (object): A dataclass instance. - - Returns: - bool: True if the default repr is used, False if there is a custom repr. 
- """ - # Digging in to a lot of internals here - # Catching all exceptions in case something is missing on a non CPython implementation - try: - return obj.__repr__.__code__.co_filename == dataclasses.__file__ - except Exception: # pragma: no coverage - return False - - -_dummy_namedtuple = collections.namedtuple("_dummy_namedtuple", []) - - -def _has_default_namedtuple_repr(obj: object) -> bool: - """Check if an instance of namedtuple contains the default repr - - Args: - obj (object): A namedtuple - - Returns: - bool: True if the default repr is used, False if there's a custom repr. - """ - obj_file = None - try: - obj_file = inspect.getfile(obj.__repr__) - except (OSError, TypeError): - # OSError handles case where object is defined in __main__ scope, e.g. REPL - no filename available. - # TypeError trapped defensively, in case of object without filename slips through. - pass - default_repr_file = inspect.getfile(_dummy_namedtuple.__repr__) - return obj_file == default_repr_file - - -def _ipy_display_hook( - value: Any, - console: Optional["Console"] = None, - overflow: "OverflowMethod" = "ignore", - crop: bool = False, - indent_guides: bool = False, - max_length: Optional[int] = None, - max_string: Optional[int] = None, - max_depth: Optional[int] = None, - expand_all: bool = False, -) -> Union[str, None]: - # needed here to prevent circular import: - from .console import ConsoleRenderable - - # always skip rich generated jupyter renderables or None values - if _safe_isinstance(value, JupyterRenderable) or value is None: - return None - - console = console or get_console() - - with console.capture() as capture: - # certain renderables should start on a new line - if _safe_isinstance(value, ConsoleRenderable): - console.line() - console.print( - value - if _safe_isinstance(value, RichRenderable) - else Pretty( - value, - overflow=overflow, - indent_guides=indent_guides, - max_length=max_length, - max_string=max_string, - max_depth=max_depth, - expand_all=expand_all, - margin=12, - ), - crop=crop, - new_line_start=True, - end="", - ) - # strip trailing newline, not usually part of a text repr - # I'm not sure if this should be prevented at a lower level - return capture.get().rstrip("\n") - - -def _safe_isinstance( - obj: object, class_or_tuple: Union[type, Tuple[type, ...]] -) -> bool: - """isinstance can fail in rare cases, for example types with no __class__""" - try: - return isinstance(obj, class_or_tuple) - except Exception: - return False - - -def install( - console: Optional["Console"] = None, - overflow: "OverflowMethod" = "ignore", - crop: bool = False, - indent_guides: bool = False, - max_length: Optional[int] = None, - max_string: Optional[int] = None, - max_depth: Optional[int] = None, - expand_all: bool = False, -) -> None: - """Install automatic pretty printing in the Python REPL. - - Args: - console (Console, optional): Console instance or ``None`` to use global console. Defaults to None. - overflow (Optional[OverflowMethod], optional): Overflow method. Defaults to "ignore". - crop (Optional[bool], optional): Enable cropping of long lines. Defaults to False. - indent_guides (bool, optional): Enable indentation guides. Defaults to False. - max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation. - Defaults to None. - max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None. - max_depth (int, optional): Maximum depth of nested data structures, or None for no maximum. 
Defaults to None. - expand_all (bool, optional): Expand all containers. Defaults to False. - max_frames (int): Maximum number of frames to show in a traceback, 0 for no maximum. Defaults to 100. - """ - from pip._vendor.rich import get_console - - console = console or get_console() - assert console is not None - - def display_hook(value: Any) -> None: - """Replacement sys.displayhook which prettifies objects with Rich.""" - if value is not None: - assert console is not None - builtins._ = None # type: ignore[attr-defined] - console.print( - value - if _safe_isinstance(value, RichRenderable) - else Pretty( - value, - overflow=overflow, - indent_guides=indent_guides, - max_length=max_length, - max_string=max_string, - max_depth=max_depth, - expand_all=expand_all, - ), - crop=crop, - ) - builtins._ = value # type: ignore[attr-defined] - - if "get_ipython" in globals(): - ip = get_ipython() # type: ignore[name-defined] - from IPython.core.formatters import BaseFormatter - - class RichFormatter(BaseFormatter): # type: ignore[misc] - pprint: bool = True - - def __call__(self, value: Any) -> Any: - if self.pprint: - return _ipy_display_hook( - value, - console=get_console(), - overflow=overflow, - indent_guides=indent_guides, - max_length=max_length, - max_string=max_string, - max_depth=max_depth, - expand_all=expand_all, - ) - else: - return repr(value) - - # replace plain text formatter with rich formatter - rich_formatter = RichFormatter() - ip.display_formatter.formatters["text/plain"] = rich_formatter - else: - sys.displayhook = display_hook - - -class Pretty(JupyterMixin): - """A rich renderable that pretty prints an object. - - Args: - _object (Any): An object to pretty print. - highlighter (HighlighterType, optional): Highlighter object to apply to result, or None for ReprHighlighter. Defaults to None. - indent_size (int, optional): Number of spaces in indent. Defaults to 4. - justify (JustifyMethod, optional): Justify method, or None for default. Defaults to None. - overflow (OverflowMethod, optional): Overflow method, or None for default. Defaults to None. - no_wrap (Optional[bool], optional): Disable word wrapping. Defaults to False. - indent_guides (bool, optional): Enable indentation guides. Defaults to False. - max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation. - Defaults to None. - max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None. - max_depth (int, optional): Maximum depth of nested data structures, or None for no maximum. Defaults to None. - expand_all (bool, optional): Expand all containers. Defaults to False. - margin (int, optional): Subtrace a margin from width to force containers to expand earlier. Defaults to 0. - insert_line (bool, optional): Insert a new line if the output has multiple new lines. Defaults to False. 
- """ - - def __init__( - self, - _object: Any, - highlighter: Optional["HighlighterType"] = None, - *, - indent_size: int = 4, - justify: Optional["JustifyMethod"] = None, - overflow: Optional["OverflowMethod"] = None, - no_wrap: Optional[bool] = False, - indent_guides: bool = False, - max_length: Optional[int] = None, - max_string: Optional[int] = None, - max_depth: Optional[int] = None, - expand_all: bool = False, - margin: int = 0, - insert_line: bool = False, - ) -> None: - self._object = _object - self.highlighter = highlighter or ReprHighlighter() - self.indent_size = indent_size - self.justify: Optional["JustifyMethod"] = justify - self.overflow: Optional["OverflowMethod"] = overflow - self.no_wrap = no_wrap - self.indent_guides = indent_guides - self.max_length = max_length - self.max_string = max_string - self.max_depth = max_depth - self.expand_all = expand_all - self.margin = margin - self.insert_line = insert_line - - def __rich_console__( - self, console: "Console", options: "ConsoleOptions" - ) -> "RenderResult": - pretty_str = pretty_repr( - self._object, - max_width=options.max_width - self.margin, - indent_size=self.indent_size, - max_length=self.max_length, - max_string=self.max_string, - max_depth=self.max_depth, - expand_all=self.expand_all, - ) - pretty_text = Text.from_ansi( - pretty_str, - justify=self.justify or options.justify, - overflow=self.overflow or options.overflow, - no_wrap=pick_bool(self.no_wrap, options.no_wrap), - style="pretty", - ) - pretty_text = ( - self.highlighter(pretty_text) - if pretty_text - else Text( - f"{type(self._object)}.__repr__ returned empty string", - style="dim italic", - ) - ) - if self.indent_guides and not options.ascii_only: - pretty_text = pretty_text.with_indent_guides( - self.indent_size, style="repr.indent" - ) - if self.insert_line and "\n" in pretty_text: - yield "" - yield pretty_text - - def __rich_measure__( - self, console: "Console", options: "ConsoleOptions" - ) -> "Measurement": - pretty_str = pretty_repr( - self._object, - max_width=options.max_width, - indent_size=self.indent_size, - max_length=self.max_length, - max_string=self.max_string, - max_depth=self.max_depth, - expand_all=self.expand_all, - ) - text_width = ( - max(cell_len(line) for line in pretty_str.splitlines()) if pretty_str else 0 - ) - return Measurement(text_width, text_width) - - -def _get_braces_for_defaultdict(_object: DefaultDict[Any, Any]) -> Tuple[str, str, str]: - return ( - f"defaultdict({_object.default_factory!r}, {{", - "})", - f"defaultdict({_object.default_factory!r}, {{}})", - ) - - -def _get_braces_for_array(_object: "array[Any]") -> Tuple[str, str, str]: - return (f"array({_object.typecode!r}, [", "])", f"array({_object.typecode!r})") - - -_BRACES: Dict[type, Callable[[Any], Tuple[str, str, str]]] = { - os._Environ: lambda _object: ("environ({", "})", "environ({})"), - array: _get_braces_for_array, - defaultdict: _get_braces_for_defaultdict, - Counter: lambda _object: ("Counter({", "})", "Counter()"), - deque: lambda _object: ("deque([", "])", "deque()"), - dict: lambda _object: ("{", "}", "{}"), - UserDict: lambda _object: ("{", "}", "{}"), - frozenset: lambda _object: ("frozenset({", "})", "frozenset()"), - list: lambda _object: ("[", "]", "[]"), - UserList: lambda _object: ("[", "]", "[]"), - set: lambda _object: ("{", "}", "set()"), - tuple: lambda _object: ("(", ")", "()"), - MappingProxyType: lambda _object: ("mappingproxy({", "})", "mappingproxy({})"), -} -_CONTAINERS = tuple(_BRACES.keys()) -_MAPPING_CONTAINERS = (dict, 
os._Environ, MappingProxyType, UserDict) - - -def is_expandable(obj: Any) -> bool: - """Check if an object may be expanded by pretty print.""" - return ( - _safe_isinstance(obj, _CONTAINERS) - or (is_dataclass(obj)) - or (hasattr(obj, "__rich_repr__")) - or _is_attr_object(obj) - ) and not isclass(obj) - - -@dataclass -class Node: - """A node in a repr tree. May be atomic or a container.""" - - key_repr: str = "" - value_repr: str = "" - open_brace: str = "" - close_brace: str = "" - empty: str = "" - last: bool = False - is_tuple: bool = False - is_namedtuple: bool = False - children: Optional[List["Node"]] = None - key_separator: str = ": " - separator: str = ", " - - def iter_tokens(self) -> Iterable[str]: - """Generate tokens for this node.""" - if self.key_repr: - yield self.key_repr - yield self.key_separator - if self.value_repr: - yield self.value_repr - elif self.children is not None: - if self.children: - yield self.open_brace - if self.is_tuple and not self.is_namedtuple and len(self.children) == 1: - yield from self.children[0].iter_tokens() - yield "," - else: - for child in self.children: - yield from child.iter_tokens() - if not child.last: - yield self.separator - yield self.close_brace - else: - yield self.empty - - def check_length(self, start_length: int, max_length: int) -> bool: - """Check the length fits within a limit. - - Args: - start_length (int): Starting length of the line (indent, prefix, suffix). - max_length (int): Maximum length. - - Returns: - bool: True if the node can be rendered within max length, otherwise False. - """ - total_length = start_length - for token in self.iter_tokens(): - total_length += cell_len(token) - if total_length > max_length: - return False - return True - - def __str__(self) -> str: - repr_text = "".join(self.iter_tokens()) - return repr_text - - def render( - self, max_width: int = 80, indent_size: int = 4, expand_all: bool = False - ) -> str: - """Render the node to a pretty repr. - - Args: - max_width (int, optional): Maximum width of the repr. Defaults to 80. - indent_size (int, optional): Size of indents. Defaults to 4. - expand_all (bool, optional): Expand all levels. Defaults to False. - - Returns: - str: A repr string of the original object. 
- """ - lines = [_Line(node=self, is_root=True)] - line_no = 0 - while line_no < len(lines): - line = lines[line_no] - if line.expandable and not line.expanded: - if expand_all or not line.check_length(max_width): - lines[line_no : line_no + 1] = line.expand(indent_size) - line_no += 1 - - repr_str = "\n".join(str(line) for line in lines) - return repr_str - - -@dataclass -class _Line: - """A line in repr output.""" - - parent: Optional["_Line"] = None - is_root: bool = False - node: Optional[Node] = None - text: str = "" - suffix: str = "" - whitespace: str = "" - expanded: bool = False - last: bool = False - - @property - def expandable(self) -> bool: - """Check if the line may be expanded.""" - return bool(self.node is not None and self.node.children) - - def check_length(self, max_length: int) -> bool: - """Check this line fits within a given number of cells.""" - start_length = ( - len(self.whitespace) + cell_len(self.text) + cell_len(self.suffix) - ) - assert self.node is not None - return self.node.check_length(start_length, max_length) - - def expand(self, indent_size: int) -> Iterable["_Line"]: - """Expand this line by adding children on their own line.""" - node = self.node - assert node is not None - whitespace = self.whitespace - assert node.children - if node.key_repr: - new_line = yield _Line( - text=f"{node.key_repr}{node.key_separator}{node.open_brace}", - whitespace=whitespace, - ) - else: - new_line = yield _Line(text=node.open_brace, whitespace=whitespace) - child_whitespace = self.whitespace + " " * indent_size - tuple_of_one = node.is_tuple and len(node.children) == 1 - for last, child in loop_last(node.children): - separator = "," if tuple_of_one else node.separator - line = _Line( - parent=new_line, - node=child, - whitespace=child_whitespace, - suffix=separator, - last=last and not tuple_of_one, - ) - yield line - - yield _Line( - text=node.close_brace, - whitespace=whitespace, - suffix=self.suffix, - last=self.last, - ) - - def __str__(self) -> str: - if self.last: - return f"{self.whitespace}{self.text}{self.node or ''}" - else: - return ( - f"{self.whitespace}{self.text}{self.node or ''}{self.suffix.rstrip()}" - ) - - -def _is_namedtuple(obj: Any) -> bool: - """Checks if an object is most likely a namedtuple. It is possible - to craft an object that passes this check and isn't a namedtuple, but - there is only a minuscule chance of this happening unintentionally. - - Args: - obj (Any): The object to test - - Returns: - bool: True if the object is a namedtuple. False otherwise. - """ - try: - fields = getattr(obj, "_fields", None) - except Exception: - # Being very defensive - if we cannot get the attr then its not a namedtuple - return False - return isinstance(obj, tuple) and isinstance(fields, tuple) - - -def traverse( - _object: Any, - max_length: Optional[int] = None, - max_string: Optional[int] = None, - max_depth: Optional[int] = None, -) -> Node: - """Traverse object and generate a tree. - - Args: - _object (Any): Object to be traversed. - max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation. - Defaults to None. - max_string (int, optional): Maximum length of string before truncating, or None to disable truncating. - Defaults to None. - max_depth (int, optional): Maximum depth of data structures, or None for no maximum. - Defaults to None. - - Returns: - Node: The root of a tree structure which can be used to render a pretty repr. 
- """ - - def to_repr(obj: Any) -> str: - """Get repr string for an object, but catch errors.""" - if ( - max_string is not None - and _safe_isinstance(obj, (bytes, str)) - and len(obj) > max_string - ): - truncated = len(obj) - max_string - obj_repr = f"{obj[:max_string]!r}+{truncated}" - else: - try: - obj_repr = repr(obj) - except Exception as error: - obj_repr = f"" - return obj_repr - - visited_ids: Set[int] = set() - push_visited = visited_ids.add - pop_visited = visited_ids.remove - - def _traverse(obj: Any, root: bool = False, depth: int = 0) -> Node: - """Walk the object depth first.""" - - obj_id = id(obj) - if obj_id in visited_ids: - # Recursion detected - return Node(value_repr="...") - - obj_type = type(obj) - children: List[Node] - reached_max_depth = max_depth is not None and depth >= max_depth - - def iter_rich_args(rich_args: Any) -> Iterable[Union[Any, Tuple[str, Any]]]: - for arg in rich_args: - if _safe_isinstance(arg, tuple): - if len(arg) == 3: - key, child, default = arg - if default == child: - continue - yield key, child - elif len(arg) == 2: - key, child = arg - yield key, child - elif len(arg) == 1: - yield arg[0] - else: - yield arg - - try: - fake_attributes = hasattr( - obj, "awehoi234_wdfjwljet234_234wdfoijsdfmmnxpi492" - ) - except Exception: - fake_attributes = False - - rich_repr_result: Optional[RichReprResult] = None - if not fake_attributes: - try: - if hasattr(obj, "__rich_repr__") and not isclass(obj): - rich_repr_result = obj.__rich_repr__() - except Exception: - pass - - if rich_repr_result is not None: - push_visited(obj_id) - angular = getattr(obj.__rich_repr__, "angular", False) - args = list(iter_rich_args(rich_repr_result)) - class_name = obj.__class__.__name__ - - if args: - children = [] - append = children.append - - if reached_max_depth: - if angular: - node = Node(value_repr=f"<{class_name}...>") - else: - node = Node(value_repr=f"{class_name}(...)") - else: - if angular: - node = Node( - open_brace=f"<{class_name} ", - close_brace=">", - children=children, - last=root, - separator=" ", - ) - else: - node = Node( - open_brace=f"{class_name}(", - close_brace=")", - children=children, - last=root, - ) - for last, arg in loop_last(args): - if _safe_isinstance(arg, tuple): - key, child = arg - child_node = _traverse(child, depth=depth + 1) - child_node.last = last - child_node.key_repr = key - child_node.key_separator = "=" - append(child_node) - else: - child_node = _traverse(arg, depth=depth + 1) - child_node.last = last - append(child_node) - else: - node = Node( - value_repr=f"<{class_name}>" if angular else f"{class_name}()", - children=[], - last=root, - ) - pop_visited(obj_id) - elif _is_attr_object(obj) and not fake_attributes: - push_visited(obj_id) - children = [] - append = children.append - - attr_fields = _get_attr_fields(obj) - if attr_fields: - if reached_max_depth: - node = Node(value_repr=f"{obj.__class__.__name__}(...)") - else: - node = Node( - open_brace=f"{obj.__class__.__name__}(", - close_brace=")", - children=children, - last=root, - ) - - def iter_attrs() -> Iterable[ - Tuple[str, Any, Optional[Callable[[Any], str]]] - ]: - """Iterate over attr fields and values.""" - for attr in attr_fields: - if attr.repr: - try: - value = getattr(obj, attr.name) - except Exception as error: - # Can happen, albeit rarely - yield (attr.name, error, None) - else: - yield ( - attr.name, - value, - attr.repr if callable(attr.repr) else None, - ) - - for last, (name, value, repr_callable) in loop_last(iter_attrs()): - if repr_callable: - 
child_node = Node(value_repr=str(repr_callable(value))) - else: - child_node = _traverse(value, depth=depth + 1) - child_node.last = last - child_node.key_repr = name - child_node.key_separator = "=" - append(child_node) - else: - node = Node( - value_repr=f"{obj.__class__.__name__}()", children=[], last=root - ) - pop_visited(obj_id) - elif ( - is_dataclass(obj) - and not _safe_isinstance(obj, type) - and not fake_attributes - and _is_dataclass_repr(obj) - ): - push_visited(obj_id) - children = [] - append = children.append - if reached_max_depth: - node = Node(value_repr=f"{obj.__class__.__name__}(...)") - else: - node = Node( - open_brace=f"{obj.__class__.__name__}(", - close_brace=")", - children=children, - last=root, - empty=f"{obj.__class__.__name__}()", - ) - - for last, field in loop_last( - field for field in fields(obj) if field.repr - ): - child_node = _traverse(getattr(obj, field.name), depth=depth + 1) - child_node.key_repr = field.name - child_node.last = last - child_node.key_separator = "=" - append(child_node) - - pop_visited(obj_id) - elif _is_namedtuple(obj) and _has_default_namedtuple_repr(obj): - push_visited(obj_id) - class_name = obj.__class__.__name__ - if reached_max_depth: - # If we've reached the max depth, we still show the class name, but not its contents - node = Node( - value_repr=f"{class_name}(...)", - ) - else: - children = [] - append = children.append - node = Node( - open_brace=f"{class_name}(", - close_brace=")", - children=children, - empty=f"{class_name}()", - ) - for last, (key, value) in loop_last(obj._asdict().items()): - child_node = _traverse(value, depth=depth + 1) - child_node.key_repr = key - child_node.last = last - child_node.key_separator = "=" - append(child_node) - pop_visited(obj_id) - elif _safe_isinstance(obj, _CONTAINERS): - for container_type in _CONTAINERS: - if _safe_isinstance(obj, container_type): - obj_type = container_type - break - - push_visited(obj_id) - - open_brace, close_brace, empty = _BRACES[obj_type](obj) - - if reached_max_depth: - node = Node(value_repr=f"{open_brace}...{close_brace}") - elif obj_type.__repr__ != type(obj).__repr__: - node = Node(value_repr=to_repr(obj), last=root) - elif obj: - children = [] - node = Node( - open_brace=open_brace, - close_brace=close_brace, - children=children, - last=root, - ) - append = children.append - num_items = len(obj) - last_item_index = num_items - 1 - - if _safe_isinstance(obj, _MAPPING_CONTAINERS): - iter_items = iter(obj.items()) - if max_length is not None: - iter_items = islice(iter_items, max_length) - for index, (key, child) in enumerate(iter_items): - child_node = _traverse(child, depth=depth + 1) - child_node.key_repr = to_repr(key) - child_node.last = index == last_item_index - append(child_node) - else: - iter_values = iter(obj) - if max_length is not None: - iter_values = islice(iter_values, max_length) - for index, child in enumerate(iter_values): - child_node = _traverse(child, depth=depth + 1) - child_node.last = index == last_item_index - append(child_node) - if max_length is not None and num_items > max_length: - append(Node(value_repr=f"... 
+{num_items - max_length}", last=True)) - else: - node = Node(empty=empty, children=[], last=root) - - pop_visited(obj_id) - else: - node = Node(value_repr=to_repr(obj), last=root) - node.is_tuple = _safe_isinstance(obj, tuple) - node.is_namedtuple = _is_namedtuple(obj) - return node - - node = _traverse(_object, root=True) - return node - - -def pretty_repr( - _object: Any, - *, - max_width: int = 80, - indent_size: int = 4, - max_length: Optional[int] = None, - max_string: Optional[int] = None, - max_depth: Optional[int] = None, - expand_all: bool = False, -) -> str: - """Prettify repr string by expanding on to new lines to fit within a given width. - - Args: - _object (Any): Object to repr. - max_width (int, optional): Desired maximum width of repr string. Defaults to 80. - indent_size (int, optional): Number of spaces to indent. Defaults to 4. - max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation. - Defaults to None. - max_string (int, optional): Maximum length of string before truncating, or None to disable truncating. - Defaults to None. - max_depth (int, optional): Maximum depth of nested data structure, or None for no depth. - Defaults to None. - expand_all (bool, optional): Expand all containers regardless of available width. Defaults to False. - - Returns: - str: A possibly multi-line representation of the object. - """ - - if _safe_isinstance(_object, Node): - node = _object - else: - node = traverse( - _object, max_length=max_length, max_string=max_string, max_depth=max_depth - ) - repr_str: str = node.render( - max_width=max_width, indent_size=indent_size, expand_all=expand_all - ) - return repr_str - - -def pprint( - _object: Any, - *, - console: Optional["Console"] = None, - indent_guides: bool = True, - max_length: Optional[int] = None, - max_string: Optional[int] = None, - max_depth: Optional[int] = None, - expand_all: bool = False, -) -> None: - """A convenience function for pretty printing. - - Args: - _object (Any): Object to pretty print. - console (Console, optional): Console instance, or None to use default. Defaults to None. - max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation. - Defaults to None. - max_string (int, optional): Maximum length of strings before truncating, or None to disable. Defaults to None. - max_depth (int, optional): Maximum depth for nested data structures, or None for unlimited depth. Defaults to None. - indent_guides (bool, optional): Enable indentation guides. Defaults to True. - expand_all (bool, optional): Expand all containers. Defaults to False. 
- """ - _console = get_console() if console is None else console - _console.print( - Pretty( - _object, - max_length=max_length, - max_string=max_string, - max_depth=max_depth, - indent_guides=indent_guides, - expand_all=expand_all, - overflow="ignore", - ), - soft_wrap=True, - ) - - -if __name__ == "__main__": # pragma: no cover - - class BrokenRepr: - def __repr__(self) -> str: - 1 / 0 - return "this will fail" - - from typing import NamedTuple - - class StockKeepingUnit(NamedTuple): - name: str - description: str - price: float - category: str - reviews: List[str] - - d = defaultdict(int) - d["foo"] = 5 - data = { - "foo": [ - 1, - "Hello World!", - 100.123, - 323.232, - 432324.0, - {5, 6, 7, (1, 2, 3, 4), 8}, - ], - "bar": frozenset({1, 2, 3}), - "defaultdict": defaultdict( - list, {"crumble": ["apple", "rhubarb", "butter", "sugar", "flour"]} - ), - "counter": Counter( - [ - "apple", - "orange", - "pear", - "kumquat", - "kumquat", - "durian" * 100, - ] - ), - "atomic": (False, True, None), - "namedtuple": StockKeepingUnit( - "Sparkling British Spring Water", - "Carbonated spring water", - 0.9, - "water", - ["its amazing!", "its terrible!"], - ), - "Broken": BrokenRepr(), - } - data["foo"].append(data) # type: ignore[attr-defined] - - from pip._vendor.rich import print - - # print(Pretty(data, indent_guides=True, max_string=20)) - - class Thing: - def __repr__(self) -> str: - return "Hello\x1b[38;5;239m World!" - - print(Pretty(Thing())) diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/command/py36compat.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/command/py36compat.py deleted file mode 100644 index 343547a4d316e48144ba6bdf342dcc24cd6cb6cd..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/command/py36compat.py +++ /dev/null @@ -1,134 +0,0 @@ -import os -from glob import glob -from distutils.util import convert_path -from distutils.command import sdist - - -class sdist_add_defaults: - """ - Mix-in providing forward-compatibility for functionality as found in - distutils on Python 3.7. - - Do not edit the code in this class except to update functionality - as implemented in distutils. Instead, override in the subclass. - """ - - def add_defaults(self): - """Add all the default files to self.filelist: - - README or README.txt - - setup.py - - test/test*.py - - all pure Python modules mentioned in setup script - - all files pointed by package_data (build_py) - - all files defined in data_files. - - all files defined as scripts. - - all C sources listed as part of extensions or C libraries - in the setup script (doesn't catch C headers!) - Warns if (README or README.txt) or setup.py are missing; everything - else is optional. 
- """ - self._add_defaults_standards() - self._add_defaults_optional() - self._add_defaults_python() - self._add_defaults_data_files() - self._add_defaults_ext() - self._add_defaults_c_libs() - self._add_defaults_scripts() - - @staticmethod - def _cs_path_exists(fspath): - """ - Case-sensitive path existence check - - >>> sdist_add_defaults._cs_path_exists(__file__) - True - >>> sdist_add_defaults._cs_path_exists(__file__.upper()) - False - """ - if not os.path.exists(fspath): - return False - # make absolute so we always have a directory - abspath = os.path.abspath(fspath) - directory, filename = os.path.split(abspath) - return filename in os.listdir(directory) - - def _add_defaults_standards(self): - standards = [self.READMES, self.distribution.script_name] - for fn in standards: - if isinstance(fn, tuple): - alts = fn - got_it = False - for fn in alts: - if self._cs_path_exists(fn): - got_it = True - self.filelist.append(fn) - break - - if not got_it: - self.warn("standard file not found: should have one of " + - ', '.join(alts)) - else: - if self._cs_path_exists(fn): - self.filelist.append(fn) - else: - self.warn("standard file '%s' not found" % fn) - - def _add_defaults_optional(self): - optional = ['test/test*.py', 'setup.cfg'] - for pattern in optional: - files = filter(os.path.isfile, glob(pattern)) - self.filelist.extend(files) - - def _add_defaults_python(self): - # build_py is used to get: - # - python modules - # - files defined in package_data - build_py = self.get_finalized_command('build_py') - - # getting python files - if self.distribution.has_pure_modules(): - self.filelist.extend(build_py.get_source_files()) - - # getting package_data files - # (computed in build_py.data_files by build_py.finalize_options) - for pkg, src_dir, build_dir, filenames in build_py.data_files: - for filename in filenames: - self.filelist.append(os.path.join(src_dir, filename)) - - def _add_defaults_data_files(self): - # getting distribution.data_files - if self.distribution.has_data_files(): - for item in self.distribution.data_files: - if isinstance(item, str): - # plain file - item = convert_path(item) - if os.path.isfile(item): - self.filelist.append(item) - else: - # a (dirname, filenames) tuple - dirname, filenames = item - for f in filenames: - f = convert_path(f) - if os.path.isfile(f): - self.filelist.append(f) - - def _add_defaults_ext(self): - if self.distribution.has_ext_modules(): - build_ext = self.get_finalized_command('build_ext') - self.filelist.extend(build_ext.get_source_files()) - - def _add_defaults_c_libs(self): - if self.distribution.has_c_libraries(): - build_clib = self.get_finalized_command('build_clib') - self.filelist.extend(build_clib.get_source_files()) - - def _add_defaults_scripts(self): - if self.distribution.has_scripts(): - build_scripts = self.get_finalized_command('build_scripts') - self.filelist.extend(build_scripts.get_source_files()) - - -if hasattr(sdist.sdist, '_add_defaults_standards'): - # disable the functionality already available upstream - class sdist_add_defaults: # noqa - pass diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/DensePose/densepose/roi_head.py b/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/DensePose/densepose/roi_head.py deleted file mode 100644 index 023119760b77cf5294ed18292e77e7f495099770..0000000000000000000000000000000000000000 --- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/DensePose/densepose/roi_head.py +++ /dev/null @@ -1,213 +0,0 @@ -# -*- coding: 
utf-8 -*- -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved - -import numpy as np -from typing import Dict -import fvcore.nn.weight_init as weight_init -import torch -import torch.nn as nn -from torch.nn import functional as F - -from detectron2.layers import Conv2d, ShapeSpec, get_norm -from detectron2.modeling import ROI_HEADS_REGISTRY, StandardROIHeads -from detectron2.modeling.poolers import ROIPooler -from detectron2.modeling.roi_heads import select_foreground_proposals - -from .densepose_head import ( - build_densepose_data_filter, - build_densepose_head, - build_densepose_losses, - build_densepose_predictor, - densepose_inference, -) - - -class Decoder(nn.Module): - """ - A semantic segmentation head described in detail in the Panoptic Feature Pyramid Networks paper - (https://arxiv.org/abs/1901.02446). It takes FPN features as input and merges information from - all levels of the FPN into single output. - """ - - def __init__(self, cfg, input_shape: Dict[str, ShapeSpec], in_features): - super(Decoder, self).__init__() - - # fmt: off - self.in_features = in_features - feature_strides = {k: v.stride for k, v in input_shape.items()} - feature_channels = {k: v.channels for k, v in input_shape.items()} - num_classes = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECODER_NUM_CLASSES - conv_dims = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECODER_CONV_DIMS - self.common_stride = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECODER_COMMON_STRIDE - norm = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECODER_NORM - # fmt: on - - self.scale_heads = [] - for in_feature in self.in_features: - head_ops = [] - head_length = max( - 1, int(np.log2(feature_strides[in_feature]) - np.log2(self.common_stride)) - ) - for k in range(head_length): - conv = Conv2d( - feature_channels[in_feature] if k == 0 else conv_dims, - conv_dims, - kernel_size=3, - stride=1, - padding=1, - bias=not norm, - norm=get_norm(norm, conv_dims), - activation=F.relu, - ) - weight_init.c2_msra_fill(conv) - head_ops.append(conv) - if feature_strides[in_feature] != self.common_stride: - head_ops.append( - nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False) - ) - self.scale_heads.append(nn.Sequential(*head_ops)) - self.add_module(in_feature, self.scale_heads[-1]) - self.predictor = Conv2d(conv_dims, num_classes, kernel_size=1, stride=1, padding=0) - weight_init.c2_msra_fill(self.predictor) - - def forward(self, features): - for i, _ in enumerate(self.in_features): - if i == 0: - x = self.scale_heads[i](features[i]) - else: - x = x + self.scale_heads[i](features[i]) - x = self.predictor(x) - return x - - -@ROI_HEADS_REGISTRY.register() -class DensePoseROIHeads(StandardROIHeads): - """ - A Standard ROIHeads which contains an addition of DensePose head. 
- """ - - def __init__(self, cfg, input_shape): - super().__init__(cfg, input_shape) - self._init_densepose_head(cfg, input_shape) - - def _init_densepose_head(self, cfg, input_shape): - # fmt: off - self.densepose_on = cfg.MODEL.DENSEPOSE_ON - if not self.densepose_on: - return - self.densepose_data_filter = build_densepose_data_filter(cfg) - dp_pooler_resolution = cfg.MODEL.ROI_DENSEPOSE_HEAD.POOLER_RESOLUTION - dp_pooler_sampling_ratio = cfg.MODEL.ROI_DENSEPOSE_HEAD.POOLER_SAMPLING_RATIO - dp_pooler_type = cfg.MODEL.ROI_DENSEPOSE_HEAD.POOLER_TYPE - self.use_decoder = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECODER_ON - # fmt: on - if self.use_decoder: - dp_pooler_scales = (1.0 / input_shape[self.in_features[0]].stride,) - else: - dp_pooler_scales = tuple(1.0 / input_shape[k].stride for k in self.in_features) - in_channels = [input_shape[f].channels for f in self.in_features][0] - - if self.use_decoder: - self.decoder = Decoder(cfg, input_shape, self.in_features) - - self.densepose_pooler = ROIPooler( - output_size=dp_pooler_resolution, - scales=dp_pooler_scales, - sampling_ratio=dp_pooler_sampling_ratio, - pooler_type=dp_pooler_type, - ) - self.densepose_head = build_densepose_head(cfg, in_channels) - self.densepose_predictor = build_densepose_predictor( - cfg, self.densepose_head.n_out_channels - ) - self.densepose_losses = build_densepose_losses(cfg) - - def _forward_densepose(self, features, instances): - """ - Forward logic of the densepose prediction branch. - - Args: - features (list[Tensor]): #level input features for densepose prediction - instances (list[Instances]): the per-image instances to train/predict densepose. - In training, they can be the proposals. - In inference, they can be the predicted boxes. - - Returns: - In training, a dict of losses. - In inference, update `instances` with new fields "densepose" and return it. 
- """ - if not self.densepose_on: - return {} if self.training else instances - - features = [features[f] for f in self.in_features] - if self.training: - proposals, _ = select_foreground_proposals(instances, self.num_classes) - proposals_dp = self.densepose_data_filter(proposals) - if len(proposals_dp) > 0: - # NOTE may deadlock in DDP if certain workers have empty proposals_dp - proposal_boxes = [x.proposal_boxes for x in proposals_dp] - - if self.use_decoder: - features = [self.decoder(features)] - - features_dp = self.densepose_pooler(features, proposal_boxes) - densepose_head_outputs = self.densepose_head(features_dp) - densepose_outputs, _, confidences, _ = self.densepose_predictor( - densepose_head_outputs - ) - densepose_loss_dict = self.densepose_losses( - proposals_dp, densepose_outputs, confidences - ) - return densepose_loss_dict - else: - pred_boxes = [x.pred_boxes for x in instances] - - if self.use_decoder: - features = [self.decoder(features)] - - features_dp = self.densepose_pooler(features, pred_boxes) - if len(features_dp) > 0: - densepose_head_outputs = self.densepose_head(features_dp) - densepose_outputs, _, confidences, _ = self.densepose_predictor( - densepose_head_outputs - ) - else: - # If no detection occurred instances - # set densepose_outputs to empty tensors - empty_tensor = torch.zeros(size=(0, 0, 0, 0), device=features_dp.device) - densepose_outputs = tuple([empty_tensor] * 4) - confidences = tuple([empty_tensor] * 4) - - densepose_inference(densepose_outputs, confidences, instances) - return instances - - def forward(self, images, features, proposals, targets=None): - instances, losses = super().forward(images, features, proposals, targets) - del targets, images - - if self.training: - losses.update(self._forward_densepose(features, instances)) - return instances, losses - - def forward_with_given_boxes(self, features, instances): - """ - Use the given boxes in `instances` to produce other (non-box) per-ROI outputs. - - This is useful for downstream tasks where a box is known, but need to obtain - other attributes (outputs of other heads). - Test-time augmentation also uses this. - - Args: - features: same as in `forward()` - instances (list[Instances]): instances to predict other outputs. Expect the keys - "pred_boxes" and "pred_classes" to exist. - - Returns: - instances (list[Instances]): - the same `Instances` objects, with extra - fields such as `pred_masks` or `pred_keypoints`. - """ - - instances = super().forward_with_given_boxes(features, instances) - instances = self._forward_densepose(features, instances) - return instances diff --git a/spaces/CVPR/LIVE/pybind11/tests/test_exceptions.cpp b/spaces/CVPR/LIVE/pybind11/tests/test_exceptions.cpp deleted file mode 100644 index 537819d987a46746cf65ccb812c312219fcd41ba..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/pybind11/tests/test_exceptions.cpp +++ /dev/null @@ -1,224 +0,0 @@ -/* - tests/test_custom-exceptions.cpp -- exception translation - - Copyright (c) 2016 Pim Schellart - - All rights reserved. Use of this source code is governed by a - BSD-style license that can be found in the LICENSE file. 
-*/ - -#include "pybind11_tests.h" - -// A type that should be raised as an exception in Python -class MyException : public std::exception { -public: - explicit MyException(const char * m) : message{m} {} - virtual const char * what() const noexcept override {return message.c_str();} -private: - std::string message = ""; -}; - -// A type that should be translated to a standard Python exception -class MyException2 : public std::exception { -public: - explicit MyException2(const char * m) : message{m} {} - virtual const char * what() const noexcept override {return message.c_str();} -private: - std::string message = ""; -}; - -// A type that is not derived from std::exception (and is thus unknown) -class MyException3 { -public: - explicit MyException3(const char * m) : message{m} {} - virtual const char * what() const noexcept {return message.c_str();} -private: - std::string message = ""; -}; - -// A type that should be translated to MyException -// and delegated to its exception translator -class MyException4 : public std::exception { -public: - explicit MyException4(const char * m) : message{m} {} - virtual const char * what() const noexcept override {return message.c_str();} -private: - std::string message = ""; -}; - - -// Like the above, but declared via the helper function -class MyException5 : public std::logic_error { -public: - explicit MyException5(const std::string &what) : std::logic_error(what) {} -}; - -// Inherits from MyException5 -class MyException5_1 : public MyException5 { - using MyException5::MyException5; -}; - -struct PythonCallInDestructor { - PythonCallInDestructor(const py::dict &d) : d(d) {} - ~PythonCallInDestructor() { d["good"] = true; } - - py::dict d; -}; - - - -struct PythonAlreadySetInDestructor { - PythonAlreadySetInDestructor(const py::str &s) : s(s) {} - ~PythonAlreadySetInDestructor() { - py::dict foo; - try { - // Assign to a py::object to force read access of nonexistent dict entry - py::object o = foo["bar"]; - } - catch (py::error_already_set& ex) { - ex.discard_as_unraisable(s); - } - } - - py::str s; -}; - - -TEST_SUBMODULE(exceptions, m) { - m.def("throw_std_exception", []() { - throw std::runtime_error("This exception was intentionally thrown."); - }); - - // make a new custom exception and use it as a translation target - static py::exception ex(m, "MyException"); - py::register_exception_translator([](std::exception_ptr p) { - try { - if (p) std::rethrow_exception(p); - } catch (const MyException &e) { - // Set MyException as the active python error - ex(e.what()); - } - }); - - // register new translator for MyException2 - // no need to store anything here because this type will - // never by visible from Python - py::register_exception_translator([](std::exception_ptr p) { - try { - if (p) std::rethrow_exception(p); - } catch (const MyException2 &e) { - // Translate this exception to a standard RuntimeError - PyErr_SetString(PyExc_RuntimeError, e.what()); - } - }); - - // register new translator for MyException4 - // which will catch it and delegate to the previously registered - // translator for MyException by throwing a new exception - py::register_exception_translator([](std::exception_ptr p) { - try { - if (p) std::rethrow_exception(p); - } catch (const MyException4 &e) { - throw MyException(e.what()); - } - }); - - // A simple exception translation: - auto ex5 = py::register_exception(m, "MyException5"); - // A slightly more complicated one that declares MyException5_1 as a subclass of MyException5 - py::register_exception(m, 
"MyException5_1", ex5.ptr()); - - m.def("throws1", []() { throw MyException("this error should go to a custom type"); }); - m.def("throws2", []() { throw MyException2("this error should go to a standard Python exception"); }); - m.def("throws3", []() { throw MyException3("this error cannot be translated"); }); - m.def("throws4", []() { throw MyException4("this error is rethrown"); }); - m.def("throws5", []() { throw MyException5("this is a helper-defined translated exception"); }); - m.def("throws5_1", []() { throw MyException5_1("MyException5 subclass"); }); - m.def("throws_logic_error", []() { throw std::logic_error("this error should fall through to the standard handler"); }); - m.def("throws_overflow_error", []() {throw std::overflow_error(""); }); - m.def("exception_matches", []() { - py::dict foo; - try { - // Assign to a py::object to force read access of nonexistent dict entry - py::object o = foo["bar"]; - } - catch (py::error_already_set& ex) { - if (!ex.matches(PyExc_KeyError)) throw; - return true; - } - return false; - }); - m.def("exception_matches_base", []() { - py::dict foo; - try { - // Assign to a py::object to force read access of nonexistent dict entry - py::object o = foo["bar"]; - } - catch (py::error_already_set &ex) { - if (!ex.matches(PyExc_Exception)) throw; - return true; - } - return false; - }); - m.def("modulenotfound_exception_matches_base", []() { - try { - // On Python >= 3.6, this raises a ModuleNotFoundError, a subclass of ImportError - py::module::import("nonexistent"); - } - catch (py::error_already_set &ex) { - if (!ex.matches(PyExc_ImportError)) throw; - return true; - } - return false; - }); - - m.def("throw_already_set", [](bool err) { - if (err) - PyErr_SetString(PyExc_ValueError, "foo"); - try { - throw py::error_already_set(); - } catch (const std::runtime_error& e) { - if ((err && e.what() != std::string("ValueError: foo")) || - (!err && e.what() != std::string("Unknown internal error occurred"))) - { - PyErr_Clear(); - throw std::runtime_error("error message mismatch"); - } - } - PyErr_Clear(); - if (err) - PyErr_SetString(PyExc_ValueError, "foo"); - throw py::error_already_set(); - }); - - m.def("python_call_in_destructor", [](py::dict d) { - try { - PythonCallInDestructor set_dict_in_destructor(d); - PyErr_SetString(PyExc_ValueError, "foo"); - throw py::error_already_set(); - } catch (const py::error_already_set&) { - return true; - } - return false; - }); - - m.def("python_alreadyset_in_destructor", [](py::str s) { - PythonAlreadySetInDestructor alreadyset_in_destructor(s); - return true; - }); - - // test_nested_throws - m.def("try_catch", [m](py::object exc_type, py::function f, py::args args) { - try { f(*args); } - catch (py::error_already_set &ex) { - if (ex.matches(exc_type)) - py::print(ex.what()); - else - throw; - } - }); - - // Test repr that cannot be displayed - m.def("simple_bool_passthrough", [](bool x) {return x;}); - -} diff --git a/spaces/CVPR/LIVE/thrust/thrust/random/linear_congruential_engine.h b/spaces/CVPR/LIVE/thrust/thrust/random/linear_congruential_engine.h deleted file mode 100644 index 0dc72b3b136e7c49ddd572d201d575b8a2d2320a..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/random/linear_congruential_engine.h +++ /dev/null @@ -1,295 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - -/*! \file linear_congruential_engine.h - * \brief A linear congruential pseudorandom number engine. - */ - -#pragma once - -#include -#include -#include -#include -#include - -namespace thrust -{ - -namespace random -{ - -/*! \addtogroup random_number_engine_templates Random Number Engine Class Templates - * \ingroup random - * \{ - */ - -/*! \class linear_congruential_engine - * \brief A \p linear_congruential_engine random number engine produces unsigned integer - * random numbers using a linear congruential random number generation algorithm. - * - * The generation algorithm has the form x_i = (a * x_{i-1} + c) mod m. - * - * \tparam UIntType The type of unsigned integer to produce. - * \tparam a The multiplier used in the generation algorithm. - * \tparam c The increment used in the generation algorithm. - * \tparam m The modulus used in the generation algorithm. - * - * \note Inexperienced users should not use this class template directly. Instead, use - * \p minstd_rand or \p minstd_rand0. - * - * The following code snippet shows examples of use of a \p linear_congruential_engine instance: - * - * \code - * #include - * #include - * - * int main(void) - * { - * // create a minstd_rand object, which is an instance of linear_congruential_engine - * thrust::minstd_rand rng1; - * - * // output some random values to cout - * std::cout << rng1() << std::endl; - * - * // a random value is printed - * - * // create a new minstd_rand from a seed - * thrust::minstd_rand rng2(13); - * - * // discard some random values - * rng2.discard(13); - * - * // stream the object to an iostream - * std::cout << rng2 << std::endl; - * - * // rng2's current state is printed - * - * // print the minimum and maximum values that minstd_rand can produce - * std::cout << thrust::minstd_rand::min << std::endl; - * std::cout << thrust::minstd_rand::max << std::endl; - * - * // the range of minstd_rand is printed - * - * // save the state of rng2 to a different object - * thrust::minstd_rand rng3 = rng2; - * - * // compare rng2 and rng3 - * std::cout << (rng2 == rng3) << std::endl; - * - * // 1 is printed - * - * // re-seed rng2 with a different seed - * rng2.seed(7); - * - * // compare rng2 and rng3 - * std::cout << (rng2 == rng3) << std::endl; - * - * // 0 is printed - * - * return 0; - * } - * - * \endcode - * - * \see thrust::random::minstd_rand - * \see thrust::random::minstd_rand0 - */ -template - class linear_congruential_engine -{ - public: - // types - - /*! \typedef result_type - * \brief The type of the unsigned integer produced by this \p linear_congruential_engine. - */ - typedef UIntType result_type; - - // engine characteristics - - /*! The multiplier used in the generation algorithm. - */ - static const result_type multiplier = a; - - /*! The increment used in the generation algorithm. - */ - static const result_type increment = c; - - /*! The modulus used in the generation algorithm. - */ - static const result_type modulus = m; - - /*! The smallest value this \p linear_congruential_engine may potentially produce. - */ - static const result_type min = c == 0u ? 
1u : 0u; - - /*! The largest value this \p linear_congruential_engine may potentially produce. - */ - static const result_type max = m - 1u; - - /*! The default seed of this \p linear_congruential_engine. - */ - static const result_type default_seed = 1u; - - // constructors and seeding functions - - /*! This constructor, which optionally accepts a seed, initializes a new - * \p linear_congruential_engine. - * - * \param s The seed used to intialize this \p linear_congruential_engine's state. - */ - __host__ __device__ - explicit linear_congruential_engine(result_type s = default_seed); - - /*! This method initializes this \p linear_congruential_engine's state, and optionally accepts - * a seed value. - * - * \param s The seed used to initializes this \p linear_congruential_engine's state. - */ - __host__ __device__ - void seed(result_type s = default_seed); - - // generating functions - - /*! This member function produces a new random value and updates this \p linear_congruential_engine's state. - * \return A new random number. - */ - __host__ __device__ - result_type operator()(void); - - /*! This member function advances this \p linear_congruential_engine's state a given number of times - * and discards the results. - * - * \param z The number of random values to discard. - * \note This function is provided because an implementation may be able to accelerate it. - */ - __host__ __device__ - void discard(unsigned long long z); - - /*! \cond - */ - private: - result_type m_x; - - static void transition(result_type &state); - - friend struct thrust::random::detail::random_core_access; - - friend struct thrust::random::detail::linear_congruential_engine_discard; - - __host__ __device__ - bool equal(const linear_congruential_engine &rhs) const; - - template - std::basic_ostream& stream_out(std::basic_ostream &os) const; - - template - std::basic_istream& stream_in(std::basic_istream &is); - - /*! \endcond - */ -}; // end linear_congruential_engine - - -/*! This function checks two \p linear_congruential_engines for equality. - * \param lhs The first \p linear_congruential_engine to test. - * \param rhs The second \p linear_congruential_engine to test. - * \return \c true if \p lhs is equal to \p rhs; \c false, otherwise. - */ -template -__host__ __device__ -bool operator==(const linear_congruential_engine &lhs, - const linear_congruential_engine &rhs); - - -/*! This function checks two \p linear_congruential_engines for inequality. - * \param lhs The first \p linear_congruential_engine to test. - * \param rhs The second \p linear_congruential_engine to test. - * \return \c true if \p lhs is not equal to \p rhs; \c false, otherwise. - */ -template -__host__ __device__ -bool operator!=(const linear_congruential_engine &lhs, - const linear_congruential_engine &rhs); - - -/*! This function streams a linear_congruential_engine to a \p std::basic_ostream. - * \param os The \p basic_ostream to stream out to. - * \param e The \p linear_congruential_engine to stream out. - * \return \p os - */ -template -std::basic_ostream& -operator<<(std::basic_ostream &os, - const linear_congruential_engine &e); - - -/*! This function streams a linear_congruential_engine in from a std::basic_istream. - * \param is The \p basic_istream to stream from. - * \param e The \p linear_congruential_engine to stream in. - * \return \p is - */ -template -std::basic_istream& -operator>>(std::basic_istream &is, - linear_congruential_engine &e); - - -/*! \} // random_number_engine_templates - */ - - -/*! 
\addtogroup predefined_random - * \{ - */ - -// XXX the type N2111 used here was uint_fast32_t - -/*! \typedef minstd_rand0 - * \brief A random number engine with predefined parameters which implements a version of - * the Minimal Standard random number generation algorithm. - * \note The 10000th consecutive invocation of a default-constructed object of type \p minstd_rand0 - * shall produce the value \c 1043618065 . - */ -typedef linear_congruential_engine minstd_rand0; - - -/*! \typedef minstd_rand - * \brief A random number engine with predefined parameters which implements a version of - * the Minimal Standard random number generation algorithm. - * \note The 10000th consecutive invocation of a default-constructed object of type \p minstd_rand - * shall produce the value \c 399268537 . - */ -typedef linear_congruential_engine minstd_rand; - -/*! \} // predefined_random - */ - -} // end random - -// import names into thrust:: -using random::linear_congruential_engine; -using random::minstd_rand; -using random::minstd_rand0; - -} // end thrust - -#include - diff --git a/spaces/ChristopherMarais/Andrew_AI-BB_classification-beta/mysite/andrew_alpha/apps.py b/spaces/ChristopherMarais/Andrew_AI-BB_classification-beta/mysite/andrew_alpha/apps.py deleted file mode 100644 index 9282ec75b33b5930ebffd74affa2e829e08b0fbb..0000000000000000000000000000000000000000 --- a/spaces/ChristopherMarais/Andrew_AI-BB_classification-beta/mysite/andrew_alpha/apps.py +++ /dev/null @@ -1,6 +0,0 @@ -from django.apps import AppConfig - - -class AndrewAlphaConfig(AppConfig): - default_auto_field = 'django.db.models.BigAutoField' - name = 'andrew_alpha' diff --git a/spaces/Clebersla/RVC_V2_Huggingface_Version/lib/infer_pack/models_onnx.py b/spaces/Clebersla/RVC_V2_Huggingface_Version/lib/infer_pack/models_onnx.py deleted file mode 100644 index 963e67b29f828e9fdd096397952054fe77cf3d10..0000000000000000000000000000000000000000 --- a/spaces/Clebersla/RVC_V2_Huggingface_Version/lib/infer_pack/models_onnx.py +++ /dev/null @@ -1,819 +0,0 @@ -import math, pdb, os -from time import time as ttime -import torch -from torch import nn -from torch.nn import functional as F -from lib.infer_pack import modules -from lib.infer_pack import attentions -from lib.infer_pack import commons -from lib.infer_pack.commons import init_weights, get_padding -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from lib.infer_pack.commons import init_weights -import numpy as np -from lib.infer_pack import commons - - -class TextEncoder256(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * 
math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class TextEncoder768(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(768, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0, - ): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer( - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - mean_only=True, - ) - ) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - def remove_weight_norm(self): - for i in range(self.n_flows): - self.flows[i * 2].remove_weight_norm() - - -class PosteriorEncoder(nn.Module): - def __init__( - self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( - x.dtype - ) - x 
= self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class Generator(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=0, - ): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class SineGen(torch.nn.Module): - """Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - sine_amp = 0.1, noise_std = 0.003, - voiced_threshold = 0, - flag_for_pulse=False) - samp_rate: sampling rate in Hz - harmonic_num: number of harmonic overtones (default 0) - sine_amp: amplitude of sine-wavefrom (default 0.1) - noise_std: std of Gaussian noise (default 0.003) - voiced_thoreshold: F0 threshold for U/V classification (default 0) - flag_for_pulse: this SinGen is used inside PulseGen (default False) - Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) - """ - - def __init__( - self, - samp_rate, - harmonic_num=0, - sine_amp=0.1, - noise_std=0.003, - voiced_threshold=0, - flag_for_pulse=False, - ): - super(SineGen, self).__init__() - self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - - def _f02uv(self, f0): - # generate uv signal - uv = torch.ones_like(f0) - uv = uv * (f0 > self.voiced_threshold) - return uv - - def forward(self, f0, upp): - """sine_tensor, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, length, dim) - output uv: 
tensor(batchsize=1, length, 1) - """ - with torch.no_grad(): - f0 = f0[:, None].transpose(1, 2) - f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device) - # fundamental component - f0_buf[:, :, 0] = f0[:, :, 0] - for idx in np.arange(self.harmonic_num): - f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * ( - idx + 2 - ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic - rad_values = (f0_buf / self.sampling_rate) % 1 ###%1意味着n_har的乘积无法后处理优化 - rand_ini = torch.rand( - f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device - ) - rand_ini[:, 0] = 0 - rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - tmp_over_one = torch.cumsum(rad_values, 1) # % 1 #####%1意味着后面的cumsum无法再优化 - tmp_over_one *= upp - tmp_over_one = F.interpolate( - tmp_over_one.transpose(2, 1), - scale_factor=upp, - mode="linear", - align_corners=True, - ).transpose(2, 1) - rad_values = F.interpolate( - rad_values.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose( - 2, 1 - ) ####### - tmp_over_one %= 1 - tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 - sine_waves = torch.sin( - torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi - ) - sine_waves = sine_waves * self.sine_amp - uv = self._f02uv(f0) - uv = F.interpolate( - uv.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose(2, 1) - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - """SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - uv (batchsize, length, 1) - """ - - def __init__( - self, - sampling_rate, - harmonic_num=0, - sine_amp=0.1, - add_noise_std=0.003, - voiced_threshod=0, - is_half=True, - ): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - self.is_half = is_half - # to produce sine waveforms - self.l_sin_gen = SineGen( - sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod - ) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x, upp=None): - sine_wavs, uv, _ = self.l_sin_gen(x, upp) - if self.is_half: - sine_wavs = sine_wavs.half() - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - return sine_merge, None, None # noise, uv - - -class GeneratorNSF(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - sr, - is_half=False, - ): - super(GeneratorNSF, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - - self.f0_upsamp = 
torch.nn.Upsample(scale_factor=np.prod(upsample_rates)) - self.m_source = SourceModuleHnNSF( - sampling_rate=sr, harmonic_num=0, is_half=is_half - ) - self.noise_convs = nn.ModuleList() - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - c_cur = upsample_initial_channel // (2 ** (i + 1)) - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - if i + 1 < len(upsample_rates): - stride_f0 = np.prod(upsample_rates[i + 1 :]) - self.noise_convs.append( - Conv1d( - 1, - c_cur, - kernel_size=stride_f0 * 2, - stride=stride_f0, - padding=stride_f0 // 2, - ) - ) - else: - self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - self.upp = np.prod(upsample_rates) - - def forward(self, x, f0, g=None): - har_source, noi_source, uv = self.m_source(f0, self.upp) - har_source = har_source.transpose(1, 2) - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -sr2sr = { - "32k": 32000, - "40k": 40000, - "48k": 48000, -} - - -class SynthesizerTrnMsNSFsidM(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - version, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - if version == "v1": - self.enc_p = TextEncoder256( - 
inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - else: - self.enc_p = TextEncoder768( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - self.speaker_map = None - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def construct_spkmixmap(self, n_speaker): - self.speaker_map = torch.zeros((n_speaker, 1, 1, self.gin_channels)) - for i in range(n_speaker): - self.speaker_map[i] = self.emb_g(torch.LongTensor([[i]])) - self.speaker_map = self.speaker_map.unsqueeze(0) - - def forward(self, phone, phone_lengths, pitch, nsff0, g, rnd, max_len=None): - if self.speaker_map is not None: # [N, S] * [S, B, 1, H] - g = g.reshape((g.shape[0], g.shape[1], 1, 1, 1)) # [N, S, B, 1, 1] - g = g * self.speaker_map # [N, S, B, 1, H] - g = torch.sum(g, dim=1) # [N, 1, B, 1, H] - g = g.transpose(0, -1).transpose(0, -2).squeeze(0) # [B, H, N] - else: - g = g.unsqueeze(0) - g = self.emb_g(g).transpose(1, 2) - - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * rnd) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g) - return o - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11, 17] - # periods = [3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class MultiPeriodDiscriminatorV2(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminatorV2, self).__init__() - # periods = [2, 3, 5, 7, 11, 17] - periods = [2, 3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # 
print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ] - ) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f( - Conv2d( - 1, - 32, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 32, - 128, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 128, - 512, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 512, - 1024, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 1024, - 1024, - (kernel_size, 1), - 1, - padding=(get_padding(kernel_size, 1), 0), - ) - ), - ] - ) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap diff --git a/spaces/Cong723/gpt-academic-public/crazy_functions/test_project/cpp/cppipc/shm.cpp b/spaces/Cong723/gpt-academic-public/crazy_functions/test_project/cpp/cppipc/shm.cpp deleted file mode 100644 index 593ce3129dc1574dbc8fc8b088cf595df215de93..0000000000000000000000000000000000000000 --- a/spaces/Cong723/gpt-academic-public/crazy_functions/test_project/cpp/cppipc/shm.cpp +++ /dev/null @@ -1,103 +0,0 @@ - -#include -#include - -#include "libipc/shm.h" - -#include "libipc/utility/pimpl.h" -#include "libipc/memory/resource.h" - -namespace ipc { -namespace shm { - -class handle::handle_ : public pimpl { -public: - shm::id_t id_ = nullptr; - void* m_ = nullptr; - - ipc::string n_; - std::size_t s_ = 0; -}; - -handle::handle() - : p_(p_->make()) { -} - -handle::handle(char const * name, std::size_t size, unsigned mode) - : handle() { - acquire(name, size, mode); -} - -handle::handle(handle&& rhs) - : handle() { - swap(rhs); -} - -handle::~handle() { - release(); - p_->clear(); -} - -void handle::swap(handle& rhs) { - 
std::swap(p_, rhs.p_); -} - -handle& handle::operator=(handle rhs) { - swap(rhs); - return *this; -} - -bool handle::valid() const noexcept { - return impl(p_)->m_ != nullptr; -} - -std::size_t handle::size() const noexcept { - return impl(p_)->s_; -} - -char const * handle::name() const noexcept { - return impl(p_)->n_.c_str(); -} - -std::int32_t handle::ref() const noexcept { - return shm::get_ref(impl(p_)->id_); -} - -void handle::sub_ref() noexcept { - shm::sub_ref(impl(p_)->id_); -} - -bool handle::acquire(char const * name, std::size_t size, unsigned mode) { - release(); - impl(p_)->id_ = shm::acquire((impl(p_)->n_ = name).c_str(), size, mode); - impl(p_)->m_ = shm::get_mem(impl(p_)->id_, &(impl(p_)->s_)); - return valid(); -} - -std::int32_t handle::release() { - if (impl(p_)->id_ == nullptr) return -1; - return shm::release(detach()); -} - -void* handle::get() const { - return impl(p_)->m_; -} - -void handle::attach(id_t id) { - if (id == nullptr) return; - release(); - impl(p_)->id_ = id; - impl(p_)->m_ = shm::get_mem(impl(p_)->id_, &(impl(p_)->s_)); -} - -id_t handle::detach() { - auto old = impl(p_)->id_; - impl(p_)->id_ = nullptr; - impl(p_)->m_ = nullptr; - impl(p_)->s_ = 0; - impl(p_)->n_.clear(); - return old; -} - -} // namespace shm -} // namespace ipc diff --git a/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/csrc/vision.cpp b/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/csrc/vision.cpp deleted file mode 100644 index 5d5fbfb63e035dd1efd01ca3fa226c88cc1f2409..0000000000000000000000000000000000000000 --- a/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/csrc/vision.cpp +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. -#include "nms.h" -#include "ROIAlign.h" -#include "ROIPool.h" -#include "SigmoidFocalLoss.h" -#include "dcn_v2.h" - - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { - m.def("nms", &nms, "non-maximum suppression"); - m.def("roi_align_forward", &ROIAlign_forward, "ROIAlign_forward"); - m.def("roi_align_backward", &ROIAlign_backward, "ROIAlign_backward"); - m.def("roi_pool_forward", &ROIPool_forward, "ROIPool_forward"); - m.def("roi_pool_backward", &ROIPool_backward, "ROIPool_backward"); - m.def("sigmoid_focalloss_forward", &SigmoidFocalLoss_forward, "SigmoidFocalLoss_forward"); - m.def("sigmoid_focalloss_backward", &SigmoidFocalLoss_backward, "SigmoidFocalLoss_backward"); - m.def("dcn_v2_forward", &dcn_v2_forward, "dcn_v2_forward"); - m.def("dcn_v2_backward", &dcn_v2_backward, "dcn_v2_backward"); - m.def("dcn_v2_psroi_pooling_forward", &dcn_v2_psroi_pooling_forward, "dcn_v2_psroi_pooling_forward"); - m.def("dcn_v2_psroi_pooling_backward", &dcn_v2_psroi_pooling_backward, "dcn_v2_psroi_pooling_backward"); -} diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/aiohttp/base_protocol.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/aiohttp/base_protocol.py deleted file mode 100644 index 4c9f0a752e3aa833a17b7adf0c261d19a5f083fa..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/aiohttp/base_protocol.py +++ /dev/null @@ -1,90 +0,0 @@ -import asyncio -from typing import Optional, cast - -from .tcp_helpers import tcp_nodelay - - -class BaseProtocol(asyncio.Protocol): - __slots__ = ( - "_loop", - "_paused", - "_drain_waiter", - "_connection_lost", - "_reading_paused", - "transport", - ) - - def __init__(self, loop: asyncio.AbstractEventLoop) -> None: - self._loop: asyncio.AbstractEventLoop = loop - self._paused = False - 
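- # Flow-control bookkeeping (descriptive note): pause_writing() flips
- # _paused once the transport's write buffer crosses its high-water mark;
- # writers then block in _drain_helper() on _drain_waiter until
- # resume_writing() (or connection_lost()) resolves the future.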
self._drain_waiter: Optional[asyncio.Future[None]] = None - self._reading_paused = False - - self.transport: Optional[asyncio.Transport] = None - - @property - def connected(self) -> bool: - """Return True if the connection is open.""" - return self.transport is not None - - def pause_writing(self) -> None: - assert not self._paused - self._paused = True - - def resume_writing(self) -> None: - assert self._paused - self._paused = False - - waiter = self._drain_waiter - if waiter is not None: - self._drain_waiter = None - if not waiter.done(): - waiter.set_result(None) - - def pause_reading(self) -> None: - if not self._reading_paused and self.transport is not None: - try: - self.transport.pause_reading() - except (AttributeError, NotImplementedError, RuntimeError): - pass - self._reading_paused = True - - def resume_reading(self) -> None: - if self._reading_paused and self.transport is not None: - try: - self.transport.resume_reading() - except (AttributeError, NotImplementedError, RuntimeError): - pass - self._reading_paused = False - - def connection_made(self, transport: asyncio.BaseTransport) -> None: - tr = cast(asyncio.Transport, transport) - tcp_nodelay(tr, True) - self.transport = tr - - def connection_lost(self, exc: Optional[BaseException]) -> None: - # Wake up the writer if currently paused. - self.transport = None - if not self._paused: - return - waiter = self._drain_waiter - if waiter is None: - return - self._drain_waiter = None - if waiter.done(): - return - if exc is None: - waiter.set_result(None) - else: - waiter.set_exception(exc) - - async def _drain_helper(self) -> None: - if not self.connected: - raise ConnectionResetError("Connection lost") - if not self._paused: - return - waiter = self._drain_waiter - if waiter is None: - waiter = self._loop.create_future() - self._drain_waiter = waiter - await asyncio.shield(waiter) diff --git a/spaces/Dinoking/Guccio-AI-Designer/netdissect/runningstats.py b/spaces/Dinoking/Guccio-AI-Designer/netdissect/runningstats.py deleted file mode 100644 index fe4093e0318edeecf8aebc34771adbde5043e2d4..0000000000000000000000000000000000000000 --- a/spaces/Dinoking/Guccio-AI-Designer/netdissect/runningstats.py +++ /dev/null @@ -1,773 +0,0 @@ -''' -Running statistics on the GPU using pytorch. - -RunningTopK maintains top-k statistics for a set of channels in parallel. -RunningQuantile maintains (sampled) quantile statistics for a set of channels. -''' - -import torch, math, numpy -from collections import defaultdict - -class RunningTopK: - ''' - A class to keep a running tally of the the top k values (and indexes) - of any number of torch feature components. Will work on the GPU if - the data is on the GPU. - - This version flattens all arrays to avoid crashes. - ''' - def __init__(self, k=100, state=None): - if state is not None: - self.set_state_dict(state) - return - self.k = k - self.count = 0 - # This version flattens all data internally to 2-d tensors, - # to avoid crashes with the current pytorch topk implementation. - # The data is puffed back out to arbitrary tensor shapes on ouput. - self.data_shape = None - self.top_data = None - self.top_index = None - self.next = 0 - self.linear_index = 0 - self.perm = None - - def add(self, data): - ''' - Adds a batch of data to be considered for the running top k. - The zeroth dimension enumerates the observations. All other - dimensions enumerate different features. - ''' - if self.top_data is None: - # Allocation: allocate a buffer of size 5*k, at least 10, for each. 
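- # Lazy allocation on the first batch: feature dimensions are flattened
- # to a (feature_size, buffer) layout, and the buffer is oversized to 5*k
- # per feature so a full top-k compression is only needed occasionally.
- # Rough usage sketch (illustrative; the `loader` iterable is assumed):
- #   rt = RunningTopK(k=10)
- #   for batch in loader:           # batch: (N, C, H, W) tensor
- #       rt.add(batch)
- #   values, indexes = rt.result()  # each of shape (C, H, W, k)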
- self.data_shape = data.shape[1:] - feature_size = int(numpy.prod(self.data_shape)) - self.top_data = torch.zeros( - feature_size, max(10, self.k * 5), out=data.new()) - self.top_index = self.top_data.clone().long() - self.linear_index = 0 if len(data.shape) == 1 else torch.arange( - feature_size, out=self.top_index.new()).mul_( - self.top_data.shape[-1])[:,None] - size = data.shape[0] - sk = min(size, self.k) - if self.top_data.shape[-1] < self.next + sk: - # Compression: if full, keep topk only. - self.top_data[:,:self.k], self.top_index[:,:self.k] = ( - self.result(sorted=False, flat=True)) - self.next = self.k - free = self.top_data.shape[-1] - self.next - # Pick: copy the top sk of the next batch into the buffer. - # Currently strided topk is slow. So we clone after transpose. - # TODO: remove the clone() if it becomes faster. - cdata = data.contiguous().view(size, -1).t().clone() - td, ti = cdata.topk(sk, sorted=False) - self.top_data[:,self.next:self.next+sk] = td - self.top_index[:,self.next:self.next+sk] = (ti + self.count) - self.next += sk - self.count += size - - def result(self, sorted=True, flat=False): - ''' - Returns top k data items and indexes in each dimension, - with channels in the first dimension and k in the last dimension. - ''' - k = min(self.k, self.next) - # bti are top indexes relative to buffer array. - td, bti = self.top_data[:,:self.next].topk(k, sorted=sorted) - # we want to report top indexes globally, which is ti. - ti = self.top_index.view(-1)[ - (bti + self.linear_index).view(-1) - ].view(*bti.shape) - if flat: - return td, ti - else: - return (td.view(*(self.data_shape + (-1,))), - ti.view(*(self.data_shape + (-1,)))) - - def to_(self, device): - self.top_data = self.top_data.to(device) - self.top_index = self.top_index.to(device) - if isinstance(self.linear_index, torch.Tensor): - self.linear_index = self.linear_index.to(device) - - def state_dict(self): - return dict( - constructor=self.__module__ + '.' + - self.__class__.__name__ + '()', - k=self.k, - count=self.count, - data_shape=tuple(self.data_shape), - top_data=self.top_data.cpu().numpy(), - top_index=self.top_index.cpu().numpy(), - next=self.next, - linear_index=(self.linear_index.cpu().numpy() - if isinstance(self.linear_index, torch.Tensor) - else self.linear_index), - perm=self.perm) - - def set_state_dict(self, dic): - self.k = dic['k'].item() - self.count = dic['count'].item() - self.data_shape = tuple(dic['data_shape']) - self.top_data = torch.from_numpy(dic['top_data']) - self.top_index = torch.from_numpy(dic['top_index']) - self.next = dic['next'].item() - self.linear_index = (torch.from_numpy(dic['linear_index']) - if len(dic['linear_index'].shape) > 0 - else dic['linear_index'].item()) - -class RunningQuantile: - """ - Streaming randomized quantile computation for torch. - - Add any amount of data repeatedly via add(data). At any time, - quantile estimates (or old-style percentiles) can be read out using - quantiles(q) or percentiles(p). - - Accuracy scales according to resolution: the default is to - set resolution to be accurate to better than 0.1%, - while limiting storage to about 50,000 samples. - - Good for computing quantiles of huge data without using much memory. - Works well on arbitrary data with probability near 1. - - Based on the optimal KLL quantile algorithm by Karnin, Lang, and Liberty - from FOCS 2016. 
http://ieee-focs.org/FOCS-2016-Papers/3933a071.pdf - """ - - def __init__(self, resolution=6 * 1024, buffersize=None, seed=None, - state=None): - if state is not None: - self.set_state_dict(state) - return - self.depth = None - self.dtype = None - self.device = None - self.resolution = resolution - # Default buffersize: 128 samples (and smaller than resolution). - if buffersize is None: - buffersize = min(128, (resolution + 7) // 8) - self.buffersize = buffersize - self.samplerate = 1.0 - self.data = None - self.firstfree = [0] - self.randbits = torch.ByteTensor(resolution) - self.currentbit = len(self.randbits) - 1 - self.extremes = None - self.size = 0 - - def _lazy_init(self, incoming): - self.depth = incoming.shape[1] - self.dtype = incoming.dtype - self.device = incoming.device - self.data = [torch.zeros(self.depth, self.resolution, - dtype=self.dtype, device=self.device)] - self.extremes = torch.zeros(self.depth, 2, - dtype=self.dtype, device=self.device) - self.extremes[:,0] = float('inf') - self.extremes[:,-1] = -float('inf') - - def to_(self, device): - """Switches internal storage to specified device.""" - if device != self.device: - old_data = self.data - old_extremes = self.extremes - self.data = [d.to(device) for d in self.data] - self.extremes = self.extremes.to(device) - self.device = self.extremes.device - del old_data - del old_extremes - - def add(self, incoming): - if self.depth is None: - self._lazy_init(incoming) - assert len(incoming.shape) == 2 - assert incoming.shape[1] == self.depth, (incoming.shape[1], self.depth) - self.size += incoming.shape[0] - # Convert to a flat torch array. - if self.samplerate >= 1.0: - self._add_every(incoming) - return - # If we are sampling, then subsample a large chunk at a time. - self._scan_extremes(incoming) - chunksize = int(math.ceil(self.buffersize / self.samplerate)) - for index in range(0, len(incoming), chunksize): - batch = incoming[index:index+chunksize] - sample = sample_portion(batch, self.samplerate) - if len(sample): - self._add_every(sample) - - def _add_every(self, incoming): - supplied = len(incoming) - index = 0 - while index < supplied: - ff = self.firstfree[0] - available = self.data[0].shape[1] - ff - if available == 0: - if not self._shift(): - # If we shifted by subsampling, then subsample. - incoming = incoming[index:] - if self.samplerate >= 0.5: - # First time sampling - the data source is very large. - self._scan_extremes(incoming) - incoming = sample_portion(incoming, self.samplerate) - index = 0 - supplied = len(incoming) - ff = self.firstfree[0] - available = self.data[0].shape[1] - ff - copycount = min(available, supplied - index) - self.data[0][:,ff:ff + copycount] = torch.t( - incoming[index:index + copycount,:]) - self.firstfree[0] += copycount - index += copycount - - def _shift(self): - index = 0 - # If remaining space at the current layer is less than half prev - # buffer size (rounding up), then we need to shift it up to ensure - # enough space for future shifting. 
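- # Compaction sketch: the full lower layer is sorted, every other element
- # (starting at a random 0/1 offset) is promoted one level up, and the
- # promoted samples implicitly carry twice the weight, since
- # _weighted_summary() weights level i by 2**i.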
- while self.data[index].shape[1] - self.firstfree[index] < ( - -(-self.data[index-1].shape[1] // 2) if index else 1): - if index + 1 >= len(self.data): - return self._expand() - data = self.data[index][:,0:self.firstfree[index]] - data = data.sort()[0] - if index == 0 and self.samplerate >= 1.0: - self._update_extremes(data[:,0], data[:,-1]) - offset = self._randbit() - position = self.firstfree[index + 1] - subset = data[:,offset::2] - self.data[index + 1][:,position:position + subset.shape[1]] = subset - self.firstfree[index] = 0 - self.firstfree[index + 1] += subset.shape[1] - index += 1 - return True - - def _scan_extremes(self, incoming): - # When sampling, we need to scan every item still to get extremes - self._update_extremes( - torch.min(incoming, dim=0)[0], - torch.max(incoming, dim=0)[0]) - - def _update_extremes(self, minr, maxr): - self.extremes[:,0] = torch.min( - torch.stack([self.extremes[:,0], minr]), dim=0)[0] - self.extremes[:,-1] = torch.max( - torch.stack([self.extremes[:,-1], maxr]), dim=0)[0] - - def _randbit(self): - self.currentbit += 1 - if self.currentbit >= len(self.randbits): - self.randbits.random_(to=2) - self.currentbit = 0 - return self.randbits[self.currentbit] - - def state_dict(self): - return dict( - constructor=self.__module__ + '.' + - self.__class__.__name__ + '()', - resolution=self.resolution, - depth=self.depth, - buffersize=self.buffersize, - samplerate=self.samplerate, - data=[d.cpu().numpy()[:,:f].T - for d, f in zip(self.data, self.firstfree)], - sizes=[d.shape[1] for d in self.data], - extremes=self.extremes.cpu().numpy(), - size=self.size) - - def set_state_dict(self, dic): - self.resolution = int(dic['resolution']) - self.randbits = torch.ByteTensor(self.resolution) - self.currentbit = len(self.randbits) - 1 - self.depth = int(dic['depth']) - self.buffersize = int(dic['buffersize']) - self.samplerate = float(dic['samplerate']) - firstfree = [] - buffers = [] - for d, s in zip(dic['data'], dic['sizes']): - firstfree.append(d.shape[0]) - buf = numpy.zeros((d.shape[1], s), dtype=d.dtype) - buf[:,:d.shape[0]] = d.T - buffers.append(torch.from_numpy(buf)) - self.firstfree = firstfree - self.data = buffers - self.extremes = torch.from_numpy((dic['extremes'])) - self.size = int(dic['size']) - self.dtype = self.extremes.dtype - self.device = self.extremes.device - - def minmax(self): - if self.firstfree[0]: - self._scan_extremes(self.data[0][:,:self.firstfree[0]].t()) - return self.extremes.clone() - - def median(self): - return self.quantiles([0.5])[:,0] - - def mean(self): - return self.integrate(lambda x: x) / self.size - - def variance(self): - mean = self.mean()[:,None] - return self.integrate(lambda x: (x - mean).pow(2)) / (self.size - 1) - - def stdev(self): - return self.variance().sqrt() - - def _expand(self): - cap = self._next_capacity() - if cap > 0: - # First, make a new layer of the proper capacity. - self.data.insert(0, torch.zeros(self.depth, cap, - dtype=self.dtype, device=self.device)) - self.firstfree.insert(0, 0) - else: - # Unless we're so big we are just subsampling. - assert self.firstfree[0] == 0 - self.samplerate *= 0.5 - for index in range(1, len(self.data)): - # Scan for existing data that needs to be moved down a level. 
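- # Each level is either moved down into the newly freed space below
- # (when half of the level above would still fit afterwards) or
- # "scrunched" in place: sorted and halved at a random offset, doubling
- # the effective weight of the surviving samples.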
- amount = self.firstfree[index] - if amount == 0: - continue - position = self.firstfree[index-1] - # Move data down if it would leave enough empty space there - # This is the key invariant: enough empty space to fit half - # of the previous level's buffer size (rounding up) - if self.data[index-1].shape[1] - (amount + position) >= ( - -(-self.data[index-2].shape[1] // 2) if (index-1) else 1): - self.data[index-1][:,position:position + amount] = ( - self.data[index][:,:amount]) - self.firstfree[index-1] += amount - self.firstfree[index] = 0 - else: - # Scrunch the data if it would not. - data = self.data[index][:,:amount] - data = data.sort()[0] - if index == 1: - self._update_extremes(data[:,0], data[:,-1]) - offset = self._randbit() - scrunched = data[:,offset::2] - self.data[index][:,:scrunched.shape[1]] = scrunched - self.firstfree[index] = scrunched.shape[1] - return cap > 0 - - def _next_capacity(self): - cap = int(math.ceil(self.resolution * (0.67 ** len(self.data)))) - if cap < 2: - return 0 - # Round up to the nearest multiple of 8 for better GPU alignment. - cap = -8 * (-cap // 8) - return max(self.buffersize, cap) - - def _weighted_summary(self, sort=True): - if self.firstfree[0]: - self._scan_extremes(self.data[0][:,:self.firstfree[0]].t()) - size = sum(self.firstfree) + 2 - weights = torch.FloatTensor(size) # Floating point - summary = torch.zeros(self.depth, size, - dtype=self.dtype, device=self.device) - weights[0:2] = 0 - summary[:,0:2] = self.extremes - index = 2 - for level, ff in enumerate(self.firstfree): - if ff == 0: - continue - summary[:,index:index + ff] = self.data[level][:,:ff] - weights[index:index + ff] = 2.0 ** level - index += ff - assert index == summary.shape[1] - if sort: - summary, order = torch.sort(summary, dim=-1) - weights = weights[order.view(-1).cpu()].view(order.shape) - return (summary, weights) - - def quantiles(self, quantiles, old_style=False): - if self.size == 0: - return torch.full((self.depth, len(quantiles)), torch.nan) - summary, weights = self._weighted_summary() - cumweights = torch.cumsum(weights, dim=-1) - weights / 2 - if old_style: - # To be convenient with torch.percentile - cumweights -= cumweights[:,0:1].clone() - cumweights /= cumweights[:,-1:].clone() - else: - cumweights /= torch.sum(weights, dim=-1, keepdim=True) - result = torch.zeros(self.depth, len(quantiles), - dtype=self.dtype, device=self.device) - # numpy is needed for interpolation - if not hasattr(quantiles, 'cpu'): - quantiles = torch.Tensor(quantiles) - nq = quantiles.cpu().numpy() - ncw = cumweights.cpu().numpy() - nsm = summary.cpu().numpy() - for d in range(self.depth): - result[d] = torch.tensor(numpy.interp(nq, ncw[d], nsm[d]), - dtype=self.dtype, device=self.device) - return result - - def integrate(self, fun): - result = None - for level, ff in enumerate(self.firstfree): - if ff == 0: - continue - term = torch.sum( - fun(self.data[level][:,:ff]) * (2.0 ** level), - dim=-1) - if result is None: - result = term - else: - result += term - if result is not None: - result /= self.samplerate - return result - - def percentiles(self, percentiles): - return self.quantiles(percentiles, old_style=True) - - def readout(self, count=1001, old_style=True): - return self.quantiles( - torch.linspace(0.0, 1.0, count), old_style=old_style) - - def normalize(self, data): - ''' - Given input data as taken from the training distirbution, - normalizes every channel to reflect quantile values, - uniformly distributed, within [0, 1]. 
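- For example (an illustrative call; `rq` and `features` are assumed,
- with `rq` filled via add() from (N, depth)-shaped training features):
-     normed = rq.normalize(features.t())   # features.t(): (depth, N)
- maps each channel's values to their empirical CDF positions in [0, 1].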
- ''' - assert self.size > 0 - assert data.shape[0] == self.depth - summary, weights = self._weighted_summary() - cumweights = torch.cumsum(weights, dim=-1) - weights / 2 - cumweights /= torch.sum(weights, dim=-1, keepdim=True) - result = torch.zeros_like(data).float() - # numpy is needed for interpolation - ndata = data.cpu().numpy().reshape((data.shape[0], -1)) - ncw = cumweights.cpu().numpy() - nsm = summary.cpu().numpy() - for d in range(self.depth): - normed = torch.tensor(numpy.interp(ndata[d], nsm[d], ncw[d]), - dtype=torch.float, device=data.device).clamp_(0.0, 1.0) - if len(data.shape) > 1: - normed = normed.view(*(data.shape[1:])) - result[d] = normed - return result - - -class RunningConditionalQuantile: - ''' - Equivalent to a map from conditions (any python hashable type) - to RunningQuantiles. The reason for the type is to allow limited - GPU memory to be exploited while counting quantile stats on many - different conditions, a few of which are common and which benefit - from GPU, but most of which are rare and would not all fit into - GPU RAM. - - To move a set of conditions to a device, use rcq.to_(device, conds). - Then in the future, move the tallied data to the device before - calling rcq.add, that is, rcq.add(cond, data.to(device)). - - To allow the caller to decide which conditions to allow to use GPU, - rcq.most_common_conditions(n) returns a list of the n most commonly - added conditions so far. - ''' - def __init__(self, resolution=6 * 1024, buffersize=None, seed=None, - state=None): - self.first_rq = None - self.call_stats = defaultdict(int) - self.running_quantiles = {} - if state is not None: - self.set_state_dict(state) - return - self.rq_args = dict(resolution=resolution, buffersize=buffersize, - seed=seed) - - def add(self, condition, incoming): - if condition not in self.running_quantiles: - self.running_quantiles[condition] = RunningQuantile(**self.rq_args) - if self.first_rq is None: - self.first_rq = self.running_quantiles[condition] - self.call_stats[condition] += 1 - rq = self.running_quantiles[condition] - # For performance reasons, the caller can move some conditions to - # the CPU if they are not among the most common conditions. 
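- # Illustrative pattern (the `stream` iterable below is assumed):
- #   rcq = RunningConditionalQuantile()
- #   for cond, feats in stream:            # feats: (N, depth) tensor
- #       rcq.add(cond, feats)
- #   hot = rcq.most_common_conditions(16)
- #   rcq.to_('cuda', hot)                  # keep frequent conditions on GPU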
- if rq.device is not None and (rq.device != incoming.device): - rq.to_(incoming.device) - self.running_quantiles[condition].add(incoming) - - def most_common_conditions(self, n): - return sorted(self.call_stats.keys(), - key=lambda c: -self.call_stats[c])[:n] - - def collected_add(self, conditions, incoming): - for c in conditions: - self.add(c, incoming) - - def conditional(self, c): - return self.running_quantiles[c] - - def collected_quantiles(self, conditions, quantiles, old_style=False): - result = torch.zeros( - size=(len(conditions), self.first_rq.depth, len(quantiles)), - dtype=self.first_rq.dtype, - device=self.first_rq.device) - for i, c in enumerate(conditions): - if c in self.running_quantiles: - result[i] = self.running_quantiles[c].quantiles( - quantiles, old_style) - return result - - def collected_normalize(self, conditions, values): - result = torch.zeros( - size=(len(conditions), values.shape[0], values.shape[1]), - dtype=torch.float, - device=self.first_rq.device) - for i, c in enumerate(conditions): - if c in self.running_quantiles: - result[i] = self.running_quantiles[c].normalize(values) - return result - - def to_(self, device, conditions=None): - if conditions is None: - conditions = self.running_quantiles.keys() - for cond in conditions: - if cond in self.running_quantiles: - self.running_quantiles[cond].to_(device) - - def state_dict(self): - conditions = sorted(self.running_quantiles.keys()) - result = dict( - constructor=self.__module__ + '.' + - self.__class__.__name__ + '()', - rq_args=self.rq_args, - conditions=conditions) - for i, c in enumerate(conditions): - result.update({ - '%d.%s' % (i, k): v - for k, v in self.running_quantiles[c].state_dict().items()}) - return result - - def set_state_dict(self, dic): - self.rq_args = dic['rq_args'].item() - conditions = list(dic['conditions']) - subdicts = defaultdict(dict) - for k, v in dic.items(): - if '.' in k: - p, s = k.split('.', 1) - subdicts[p][s] = v - self.running_quantiles = { - c: RunningQuantile(state=subdicts[str(i)]) - for i, c in enumerate(conditions)} - if conditions: - self.first_rq = self.running_quantiles[conditions[0]] - - # example usage: - # levels = rqc.conditional(()).quantiles(1 - fracs) - # denoms = 1 - rqc.collected_normalize(cats, levels) - # isects = 1 - rqc.collected_normalize(labels, levels) - # unions = fracs + denoms[cats] - isects - # iou = isects / unions - - - - -class RunningCrossCovariance: - ''' - Running computation. Use this when an off-diagonal block of the - covariance matrix is needed (e.g., when the whole covariance matrix - does not fit in the GPU). - - Chan-style numerically stable update of mean and full covariance matrix. - Chan, Golub. LeVeque. 1983. http://www.jstor.org/stable/2683386 - ''' - def __init__(self, state=None): - if state is not None: - self.set_state_dict(state) - return - self.count = 0 - self._mean = None - self.cmom2 = None - self.v_cmom2 = None - - def add(self, a, b): - if len(a.shape) == 1: - a = a[None, :] - b = b[None, :] - assert(a.shape[0] == b.shape[0]) - if len(a.shape) > 2: - a, b = [d.view(d.shape[0], d.shape[1], -1).permute(0, 2, 1 - ).contiguous().view(-1, d.shape[1]) for d in [a, b]] - batch_count = a.shape[0] - batch_mean = [d.sum(0) / batch_count for d in [a, b]] - centered = [d - bm for d, bm in zip([a, b], batch_mean)] - # If more than 10 billion operations, divide into batches. - sub_batch = -(-(10 << 30) // (a.shape[1] * b.shape[1])) - # Initial batch. 
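- # On the very first batch there is no prior mean, so the running count,
- # mean, per-side second moments, and the cross-moment matrix are seeded
- # directly from this batch; the Chan-style merge is only used afterwards.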
- if self._mean is None: - self.count = batch_count - self._mean = batch_mean - self.v_cmom2 = [c.pow(2).sum(0) for c in centered] - self.cmom2 = a.new(a.shape[1], b.shape[1]).zero_() - progress_addbmm(self.cmom2, centered[0][:,:,None], - centered[1][:,None,:], sub_batch) - return - # Update a batch using Chan-style update for numerical stability. - oldcount = self.count - self.count += batch_count - new_frac = float(batch_count) / self.count - # Update the mean according to the batch deviation from the old mean. - delta = [bm.sub_(m).mul_(new_frac) - for bm, m in zip(batch_mean, self._mean)] - for m, d in zip(self._mean, delta): - m.add_(d) - # Update the cross-covariance using the batch deviation - progress_addbmm(self.cmom2, centered[0][:,:,None], - centered[1][:,None,:], sub_batch) - self.cmom2.addmm_(alpha=new_frac * oldcount, - mat1=delta[0][:,None], mat2=delta[1][None,:]) - # Update the variance using the batch deviation - for c, vc2, d in zip(centered, self.v_cmom2, delta): - vc2.add_(c.pow(2).sum(0)) - vc2.add_(d.pow_(2).mul_(new_frac * oldcount)) - - def mean(self): - return self._mean - - def variance(self): - return [vc2 / (self.count - 1) for vc2 in self.v_cmom2] - - def stdev(self): - return [v.sqrt() for v in self.variance()] - - def covariance(self): - return self.cmom2 / (self.count - 1) - - def correlation(self): - covariance = self.covariance() - rstdev = [s.reciprocal() for s in self.stdev()] - cor = rstdev[0][:,None] * covariance * rstdev[1][None,:] - # Remove NaNs - cor[torch.isnan(cor)] = 0 - return cor - - def to_(self, device): - self._mean = [m.to(device) for m in self._mean] - self.v_cmom2 = [vcs.to(device) for vcs in self.v_cmom2] - self.cmom2 = self.cmom2.to(device) - - def state_dict(self): - return dict( - constructor=self.__module__ + '.' + - self.__class__.__name__ + '()', - count=self.count, - mean_a=self._mean[0].cpu().numpy(), - mean_b=self._mean[1].cpu().numpy(), - cmom2_a=self.v_cmom2[0].cpu().numpy(), - cmom2_b=self.v_cmom2[1].cpu().numpy(), - cmom2=self.cmom2.cpu().numpy()) - - def set_state_dict(self, dic): - self.count = dic['count'].item() - self._mean = [torch.from_numpy(dic[k]) for k in ['mean_a', 'mean_b']] - self.v_cmom2 = [torch.from_numpy(dic[k]) - for k in ['cmom2_a', 'cmom2_b']] - self.cmom2 = torch.from_numpy(dic['cmom2']) - -def progress_addbmm(accum, x, y, batch_size): - ''' - Break up very large adbmm operations into batches so progress can be seen. - ''' - from .progress import default_progress - if x.shape[0] <= batch_size: - return accum.addbmm_(x, y) - progress = default_progress(None) - for i in progress(range(0, x.shape[0], batch_size), desc='bmm'): - accum.addbmm_(x[i:i+batch_size], y[i:i+batch_size]) - return accum - - -def sample_portion(vec, p=0.5): - bits = torch.bernoulli(torch.zeros(vec.shape[0], dtype=torch.uint8, - device=vec.device), p) - return vec[bits] - -if __name__ == '__main__': - import warnings - warnings.filterwarnings("error") - import time - import argparse - parser = argparse.ArgumentParser( - description='Test things out') - parser.add_argument('--mode', default='cpu', help='cpu or cuda') - parser.add_argument('--test_size', type=int, default=1000000) - args = parser.parse_args() - - # An adverarial case: we keep finding more numbers in the middle - # as the stream goes on. 
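- # The interleaving below reorders the stream so successive samples keep
- # landing between values already seen, stressing the buffer-merging logic.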
- amount = args.test_size - quantiles = 1000 - data = numpy.arange(float(amount)) - data[1::2] = data[-1::-2] + (len(data) - 1) - data /= 2 - depth = 50 - test_cuda = torch.cuda.is_available() - alldata = data[:,None] + (numpy.arange(depth) * amount)[None, :] - actual_sum = torch.FloatTensor(numpy.sum(alldata * alldata, axis=0)) - amt = amount // depth - for r in range(depth): - numpy.random.shuffle(alldata[r*amt:r*amt+amt,r]) - if args.mode == 'cuda': - alldata = torch.cuda.FloatTensor(alldata) - dtype = torch.float - device = torch.device('cuda') - else: - alldata = torch.FloatTensor(alldata) - dtype = torch.float - device = None - starttime = time.time() - qc = RunningQuantile(resolution=6 * 1024) - qc.add(alldata) - # Test state dict - saved = qc.state_dict() - # numpy.savez('foo.npz', **saved) - # saved = numpy.load('foo.npz') - qc = RunningQuantile(state=saved) - assert not qc.device.type == 'cuda' - qc.add(alldata) - actual_sum *= 2 - ro = qc.readout(1001).cpu() - endtime = time.time() - gt = torch.linspace(0, amount, quantiles+1)[None,:] + ( - torch.arange(qc.depth, dtype=torch.float) * amount)[:,None] - maxreldev = torch.max(torch.abs(ro - gt) / amount) * quantiles - print("Maximum relative deviation among %d perentiles: %f" % ( - quantiles, maxreldev)) - minerr = torch.max(torch.abs(qc.minmax().cpu()[:,0] - - torch.arange(qc.depth, dtype=torch.float) * amount)) - maxerr = torch.max(torch.abs((qc.minmax().cpu()[:, -1] + 1) - - (torch.arange(qc.depth, dtype=torch.float) + 1) * amount)) - print("Minmax error %f, %f" % (minerr, maxerr)) - interr = torch.max(torch.abs(qc.integrate(lambda x: x * x).cpu() - - actual_sum) / actual_sum) - print("Integral error: %f" % interr) - medianerr = torch.max(torch.abs(qc.median() - - alldata.median(0)[0]) / alldata.median(0)[0]).cpu() - print("Median error: %f" % interr) - meanerr = torch.max( - torch.abs(qc.mean() - alldata.mean(0)) / alldata.mean(0)).cpu() - print("Mean error: %f" % meanerr) - varerr = torch.max( - torch.abs(qc.variance() - alldata.var(0)) / alldata.var(0)).cpu() - print("Variance error: %f" % varerr) - counterr = ((qc.integrate(lambda x: torch.ones(x.shape[-1]).cpu()) - - qc.size) / (0.0 + qc.size)).item() - print("Count error: %f" % counterr) - print("Time %f" % (endtime - starttime)) - # Algorithm is randomized, so some of these will fail with low probability. 
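- # The checks below compare the sketch's readout against exact ground truth;
- # tolerances are loose because the summary is a randomized approximation.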
- assert maxreldev < 1.0 - assert minerr == 0.0 - assert maxerr == 0.0 - assert interr < 0.01 - assert abs(counterr) < 0.001 - print("OK") diff --git a/spaces/DrBenjamin/AI_Demo/AI_Demo.py b/spaces/DrBenjamin/AI_Demo/AI_Demo.py deleted file mode 100644 index 87d84046431566bf40c5d6543bba9045d03b1089..0000000000000000000000000000000000000000 --- a/spaces/DrBenjamin/AI_Demo/AI_Demo.py +++ /dev/null @@ -1,291 +0,0 @@ -##### `AI_Demo.py` -##### AI Demo, hosted on https://huggingface.co/spaces/DrBenjamin/AI_Demo -##### Please reach out to ben@benbox.org for any questions -#### Loading needed Python libraries -import streamlit as st -import numpy as np -import audio2numpy as a2n -from pydub import AudioSegment -import cv2 -from PIL import Image -import torch -from diffusers import StableDiffusionPipeline -from diffusers import StableDiffusionImg2ImgPipeline -from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler -from transformers import pipeline, set_seed -from transformers import VisionEncoderDecoderModel, ViTImageProcessor, AutoTokenizer -import os - -os.environ['COMMANDLINE_ARGS'] = '--skip-torch-cuda-test --precision full --no-half' -os.environ['KMP_DUPLICATE_LIB_OK'] = 'True' - - -#### Functions -### Function predict_step = Image to Text recognition -def predict_step(image): - if image.mode != "RGB": - image = image.convert(mode = "RGB") - pixel_values = feature_extractor(images = image, return_tensors = "pt").pixel_values - pixel_values = pixel_values.to(device) - output_ids = model.generate(pixel_values, **gen_kwargs) - preds = tokenizer.batch_decode(output_ids, skip_special_tokens = True) - preds = [pred.strip() for pred in preds] - return str(preds[0]).capitalize() + '.' - - -#### Models -st.header('🤗 Hugging Face Diffusers') -st.write('State-of-the-art diffusion models for image, text and audio generation in PyTorch.') -devices = ["mps", "cpu", "cuda"] -device = st.selectbox(label = 'Select device', options = devices, index = 1, disabled = True) -st.write(':orange[MPS for Mac (Metal Performance Shaders), CPU for all systems and CUDA for systems with NVIDIA GPU.]') -models = ["runwayml/stable-diffusion-v1-5", "stabilityai/stable-diffusion-2-1", "hakurei/waifu-diffusion", "stabilityai/stable-diffusion-2-base", - "nlpconnect/vit-gpt2-image-captioning", "openai-gpt", "gpt2-large", "openai/whisper-large-v2"] -model_id_or_path = st.selectbox(label = 'Select model', options = models, index = 5, disabled = True) -if model_id_or_path == "runwayml/stable-diffusion-v1-5": - st.write(':orange[Stable Diffusion v1-5 is the state of the art text-to-image model.]') -elif model_id_or_path == "stabilityai/stable-diffusion-2-1": - st.write(':orange[New stable diffusion text-to-image model at 768x768 resolution.]') -elif model_id_or_path == "stabilityai/stable-diffusion-2-base": - st.write(':orange[New stable diffusion text-to-image model at 512x512 resolution.]') -elif model_id_or_path == "hakurei/waifu-diffusion": - st.write( - ':orange[waifu-diffusion is a latent text-to-image diffusion model that has been conditioned on high-quality anime images through fine-tuning.]') -elif model_id_or_path == "nlpconnect/vit-gpt2-image-captioning": - st.write(':orange[vit-gpt2 is an image captioning model.]') -elif model_id_or_path == "openai-gpt": - st.write( - ':orange[openai-gpt is a transformer-based language model created and released by OpenAI. 
The model is a causal (unidirectional) transformer pre-trained using language modeling on a large corpus with long range dependencies.]') -elif model_id_or_path == "gpt2-large": - st.write( - ':orange[GPT-2 Large is the 774M parameter version of GPT-2, a transformer-based language model created and released by OpenAI. The model is a pretrained model on English language using a causal language modeling (CLM) objective.]') -elif model_id_or_path == "openai/whisper-large-v2": - st.write(':orange[Whisper is a pre-trained model for automatic speech recognition (ASR) and speech translation.]') - -control_net_models = ["None", "lllyasviel/sd-controlnet-canny", "lllyasviel/sd-controlnet-scribble"] -if model_id_or_path == "runwayml/stable-diffusion-v1-5": - disable = False -else: - disable = True -control_net_model = st.selectbox(label = 'Select control net model', options = control_net_models, disabled = disable) -if control_net_model == "lllyasviel/sd-controlnet-canny": - st.write( - ':orange[ControlNet is a neural network structure to control diffusion models by adding extra conditions. This checkpoint corresponds to the ControlNet conditioned on Canny edges.]') -elif control_net_model == "lllyasviel/sd-controlnet-scribble": - st.write( - ':orange[ControlNet is a neural network structure to control diffusion models by adding extra conditions. This checkpoint corresponds to the ControlNet conditioned on Scribble images.]') -if model_id_or_path != "runwayml/stable-diffusion-v1-5": - control_net_model = "None" - -#### Stable diffusion image 2 image with Control Net -if model_id_or_path == "runwayml/stable-diffusion-v1-5" and control_net_model != "None": - with st.form('img2img (Control Net)'): - st.subheader('Image 2 Image (Control Net)') - st.write('Create an image from text input with an image as template.') - image = '' - uploaded_file = st.file_uploader(label = "Upload a picture", type = 'png') - prompt = st.text_input(label = 'Prompt', - value = 'A picture in comic style, bright colours, a house with red bricks, a dark sky with a full yellow moon, best quality, extremely detailed.') - submitted = st.form_submit_button('Submit') - if submitted: - # Check for image data - if uploaded_file is not None: - image = cv2.imdecode(np.frombuffer(uploaded_file.getvalue(), np.uint8), cv2.COLOR_GRAY2BGR) - image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) - - # Resize image if existend and not 768x640 / 640x768 pixel - h, w = image.shape - if not (h == 768 and w == 640) and not (h == 640 and w == 768): - # Image is bigger in height than width - if h > w: - # Resize cropped image to standard dimensions - image = cv2.resize(image, (640, 768), interpolation = cv2.INTER_AREA) - - # Image is smaller in height than width - else: - # Resize cropped image to standard dimensions - image = cv2.resize(image, (768, 640), interpolation = cv2.INTER_AREA) - - # Get canny image - image = cv2.Canny(image, 100, 200) - image = image[:, :, None] - image = np.concatenate([image, image, image], axis = 2) - canny_image = Image.fromarray(image) - st.subheader('Preview annotator result') - st.image(canny_image) - - # Load control net and stable diffusion v1-5 - controlnet = ControlNetModel.from_pretrained(control_net_model, torch_dtype = torch.float32) - pipe = StableDiffusionControlNetPipeline.from_pretrained(model_id_or_path, controlnet = controlnet, torch_dtype = torch.float32) - pipe = pipe.to(device) - - # Recommended if your computer has < 64 GB of RAM - pipe.enable_attention_slicing() - - # Speed up diffusion process with 
faster scheduler and memory optimization - pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config) - - # Generate image - generator = torch.manual_seed(0) - image = pipe(prompt = prompt, negative_prompt = "monochrome, lowres, bad anatomy, worst quality, low quality", num_inference_steps = 30, - generator = generator, image = canny_image).images[0] - st.subheader('Diffuser result') - st.write('Model :orange[' + model_id_or_path + '] + :red[' + control_net_model + ']') - st.image(image) - -## Stable-Diffusion -if model_id_or_path == "runwayml/stable-diffusion-v1-5" and control_net_model == "None": - with st.form('img2img'): - st.subheader('Image 2 Image') - st.write('Create an image from text input with an image as template.') - image = '' - uploaded_file = st.file_uploader(label = "Upload a picture", type = 'png') - prompt = st.text_input(label = 'Prompt', - value = 'A picture in comic style, bright colours, a house with red bricks, a dark sky with a full yellow moon, best quality, extremely detailed.') - submitted = st.form_submit_button('Submit') - if submitted: - # Check for image data - if uploaded_file is not None: - image = cv2.imdecode(np.frombuffer(uploaded_file.getvalue(), np.uint8), cv2.IMREAD_COLOR) - - # Resize image if existend and not 768x640 / 640x768 pixel - h, w, _ = image.shape - if not (h == 768 and w == 640) and not (h == 640 and w == 768): - # Image is bigger in height than width - if h > w: - # Resize cropped image to standard dimensions - image = cv2.resize(image, (640, 768), interpolation = cv2.INTER_AREA) - - # Image is smaller in height than width - else: - # Resize cropped image to standard dimensions - image = cv2.resize(image, (768, 640), interpolation = cv2.INTER_AREA) - image = Image.fromarray(image) - - # Load the pipeline - pipe = StableDiffusionImg2ImgPipeline.from_pretrained(model_id_or_path, torch_dtype = torch.float32) - pipe = pipe.to(device) - - # Recommended if your computer has < 64 GB of RAM - pipe.enable_attention_slicing() - - # Speed up diffusion process with faster scheduler and memory optimization - pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config) - - # Create new image - images = pipe(prompt = prompt, negative_prompt = "monochrome, lowres, bad anatomy, worst quality, low quality", num_inference_steps = 30, - image = image, strength = 0.75, guidance_scale = 7.5).images - - # Show image - st.subheader('Diffuser result') - st.write('Model :orange[' + model_id_or_path + ']') - st.image(images[0]) - -#### Stable diffusion txt 2 image -if control_net_model == "None" and model_id_or_path != "nlpconnect/vit-gpt2-image-captioning" and model_id_or_path != "openai-gpt" and model_id_or_path != "gpt2-large" and model_id_or_path != "openai/whisper-large-v2": - with st.form('txt2img'): - st.subheader('Text 2 Image') - st.write('Create an image from text input.') - if model_id_or_path == "runwayml/stable-diffusion-v1-5" or model_id_or_path == "stabilityai/stable-diffusion-2-1": - value = 'A picture in comic style, bright colours, a house with red bricks, a dark sky with a full yellow moon, best quality, extremely detailed.' - if model_id_or_path == "hakurei/waifu-diffusion": - value = 'A picture in Anime style, bright colours, a house with red bricks, a dark sky with a full yellow moon, best quality, extremely detailed.' 
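- # Each supported model gets a default prompt suited to its training domain.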
- if model_id_or_path == "stabilityai/stable-diffusion-2-base": - value = 'A picture in comic style, a castle with grey bricks in the background, a river is going through, a blue sky with a full yellow sun, best quality, extremely detailed.' - - prompt = st.text_input(label = 'Prompt', value = value) - submitted = st.form_submit_button('Submit') - if submitted: - # Make sure you're logged in with `huggingface-cli login` - pipe = StableDiffusionPipeline.from_pretrained(model_id_or_path) - pipe = pipe.to(device) - - # Recommended if your computer has < 64 GB of RAM - pipe.enable_attention_slicing() - - # Speed up diffusion process with faster scheduler and memory optimization - pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config) - - # Results - if model_id_or_path == "hakurei/waifu-diffusion": - negative = "several scenes, more than one image, split picture" - else: - negative = "monochrome, lowres, bad anatomy, worst quality, low quality" - image = pipe(prompt = prompt, negative_prompt = negative, num_inference_steps = 30, guidance_scale = 7.5).images[0] - st.subheader('Diffuser result') - st.write('Model :orange[' + model_id_or_path + ']') - st.image(image) - -#### Text (OpenAI gpt models) -if model_id_or_path == "openai-gpt" or model_id_or_path == "gpt2-large": - with st.form('GPT'): - st.subheader('Text generation') - st.write('Create text which is generated from text input.') - text_input = st.text_input(label = 'Give a start of a sentence', value = 'This is a test ') - submitted = st.form_submit_button('Submit') - if submitted: - generator = pipeline('text-generation', model = model_id_or_path) - set_seed(42) - generated = generator(text_input, max_length = 150, num_return_sequences = 1) - st.subheader('Diffuser result') - st.write('Model :orange[' + model_id_or_path + ']') - st.markdown('Text: ":green[' + str(generated[0]['generated_text']) + ']"') - -#### Image to text -if model_id_or_path == "nlpconnect/vit-gpt2-image-captioning": - with st.form('Image2Text'): - st.subheader('Image 2 Text') - st.write('Create a description of an image.') - image = '' - uploaded_file = st.file_uploader(label = "Upload a picture", type = 'png') - submitted = st.form_submit_button('Submit') - if submitted: - # Check for image data - if uploaded_file is not None: - image = cv2.imdecode(np.frombuffer(uploaded_file.getvalue(), np.uint8), cv2.IMREAD_COLOR) - image = Image.fromarray(image) - model = VisionEncoderDecoderModel.from_pretrained(model_id_or_path) - feature_extractor = ViTImageProcessor.from_pretrained(model_id_or_path) - tokenizer = AutoTokenizer.from_pretrained(model_id_or_path) - device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - model.to(device) - max_length = 16 - num_beams = 4 - gen_kwargs = {"max_length": max_length, "num_beams": num_beams} - output = predict_step(image) - st.subheader('Diffuser result') - st.write('Model :orange[nlpconnect/vit-gpt2-image-captioning]') - st.write('Description: ":green[' + str(output) + ']"') - -#### Whisper Model -if model_id_or_path == "openai/whisper-large-v2": - with st.form('Image2Text'): - st.subheader('Audio 2 Text') - st.write('Create a transcription of an audio file.') - audio_file = st.file_uploader(label = "Upload an audio file", type = 'mp3') - submitted = st.form_submit_button('Submit') - if submitted: - if audio_file is not None: - audio = audio_file.getvalue() - with open("temp.mp3", "wb") as binary_file: - # Write bytes to file - binary_file.write(audio) - - # Calling the split_to_mono method on 
the stereo audio file - stereo_audio = AudioSegment.from_file("temp.mp3", format = "mp3") - mono_audios = stereo_audio.split_to_mono() - mono_audios[0].export("temp.mp3", format = "mp3") - - # Mp3 file to numpy array - audio, sr = a2n.audio_from_file('temp.mp3') - st.audio('temp.mp3') - if os.path.exists("temp.mp3"): - os.remove("temp.mp3") - - # Load model and processor - pipe = pipeline("automatic-speech-recognition", model = "openai/whisper-large-v2", chunk_length_s = 30, device = "cpu", - ignore_warning = True) - prediction = pipe(audio, sampling_rate = sr)["text"] - st.subheader('Preview used audio') - st.write('Model :orange[' + model_id_or_path + ']') - st.write('Transcript: ":green[' + str(prediction) + ']"') diff --git a/spaces/DragGan/DragGan/stylegan_human/openpose/src/__init__.py b/spaces/DragGan/DragGan/stylegan_human/openpose/src/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/ECCV2022/PSG/OpenPSG/configs/_base_/models/psgtr_r50.py b/spaces/ECCV2022/PSG/OpenPSG/configs/_base_/models/psgtr_r50.py deleted file mode 100644 index 96eccd68df077c5de98613fe62d4bcacb5b7f5a4..0000000000000000000000000000000000000000 --- a/spaces/ECCV2022/PSG/OpenPSG/configs/_base_/models/psgtr_r50.py +++ /dev/null @@ -1,82 +0,0 @@ -model = dict( - type='PSGTr', - backbone=dict(type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=False), - norm_eval=True, - style='pytorch', - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet50')), - bbox_head=dict(type='PSGTrHead', - num_classes=80, - num_relations=117, - in_channels=2048, - transformer=dict( - type='Transformer', - encoder=dict(type='DetrTransformerEncoder', - num_layers=6, - transformerlayers=dict( - type='BaseTransformerLayer', - attn_cfgs=[ - dict(type='MultiheadAttention', - embed_dims=256, - num_heads=8, - dropout=0.1) - ], - feedforward_channels=2048, - ffn_dropout=0.1, - operation_order=('self_attn', 'norm', - 'ffn', 'norm'))), - decoder=dict( - type='DetrTransformerDecoder', - return_intermediate=True, - num_layers=6, - transformerlayers=dict( - type='DetrTransformerDecoderLayer', - attn_cfgs=dict(type='MultiheadAttention', - embed_dims=256, - num_heads=8, - dropout=0.1), - feedforward_channels=2048, - ffn_dropout=0.1, - operation_order=('self_attn', 'norm', - 'cross_attn', 'norm', 'ffn', - 'norm')), - )), - positional_encoding=dict(type='SinePositionalEncoding', - num_feats=128, - normalize=True), - sub_loss_cls=dict(type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0, - class_weight=1.0), - sub_loss_bbox=dict(type='L1Loss', loss_weight=5.0), - sub_loss_iou=dict(type='GIoULoss', loss_weight=2.0), - sub_focal_loss=dict(type='BCEFocalLoss', loss_weight=1.0), - sub_dice_loss=dict(type='psgtrDiceLoss', loss_weight=1.0), - obj_loss_cls=dict(type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0, - class_weight=1.0), - obj_loss_bbox=dict(type='L1Loss', loss_weight=5.0), - obj_loss_iou=dict(type='GIoULoss', loss_weight=2.0), - obj_focal_loss=dict(type='BCEFocalLoss', loss_weight=1.0), - obj_dice_loss=dict(type='psgtrDiceLoss', loss_weight=1.0), - rel_loss_cls=dict(type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=2.0, - class_weight=1.0)), - # training and testing settings - train_cfg=dict(assigner=dict( - type='HTriMatcher', - s_cls_cost=dict(type='ClassificationCost', weight=1.), - s_reg_cost=dict(type='BBoxL1Cost', 
weight=5.0), - s_iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0), - o_cls_cost=dict(type='ClassificationCost', weight=1.), - o_reg_cost=dict(type='BBoxL1Cost', weight=5.0), - o_iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0), - r_cls_cost=dict(type='ClassificationCost', weight=2.))), - test_cfg=dict(max_per_img=100)) diff --git a/spaces/ECCV2022/bytetrack/tutorials/motr/evaluation.py b/spaces/ECCV2022/bytetrack/tutorials/motr/evaluation.py deleted file mode 100644 index 2be0d672e160e78361f94916e319cd5ee5f2310d..0000000000000000000000000000000000000000 --- a/spaces/ECCV2022/bytetrack/tutorials/motr/evaluation.py +++ /dev/null @@ -1,207 +0,0 @@ -# ------------------------------------------------------------------------ -# Copyright (c) 2021 megvii-model. All Rights Reserved. -# ------------------------------------------------------------------------ -# Modified from Deformable DETR (https://github.com/fundamentalvision/Deformable-DETR) -# Copyright (c) 2020 SenseTime. All Rights Reserved. -# ------------------------------------------------------------------------ -# Modified from DETR (https://github.com/facebookresearch/detr) -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -# ------------------------------------------------------------------------ - - -import os -import numpy as np -import copy -import motmetrics as mm -mm.lap.default_solver = 'lap' -import os -from typing import Dict -import numpy as np -import logging - -def read_results(filename, data_type: str, is_gt=False, is_ignore=False): - if data_type in ('mot', 'lab'): - read_fun = read_mot_results - else: - raise ValueError('Unknown data type: {}'.format(data_type)) - - return read_fun(filename, is_gt, is_ignore) - -# def read_mot_results(filename, is_gt, is_ignore): -# results_dict = dict() -# if os.path.isfile(filename): -# with open(filename, 'r') as f: -# for line in f.readlines(): -# linelist = line.split(',') -# if len(linelist) < 7: -# continue -# fid = int(linelist[0]) -# if fid < 1: -# continue -# results_dict.setdefault(fid, list()) - -# if is_gt: -# mark = int(float(linelist[6])) -# if mark == 0 : -# continue -# score = 1 -# elif is_ignore: -# score = 1 -# else: -# score = float(linelist[6]) - -# tlwh = tuple(map(float, linelist[2:6])) -# target_id = int(float(linelist[1])) -# results_dict[fid].append((tlwh, target_id, score)) - -# return results_dict - -def read_mot_results(filename, is_gt, is_ignore): - valid_labels = {1} - ignore_labels = {0, 2, 7, 8, 12} - results_dict = dict() - if os.path.isfile(filename): - with open(filename, 'r') as f: - for line in f.readlines(): - linelist = line.split(',') - if len(linelist) < 7: - continue - fid = int(linelist[0]) - if fid < 1: - continue - results_dict.setdefault(fid, list()) - - if is_gt: - if 'MOT16-' in filename or 'MOT17-' in filename: - label = int(float(linelist[7])) - mark = int(float(linelist[6])) - if mark == 0 or label not in valid_labels: - continue - score = 1 - elif is_ignore: - if 'MOT16-' in filename or 'MOT17-' in filename: - label = int(float(linelist[7])) - vis_ratio = float(linelist[8]) - if label not in ignore_labels and vis_ratio >= 0: - continue - elif 'MOT15' in filename: - label = int(float(linelist[6])) - if label not in ignore_labels: - continue - else: - continue - score = 1 - else: - score = float(linelist[6]) - - tlwh = tuple(map(float, linelist[2:6])) - target_id = int(linelist[1]) - - results_dict[fid].append((tlwh, target_id, score)) - - return results_dict - -def unzip_objs(objs): - if 
len(objs) > 0: - tlwhs, ids, scores = zip(*objs) - else: - tlwhs, ids, scores = [], [], [] - tlwhs = np.asarray(tlwhs, dtype=float).reshape(-1, 4) - return tlwhs, ids, scores - - -class Evaluator(object): - def __init__(self, data_root, seq_name, data_type='mot'): - - self.data_root = data_root - self.seq_name = seq_name - self.data_type = data_type - - self.load_annotations() - self.reset_accumulator() - - def load_annotations(self): - assert self.data_type == 'mot' - - gt_filename = os.path.join(self.data_root, self.seq_name, 'gt', 'gt.txt') - self.gt_frame_dict = read_results(gt_filename, self.data_type, is_gt=True) - self.gt_ignore_frame_dict = read_results(gt_filename, self.data_type, is_ignore=True) - - def reset_accumulator(self): - self.acc = mm.MOTAccumulator(auto_id=True) - - def eval_frame(self, frame_id, trk_tlwhs, trk_ids, rtn_events=False): - # results - trk_tlwhs = np.copy(trk_tlwhs) - trk_ids = np.copy(trk_ids) - - # gts - gt_objs = self.gt_frame_dict.get(frame_id, []) - gt_tlwhs, gt_ids = unzip_objs(gt_objs)[:2] - - # ignore boxes - ignore_objs = self.gt_ignore_frame_dict.get(frame_id, []) - ignore_tlwhs = unzip_objs(ignore_objs)[0] - # remove ignored results - keep = np.ones(len(trk_tlwhs), dtype=bool) - iou_distance = mm.distances.iou_matrix(ignore_tlwhs, trk_tlwhs, max_iou=0.5) - if len(iou_distance) > 0: - match_is, match_js = mm.lap.linear_sum_assignment(iou_distance) - match_is, match_js = map(lambda a: np.asarray(a, dtype=int), [match_is, match_js]) - match_ious = iou_distance[match_is, match_js] - - match_js = np.asarray(match_js, dtype=int) - match_js = match_js[np.logical_not(np.isnan(match_ious))] - keep[match_js] = False - trk_tlwhs = trk_tlwhs[keep] - trk_ids = trk_ids[keep] - - # get distance matrix - iou_distance = mm.distances.iou_matrix(gt_tlwhs, trk_tlwhs, max_iou=0.5) - - # acc - self.acc.update(gt_ids, trk_ids, iou_distance) - - if rtn_events and iou_distance.size > 0 and hasattr(self.acc, 'last_mot_events'): - events = self.acc.last_mot_events # only supported by https://github.com/longcw/py-motmetrics - else: - events = None - return events - - def eval_file(self, filename): - self.reset_accumulator() - - result_frame_dict = read_results(filename, self.data_type, is_gt=False) - #frames = sorted(list(set(self.gt_frame_dict.keys()) | set(result_frame_dict.keys()))) - frames = sorted(list(set(result_frame_dict.keys()))) - - for frame_id in frames: - trk_objs = result_frame_dict.get(frame_id, []) - trk_tlwhs, trk_ids = unzip_objs(trk_objs)[:2] - self.eval_frame(frame_id, trk_tlwhs, trk_ids, rtn_events=False) - - return self.acc - - @staticmethod - def get_summary(accs, names, metrics=('mota', 'num_switches', 'idp', 'idr', 'idf1', 'precision', 'recall')): - names = copy.deepcopy(names) - if metrics is None: - metrics = mm.metrics.motchallenge_metrics - metrics = copy.deepcopy(metrics) - - mh = mm.metrics.create() - summary = mh.compute_many( - accs, - metrics=metrics, - names=names, - generate_overall=True - ) - - return summary - - @staticmethod - def save_summary(summary, filename): - import pandas as pd - writer = pd.ExcelWriter(filename) - summary.to_excel(writer) - writer.save() diff --git a/spaces/EDGAhab/Paimon-Talking/modules.py b/spaces/EDGAhab/Paimon-Talking/modules.py deleted file mode 100644 index 9c7fd9cd6eb8b7e0ec0e08957e970744a374a924..0000000000000000000000000000000000000000 --- a/spaces/EDGAhab/Paimon-Talking/modules.py +++ /dev/null @@ -1,390 +0,0 @@ -import copy -import math -import numpy as np -import scipy -import torch -from torch 
import nn -from torch.nn import functional as F - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm - -import commons -from commons import init_weights, get_padding -from transforms import piecewise_rational_quadratic_transform - - -LRELU_SLOPE = 0.1 - - -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - - -class ConvReluNorm(nn.Module): - def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout): - super().__init__() - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.out_channels = out_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - assert n_layers > 1, "Number of layers should be larger than 0." - - self.conv_layers = nn.ModuleList() - self.norm_layers = nn.ModuleList() - self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential( - nn.ReLU(), - nn.Dropout(p_dropout)) - for _ in range(n_layers-1): - self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask): - x_org = x - for i in range(self.n_layers): - x = self.conv_layers[i](x * x_mask) - x = self.norm_layers[i](x) - x = self.relu_drop(x) - x = x_org + self.proj(x) - return x * x_mask - - -class DDSConv(nn.Module): - """ - Dialted and Depth-Separable Convolution - """ - def __init__(self, channels, kernel_size, n_layers, p_dropout=0.): - super().__init__() - self.channels = channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - - self.drop = nn.Dropout(p_dropout) - self.convs_sep = nn.ModuleList() - self.convs_1x1 = nn.ModuleList() - self.norms_1 = nn.ModuleList() - self.norms_2 = nn.ModuleList() - for i in range(n_layers): - dilation = kernel_size ** i - padding = (kernel_size * dilation - dilation) // 2 - self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size, - groups=channels, dilation=dilation, padding=padding - )) - self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) - self.norms_1.append(LayerNorm(channels)) - self.norms_2.append(LayerNorm(channels)) - - def forward(self, x, x_mask, g=None): - if g is not None: - x = x + g - for i in range(self.n_layers): - y = self.convs_sep[i](x * x_mask) - y = self.norms_1[i](y) - y = F.gelu(y) - y = self.convs_1x1[i](y) - y = self.norms_2[i](y) - y = F.gelu(y) - y = self.drop(y) - x = x + y - return x * x_mask - - -class WN(torch.nn.Module): - def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0): - super(WN, self).__init__() - assert(kernel_size % 2 == 1) - self.hidden_channels =hidden_channels - self.kernel_size = kernel_size, - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.p_dropout = p_dropout - - self.in_layers = 
torch.nn.ModuleList() - self.res_skip_layers = torch.nn.ModuleList() - self.drop = nn.Dropout(p_dropout) - - if gin_channels != 0: - cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight') - - for i in range(n_layers): - dilation = dilation_rate ** i - padding = int((kernel_size * dilation - dilation) / 2) - in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size, - dilation=dilation, padding=padding) - in_layer = torch.nn.utils.weight_norm(in_layer, name='weight') - self.in_layers.append(in_layer) - - # last one is not necessary - if i < n_layers - 1: - res_skip_channels = 2 * hidden_channels - else: - res_skip_channels = hidden_channels - - res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight') - self.res_skip_layers.append(res_skip_layer) - - def forward(self, x, x_mask, g=None, **kwargs): - output = torch.zeros_like(x) - n_channels_tensor = torch.IntTensor([self.hidden_channels]) - - if g is not None: - g = self.cond_layer(g) - - for i in range(self.n_layers): - x_in = self.in_layers[i](x) - if g is not None: - cond_offset = i * 2 * self.hidden_channels - g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:] - else: - g_l = torch.zeros_like(x_in) - - acts = commons.fused_add_tanh_sigmoid_multiply( - x_in, - g_l, - n_channels_tensor) - acts = self.drop(acts) - - res_skip_acts = self.res_skip_layers[i](acts) - if i < self.n_layers - 1: - res_acts = res_skip_acts[:,:self.hidden_channels,:] - x = (x + res_acts) * x_mask - output = output + res_skip_acts[:,self.hidden_channels:,:] - else: - output = output + res_skip_acts - return output * x_mask - - def remove_weight_norm(self): - if self.gin_channels != 0: - torch.nn.utils.remove_weight_norm(self.cond_layer) - for l in self.in_layers: - torch.nn.utils.remove_weight_norm(l) - for l in self.res_skip_layers: - torch.nn.utils.remove_weight_norm(l) - - -class ResBlock1(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.convs1 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]))) - ]) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))) - ]) - self.convs2.apply(init_weights) - - def forward(self, x, x_mask=None): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c2(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - 
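- # Lighter residual block: a single stack of weight-normalized dilated convs
- # with leaky-ReLU activations and an additive skip, selected as the "2"
- # variant by the generator configuration.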
def __init__(self, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.convs = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))) - ]) - self.convs.apply(init_weights) - - def forward(self, x, x_mask=None): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class Log(nn.Module): - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask - logdet = torch.sum(-y, [1, 2]) - return y, logdet - else: - x = torch.exp(x) * x_mask - return x - - -class Flip(nn.Module): - def forward(self, x, *args, reverse=False, **kwargs): - x = torch.flip(x, [1]) - if not reverse: - logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) - return x, logdet - else: - return x - - -class ElementwiseAffine(nn.Module): - def __init__(self, channels): - super().__init__() - self.channels = channels - self.m = nn.Parameter(torch.zeros(channels,1)) - self.logs = nn.Parameter(torch.zeros(channels,1)) - - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = self.m + torch.exp(self.logs) * x - y = y * x_mask - logdet = torch.sum(self.logs * x_mask, [1,2]) - return y, logdet - else: - x = (x - self.m) * torch.exp(-self.logs) * x_mask - return x - - -class ResidualCouplingLayer(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=0, - gin_channels=0, - mean_only=False): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels) - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels]*2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1,2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x - - -class ConvFlow(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0): - super().__init__() - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.num_bins = num_bins - self.tail_bound = tail_bound - self.half_channels = in_channels // 2 - - self.pre = nn.Conv1d(self.half_channels, 
filter_channels, 1) - self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.) - self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) - h = self.convs(h, x_mask, g=g) - h = self.proj(h) * x_mask - - b, c, t = x0.shape - h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] - - unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_derivatives = h[..., 2 * self.num_bins:] - - x1, logabsdet = piecewise_rational_quadratic_transform(x1, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=reverse, - tails='linear', - tail_bound=self.tail_bound - ) - - x = torch.cat([x0, x1], 1) * x_mask - logdet = torch.sum(logabsdet * x_mask, [1,2]) - if not reverse: - return x, logdet - else: - return x diff --git a/spaces/Enterprisium/Easy_GUI/lib/infer_pack/modules/F0Predictor/PMF0Predictor.py b/spaces/Enterprisium/Easy_GUI/lib/infer_pack/modules/F0Predictor/PMF0Predictor.py deleted file mode 100644 index b2c592527a5966e6f8e79e8c52dc5b414246dcc6..0000000000000000000000000000000000000000 --- a/spaces/Enterprisium/Easy_GUI/lib/infer_pack/modules/F0Predictor/PMF0Predictor.py +++ /dev/null @@ -1,97 +0,0 @@ -from lib.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor -import parselmouth -import numpy as np - - -class PMF0Predictor(F0Predictor): - def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100): - self.hop_length = hop_length - self.f0_min = f0_min - self.f0_max = f0_max - self.sampling_rate = sampling_rate - - def interpolate_f0(self, f0): - """ - 对F0进行插值处理 - """ - - data = np.reshape(f0, (f0.size, 1)) - - vuv_vector = np.zeros((data.size, 1), dtype=np.float32) - vuv_vector[data > 0.0] = 1.0 - vuv_vector[data <= 0.0] = 0.0 - - ip_data = data - - frame_number = data.size - last_value = 0.0 - for i in range(frame_number): - if data[i] <= 0.0: - j = i + 1 - for j in range(i + 1, frame_number): - if data[j] > 0.0: - break - if j < frame_number - 1: - if last_value > 0.0: - step = (data[j] - data[i - 1]) / float(j - i) - for k in range(i, j): - ip_data[k] = data[i - 1] + step * (k - i + 1) - else: - for k in range(i, j): - ip_data[k] = data[j] - else: - for k in range(i, frame_number): - ip_data[k] = last_value - else: - ip_data[i] = data[i] # 这里可能存在一个没有必要的拷贝 - last_value = data[i] - - return ip_data[:, 0], vuv_vector[:, 0] - - def compute_f0(self, wav, p_len=None): - x = wav - if p_len is None: - p_len = x.shape[0] // self.hop_length - else: - assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error" - time_step = self.hop_length / self.sampling_rate * 1000 - f0 = ( - parselmouth.Sound(x, self.sampling_rate) - .to_pitch_ac( - time_step=time_step / 1000, - voicing_threshold=0.6, - pitch_floor=self.f0_min, - pitch_ceiling=self.f0_max, - ) - .selected_array["frequency"] - ) - - pad_size = (p_len - len(f0) + 1) // 2 - if pad_size > 0 or p_len - len(f0) - pad_size > 0: - f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant") - f0, uv = self.interpolate_f0(f0) - return f0 - - def compute_f0_uv(self, wav, p_len=None): - x = wav - if p_len is None: - p_len = x.shape[0] // self.hop_length - else: - assert abs(p_len - 
x.shape[0] // self.hop_length) < 4, "pad length error" - time_step = self.hop_length / self.sampling_rate * 1000 - f0 = ( - parselmouth.Sound(x, self.sampling_rate) - .to_pitch_ac( - time_step=time_step / 1000, - voicing_threshold=0.6, - pitch_floor=self.f0_min, - pitch_ceiling=self.f0_max, - ) - .selected_array["frequency"] - ) - - pad_size = (p_len - len(f0) + 1) // 2 - if pad_size > 0 or p_len - len(f0) - pad_size > 0: - f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant") - f0, uv = self.interpolate_f0(f0) - return f0, uv diff --git a/spaces/EronSamez/RVC_HFmeu/lib/infer_pack/models_dml.py b/spaces/EronSamez/RVC_HFmeu/lib/infer_pack/models_dml.py deleted file mode 100644 index 958d7b29259763d2fea94caf8ba7e314c4a77d05..0000000000000000000000000000000000000000 --- a/spaces/EronSamez/RVC_HFmeu/lib/infer_pack/models_dml.py +++ /dev/null @@ -1,1124 +0,0 @@ -import math, pdb, os -from time import time as ttime -import torch -from torch import nn -from torch.nn import functional as F -from lib.infer_pack import modules -from lib.infer_pack import attentions -from lib.infer_pack import commons -from lib.infer_pack.commons import init_weights, get_padding -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from lib.infer_pack.commons import init_weights -import numpy as np -from lib.infer_pack import commons - - -class TextEncoder256(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class TextEncoder768(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(768, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - 
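- # forward() embeds 768-dim content features (plus an optional pitch
- # embedding), runs the attention encoder under a length mask, and projects
- # to per-frame mean and log-scale statistics.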
def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0, - ): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer( - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - mean_only=True, - ) - ) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - def remove_weight_norm(self): - for i in range(self.n_flows): - self.flows[i * 2].remove_weight_norm() - - -class PosteriorEncoder(nn.Module): - def __init__( - self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class Generator(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=0, - ): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - - self.resblocks = nn.ModuleList() - 
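- # Each upsampling stage is followed by num_kernels parallel residual blocks;
- # forward() sums their outputs and divides by num_kernels.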
for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class SineGen(torch.nn.Module): - """Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - sine_amp = 0.1, noise_std = 0.003, - voiced_threshold = 0, - flag_for_pulse=False) - samp_rate: sampling rate in Hz - harmonic_num: number of harmonic overtones (default 0) - sine_amp: amplitude of sine-wavefrom (default 0.1) - noise_std: std of Gaussian noise (default 0.003) - voiced_thoreshold: F0 threshold for U/V classification (default 0) - flag_for_pulse: this SinGen is used inside PulseGen (default False) - Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) - """ - - def __init__( - self, - samp_rate, - harmonic_num=0, - sine_amp=0.1, - noise_std=0.003, - voiced_threshold=0, - flag_for_pulse=False, - ): - super(SineGen, self).__init__() - self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - - def _f02uv(self, f0): - # generate uv signal - uv = torch.ones_like(f0) - uv = uv * (f0 > self.voiced_threshold) - return uv.float() - - def forward(self, f0, upp): - """sine_tensor, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, length, dim) - output uv: tensor(batchsize=1, length, 1) - """ - with torch.no_grad(): - f0 = f0[:, None].transpose(1, 2) - f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device) - # fundamental component - f0_buf[:, :, 0] = f0[:, :, 0] - for idx in np.arange(self.harmonic_num): - f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * ( - idx + 2 - ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic - rad_values = (f0_buf / self.sampling_rate) % 1 ###%1意味着n_har的乘积无法后处理优化 - rand_ini = torch.rand( - f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device - ) - rand_ini[:, 0] = 0 - rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - tmp_over_one = torch.cumsum(rad_values, 1) # % 1 #####%1意味着后面的cumsum无法再优化 - tmp_over_one *= upp - tmp_over_one = F.interpolate( - tmp_over_one.transpose(2, 1), - scale_factor=upp, - mode="linear", - align_corners=True, - ).transpose(2, 1) - rad_values = F.interpolate( - rad_values.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose( - 2, 1 - ) ####### - tmp_over_one %= 1 - tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, 
:] = tmp_over_one_idx * -1.0 - sine_waves = torch.sin( - torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi - ) - sine_waves = sine_waves * self.sine_amp - uv = self._f02uv(f0) - uv = F.interpolate( - uv.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose(2, 1) - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - """SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - uv (batchsize, length, 1) - """ - - def __init__( - self, - sampling_rate, - harmonic_num=0, - sine_amp=0.1, - add_noise_std=0.003, - voiced_threshod=0, - is_half=True, - ): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - self.is_half = is_half - # to produce sine waveforms - self.l_sin_gen = SineGen( - sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod - ) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x, upp=None): - sine_wavs, uv, _ = self.l_sin_gen(x, upp) - if self.is_half: - sine_wavs = sine_wavs.half() - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - return sine_merge, None, None # noise, uv - - -class GeneratorNSF(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - sr, - is_half=False, - ): - super(GeneratorNSF, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - - self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates)) - self.m_source = SourceModuleHnNSF( - sampling_rate=sr, harmonic_num=0, is_half=is_half - ) - self.noise_convs = nn.ModuleList() - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - c_cur = upsample_initial_channel // (2 ** (i + 1)) - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - if i + 1 < len(upsample_rates): - stride_f0 = np.prod(upsample_rates[i + 1 :]) - self.noise_convs.append( - Conv1d( - 1, - c_cur, - kernel_size=stride_f0 * 2, - stride=stride_f0, - padding=stride_f0 // 2, - ) - ) - else: - self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, 
resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - self.upp = np.prod(upsample_rates) - - def forward(self, x, f0, g=None): - har_source, noi_source, uv = self.m_source(f0, self.upp) - har_source = har_source.transpose(1, 2) - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -sr2sr = { - "32k": 32000, - "40k": 40000, - "48k": 48000, -} - - -class SynthesizerTrnMs256NSFsid(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward( - self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds - ): # 这里ds是id,[bs,1] - # print(1,pitch.shape)#[bs,t] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) 
- z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length) - pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size) - # print(-2,pitchf.shape,z_slice.shape) - o = self.dec(z_slice, pitchf, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs768NSFsid(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder768( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward( - self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds - ): # 这里ds是id,[bs,1] - # print(1,pitch.shape)#[bs,t] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length) - pitchf = commons.slice_segments2(pitchf, ids_slice, 
self.segment_size) - # print(-2,pitchf.shape,z_slice.shape) - o = self.dec(z_slice, pitchf, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs256NSFsid_nono(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr=None, - **kwargs - ): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=False, - ) - self.dec = Generator( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - o = self.dec(z_slice, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, sid, max_len=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs768NSFsid_nono(nn.Module): - def __init__( - self, - spec_channels, - 
segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr=None, - **kwargs - ): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder768( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=False, - ) - self.dec = Generator( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - o = self.dec(z_slice, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, sid, max_len=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11, 17] - # periods = [3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - 
y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class MultiPeriodDiscriminatorV2(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminatorV2, self).__init__() - # periods = [2, 3, 5, 7, 11, 17] - periods = [2, 3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ] - ) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f( - Conv2d( - 1, - 32, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 32, - 128, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 128, - 512, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 512, - 1024, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 1024, - 1024, - (kernel_size, 1), - 1, - padding=(get_padding(kernel_size, 1), 0), - ) - ), - ] - ) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap diff --git a/spaces/EsoCode/text-generation-webui/extensions/superbooga/chromadb.py b/spaces/EsoCode/text-generation-webui/extensions/superbooga/chromadb.py deleted file mode 100644 index 
1fb7a71848a8c99ab29b90c49902b545a1595f03..0000000000000000000000000000000000000000 --- a/spaces/EsoCode/text-generation-webui/extensions/superbooga/chromadb.py +++ /dev/null @@ -1,125 +0,0 @@ -import chromadb -import posthog -import torch -from chromadb.config import Settings -from sentence_transformers import SentenceTransformer - -from modules.logging_colors import logger - -logger.info('Intercepting all calls to posthog :)') -posthog.capture = lambda *args, **kwargs: None - - -class Collecter(): - def __init__(self): - pass - - def add(self, texts: list[str]): - pass - - def get(self, search_strings: list[str], n_results: int) -> list[str]: - pass - - def clear(self): - pass - - -class Embedder(): - def __init__(self): - pass - - def embed(self, text: str) -> list[torch.Tensor]: - pass - - -class ChromaCollector(Collecter): - def __init__(self, embedder: Embedder): - super().__init__() - self.chroma_client = chromadb.Client(Settings(anonymized_telemetry=False)) - self.embedder = embedder - self.collection = self.chroma_client.create_collection(name="context", embedding_function=embedder.embed) - self.ids = [] - - def add(self, texts: list[str]): - if len(texts) == 0: - return - - self.ids = [f"id{i}" for i in range(len(texts))] - self.collection.add(documents=texts, ids=self.ids) - - def get_documents_ids_distances(self, search_strings: list[str], n_results: int): - n_results = min(len(self.ids), n_results) - if n_results == 0: - return [], [], [] - - result = self.collection.query(query_texts=search_strings, n_results=n_results, include=['documents', 'distances']) - documents = result['documents'][0] - ids = list(map(lambda x: int(x[2:]), result['ids'][0])) - distances = result['distances'][0] - return documents, ids, distances - - # Get chunks by similarity - def get(self, search_strings: list[str], n_results: int) -> list[str]: - documents, _, _ = self.get_documents_ids_distances(search_strings, n_results) - return documents - - # Get ids by similarity - def get_ids(self, search_strings: list[str], n_results: int) -> list[str]: - _, ids, _ = self.get_documents_ids_distances(search_strings, n_results) - return ids - - # Get chunks by similarity and then sort by insertion order - def get_sorted(self, search_strings: list[str], n_results: int) -> list[str]: - documents, ids, _ = self.get_documents_ids_distances(search_strings, n_results) - return [x for _, x in sorted(zip(ids, documents))] - - # Multiply distance by factor within [0, time_weight] where more recent is lower - def apply_time_weight_to_distances(self, ids: list[int], distances: list[float], time_weight: float = 1.0) -> list[float]: - if len(self.ids) <= 1: - return distances.copy() - - return [distance * (1 - _id / (len(self.ids) - 1) * time_weight) for _id, distance in zip(ids, distances)] - - # Get ids by similarity and then sort by insertion order - def get_ids_sorted(self, search_strings: list[str], n_results: int, n_initial: int = None, time_weight: float = 1.0) -> list[str]: - do_time_weight = time_weight > 0 - if not (do_time_weight and n_initial is not None): - n_initial = n_results - elif n_initial == -1: - n_initial = len(self.ids) - - if n_initial < n_results: - raise ValueError(f"n_initial {n_initial} should be >= n_results {n_results}") - - _, ids, distances = self.get_documents_ids_distances(search_strings, n_initial) - if do_time_weight: - distances_w = self.apply_time_weight_to_distances(ids, distances, time_weight=time_weight) - results = zip(ids, distances, distances_w) - results = sorted(results, 
key=lambda x: x[2])[:n_results] - results = sorted(results, key=lambda x: x[0]) - ids = [x[0] for x in results] - - return sorted(ids) - - def clear(self): - self.collection.delete(ids=self.ids) - self.ids = [] - - -class SentenceTransformerEmbedder(Embedder): - def __init__(self) -> None: - self.model = SentenceTransformer("sentence-transformers/all-mpnet-base-v2") - self.embed = self.model.encode - - -def make_collector(): - global embedder - return ChromaCollector(embedder) - - -def add_chunks_to_collector(chunks, collector): - collector.clear() - collector.add(chunks) - - -embedder = SentenceTransformerEmbedder() diff --git a/spaces/EuroPython2022/mmocr-demo/configs/_base_/det_pipelines/textsnake_pipeline.py b/spaces/EuroPython2022/mmocr-demo/configs/_base_/det_pipelines/textsnake_pipeline.py deleted file mode 100644 index dc4b44819e5c3f3f725df096903fc0a809313913..0000000000000000000000000000000000000000 --- a/spaces/EuroPython2022/mmocr-demo/configs/_base_/det_pipelines/textsnake_pipeline.py +++ /dev/null @@ -1,65 +0,0 @@ -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) - -train_pipeline = [ - dict(type='LoadImageFromFile', color_type='color_ignore_orientation'), - dict( - type='LoadTextAnnotations', - with_bbox=True, - with_mask=True, - poly2mask=False), - dict(type='ColorJitter', brightness=32.0 / 255, saturation=0.5), - dict(type='Normalize', **img_norm_cfg), - dict( - type='RandomCropPolyInstances', - instance_key='gt_masks', - crop_ratio=0.65, - min_side_ratio=0.3), - dict( - type='RandomRotatePolyInstances', - rotate_ratio=0.5, - max_angle=20, - pad_with_fixed_color=False), - dict( - type='ScaleAspectJitter', - img_scale=[(3000, 736)], # unused - ratio_range=(0.7, 1.3), - aspect_ratio_range=(0.9, 1.1), - multiscale_mode='value', - long_size_bound=800, - short_size_bound=480, - resize_type='long_short_bound', - keep_ratio=False), - dict(type='SquareResizePad', target_size=800, pad_ratio=0.6), - dict(type='RandomFlip', flip_ratio=0.5, direction='horizontal'), - dict(type='TextSnakeTargets'), - dict(type='Pad', size_divisor=32), - dict( - type='CustomFormatBundle', - keys=[ - 'gt_text_mask', 'gt_center_region_mask', 'gt_mask', - 'gt_radius_map', 'gt_sin_map', 'gt_cos_map' - ], - visualize=dict(flag=False, boundary_key='gt_text_mask')), - dict( - type='Collect', - keys=[ - 'img', 'gt_text_mask', 'gt_center_region_mask', 'gt_mask', - 'gt_radius_map', 'gt_sin_map', 'gt_cos_map' - ]) -] - -test_pipeline = [ - dict(type='LoadImageFromFile', color_type='color_ignore_orientation'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 736), # used by Resize - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] diff --git a/spaces/EuroPython2022/mmocr-demo/configs/textdet/fcenet/README.md b/spaces/EuroPython2022/mmocr-demo/configs/textdet/fcenet/README.md deleted file mode 100644 index f1acd2b1d8daa4557b16c8375b8c1ab4aa36cf6c..0000000000000000000000000000000000000000 --- a/spaces/EuroPython2022/mmocr-demo/configs/textdet/fcenet/README.md +++ /dev/null @@ -1,38 +0,0 @@ -# FCENet - -> [Fourier Contour Embedding for Arbitrary-Shaped Text Detection](https://arxiv.org/abs/2104.10442) - - - -## Abstract - -One of the main challenges for arbitrary-shaped text detection is to design a good text instance representation that allows networks to learn diverse text 
geometry variances. Most of existing methods model text instances in image spatial domain via masks or contour point sequences in the Cartesian or the polar coordinate system. However, the mask representation might lead to expensive post-processing, while the point sequence one may have limited capability to model texts with highly-curved shapes. To tackle these problems, we model text instances in the Fourier domain and propose one novel Fourier Contour Embedding (FCE) method to represent arbitrary shaped text contours as compact signatures. We further construct FCENet with a backbone, feature pyramid networks (FPN) and a simple post-processing with the Inverse Fourier Transformation (IFT) and Non-Maximum Suppression (NMS). Different from previous methods, FCENet first predicts compact Fourier signatures of text instances, and then reconstructs text contours via IFT and NMS during test. Extensive experiments demonstrate that FCE is accurate and robust to fit contours of scene texts even with highly-curved shapes, and also validate the effectiveness and the good generalization of FCENet for arbitrary-shaped text detection. Furthermore, experimental results show that our FCENet is superior to the state-of-the-art (SOTA) methods on CTW1500 and Total-Text, especially on challenging highly-curved text subset. - -
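The short sketch below is an editor's illustration of the idea summarised in the abstract, not code from this repository or from MMOCR: a closed contour is collapsed to a few low-frequency Fourier coefficients (a compact signature) and then rebuilt with the inverse transform, which is the property FCENet's signatures rely on. The function names (`fourier_signature`, `reconstruct`) and the toy ellipse are invented for illustration only.

```python
import numpy as np

def fourier_signature(points, k=5):
    """Truncated Fourier coefficients (compact signature) of a closed contour.

    points: (N, 2) array of x/y samples along the contour, in order.
    Returns (coeffs, freqs), keeping only harmonics with |frequency| <= k.
    """
    z = points[:, 0] + 1j * points[:, 1]            # treat (x, y) as complex samples
    coeffs = np.fft.fft(z) / len(z)                 # normalised spectrum
    freqs = np.fft.fftfreq(len(z), d=1.0 / len(z))  # integer frequencies 0, 1, ..., -1
    keep = np.abs(freqs) <= k
    return coeffs[keep], freqs[keep]

def reconstruct(coeffs, freqs, n=256):
    """Inverse transform: rebuild n contour points from the truncated signature."""
    t = np.arange(n) / n
    z = sum(c * np.exp(2j * np.pi * f * t) for c, f in zip(coeffs, freqs))
    return np.stack([z.real, z.imag], axis=1)

# Toy usage: an ellipse collapses to a handful of coefficients and is
# recovered almost exactly from them.
theta = np.linspace(0, 2 * np.pi, 256, endpoint=False)
contour = np.stack([2 * np.cos(theta), np.sin(theta)], axis=1)
coeffs, freqs = fourier_signature(contour, k=5)
print(np.abs(reconstruct(coeffs, freqs) - contour).max())  # ~1e-15
```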
    - -## Results and models - -### CTW1500 - -| Method | Backbone | Pretrained Model | Training set | Test set | #epochs | Test size | Recall | Precision | Hmean | Download | -| :-------------------------------------------------: | :--------------: | :--------------: | :-----------: | :----------: | :-----: | :---------: | :----: | :-------: | :---: | :----------------------------------------------------: | -| [FCENet](/configs/textdet/fcenet/fcenet_r50dcnv2_fpn_1500e_ctw1500.py) | ResNet50 + DCNv2 | ImageNet | CTW1500 Train | CTW1500 Test | 1500 | (736, 1080) | 0.828 | 0.875 | 0.851 | [model](https://download.openmmlab.com/mmocr/textdet/fcenet/fcenet_r50dcnv2_fpn_1500e_ctw1500_20211022-e326d7ec.pth) \| [log](https://download.openmmlab.com/mmocr/textdet/fcenet/20210511_181328.log.json) | - -### ICDAR2015 - -| Method | Backbone | Pretrained Model | Training set | Test set | #epochs | Test size | Recall | Precision | Hmean | Download | -| :-------------------------------------------------------: | :------: | :--------------: | :----------: | :-------: | :-----: | :----------: | :----: | :-------: | :---: | :---------------------------------------------------------: | -| [FCENet](/configs/textdet/fcenet/fcenet_r50_fpn_1500e_icdar2015.py) | ResNet50 | ImageNet | IC15 Train | IC15 Test | 1500 | (2260, 2260) | 0.819 | 0.880 | 0.849 | [model](https://download.openmmlab.com/mmocr/textdet/fcenet/fcenet_r50_fpn_1500e_icdar2015_20211022-daefb6ed.pth) \| [log](https://download.openmmlab.com/mmocr/textdet/fcenet/20210601_222655.log.json) | - -## Citation - -```bibtex -@InProceedings{zhu2021fourier, - title={Fourier Contour Embedding for Arbitrary-Shaped Text Detection}, - author={Yiqin Zhu and Jianyong Chen and Lingyu Liang and Zhanghui Kuang and Lianwen Jin and Wayne Zhang}, - year={2021}, - booktitle = {CVPR} - } -``` diff --git a/spaces/Fcjs/stablediffusionapi-edge-of-realism/app.py b/spaces/Fcjs/stablediffusionapi-edge-of-realism/app.py deleted file mode 100644 index ee2dc1ddf0e78826420cd9f04700130d8151216f..0000000000000000000000000000000000000000 --- a/spaces/Fcjs/stablediffusionapi-edge-of-realism/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/stablediffusionapi/edge-of-realism").launch() \ No newline at end of file diff --git a/spaces/FelixLuoX/codeformer/CodeFormer/basicsr/ops/dcn/src/deform_conv_ext.cpp b/spaces/FelixLuoX/codeformer/CodeFormer/basicsr/ops/dcn/src/deform_conv_ext.cpp deleted file mode 100644 index 41c6df6f721bd95a525fd6a03dd9882e863de042..0000000000000000000000000000000000000000 --- a/spaces/FelixLuoX/codeformer/CodeFormer/basicsr/ops/dcn/src/deform_conv_ext.cpp +++ /dev/null @@ -1,164 +0,0 @@ -// modify from -// https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/deform_conv_cuda.c - -#include -#include - -#include -#include - -#define WITH_CUDA // always use cuda -#ifdef WITH_CUDA -int deform_conv_forward_cuda(at::Tensor input, at::Tensor weight, - at::Tensor offset, at::Tensor output, - at::Tensor columns, at::Tensor ones, int kW, - int kH, int dW, int dH, int padW, int padH, - int dilationW, int dilationH, int group, - int deformable_group, int im2col_step); - -int deform_conv_backward_input_cuda(at::Tensor input, at::Tensor offset, - at::Tensor gradOutput, at::Tensor gradInput, - at::Tensor gradOffset, at::Tensor weight, - at::Tensor columns, int kW, int kH, int dW, - int dH, int padW, int padH, int dilationW, - int dilationH, int group, - int deformable_group, int im2col_step); - -int 
deform_conv_backward_parameters_cuda( - at::Tensor input, at::Tensor offset, at::Tensor gradOutput, - at::Tensor gradWeight, // at::Tensor gradBias, - at::Tensor columns, at::Tensor ones, int kW, int kH, int dW, int dH, - int padW, int padH, int dilationW, int dilationH, int group, - int deformable_group, float scale, int im2col_step); - -void modulated_deform_conv_cuda_forward( - at::Tensor input, at::Tensor weight, at::Tensor bias, at::Tensor ones, - at::Tensor offset, at::Tensor mask, at::Tensor output, at::Tensor columns, - int kernel_h, int kernel_w, const int stride_h, const int stride_w, - const int pad_h, const int pad_w, const int dilation_h, - const int dilation_w, const int group, const int deformable_group, - const bool with_bias); - -void modulated_deform_conv_cuda_backward( - at::Tensor input, at::Tensor weight, at::Tensor bias, at::Tensor ones, - at::Tensor offset, at::Tensor mask, at::Tensor columns, - at::Tensor grad_input, at::Tensor grad_weight, at::Tensor grad_bias, - at::Tensor grad_offset, at::Tensor grad_mask, at::Tensor grad_output, - int kernel_h, int kernel_w, int stride_h, int stride_w, int pad_h, - int pad_w, int dilation_h, int dilation_w, int group, int deformable_group, - const bool with_bias); -#endif - -int deform_conv_forward(at::Tensor input, at::Tensor weight, - at::Tensor offset, at::Tensor output, - at::Tensor columns, at::Tensor ones, int kW, - int kH, int dW, int dH, int padW, int padH, - int dilationW, int dilationH, int group, - int deformable_group, int im2col_step) { - if (input.device().is_cuda()) { -#ifdef WITH_CUDA - return deform_conv_forward_cuda(input, weight, offset, output, columns, - ones, kW, kH, dW, dH, padW, padH, dilationW, dilationH, group, - deformable_group, im2col_step); -#else - AT_ERROR("deform conv is not compiled with GPU support"); -#endif - } - AT_ERROR("deform conv is not implemented on CPU"); -} - -int deform_conv_backward_input(at::Tensor input, at::Tensor offset, - at::Tensor gradOutput, at::Tensor gradInput, - at::Tensor gradOffset, at::Tensor weight, - at::Tensor columns, int kW, int kH, int dW, - int dH, int padW, int padH, int dilationW, - int dilationH, int group, - int deformable_group, int im2col_step) { - if (input.device().is_cuda()) { -#ifdef WITH_CUDA - return deform_conv_backward_input_cuda(input, offset, gradOutput, - gradInput, gradOffset, weight, columns, kW, kH, dW, dH, padW, padH, - dilationW, dilationH, group, deformable_group, im2col_step); -#else - AT_ERROR("deform conv is not compiled with GPU support"); -#endif - } - AT_ERROR("deform conv is not implemented on CPU"); -} - -int deform_conv_backward_parameters( - at::Tensor input, at::Tensor offset, at::Tensor gradOutput, - at::Tensor gradWeight, // at::Tensor gradBias, - at::Tensor columns, at::Tensor ones, int kW, int kH, int dW, int dH, - int padW, int padH, int dilationW, int dilationH, int group, - int deformable_group, float scale, int im2col_step) { - if (input.device().is_cuda()) { -#ifdef WITH_CUDA - return deform_conv_backward_parameters_cuda(input, offset, gradOutput, - gradWeight, columns, ones, kW, kH, dW, dH, padW, padH, dilationW, - dilationH, group, deformable_group, scale, im2col_step); -#else - AT_ERROR("deform conv is not compiled with GPU support"); -#endif - } - AT_ERROR("deform conv is not implemented on CPU"); -} - -void modulated_deform_conv_forward( - at::Tensor input, at::Tensor weight, at::Tensor bias, at::Tensor ones, - at::Tensor offset, at::Tensor mask, at::Tensor output, at::Tensor columns, - int kernel_h, int kernel_w, 
const int stride_h, const int stride_w, - const int pad_h, const int pad_w, const int dilation_h, - const int dilation_w, const int group, const int deformable_group, - const bool with_bias) { - if (input.device().is_cuda()) { -#ifdef WITH_CUDA - return modulated_deform_conv_cuda_forward(input, weight, bias, ones, - offset, mask, output, columns, kernel_h, kernel_w, stride_h, - stride_w, pad_h, pad_w, dilation_h, dilation_w, group, - deformable_group, with_bias); -#else - AT_ERROR("modulated deform conv is not compiled with GPU support"); -#endif - } - AT_ERROR("modulated deform conv is not implemented on CPU"); -} - -void modulated_deform_conv_backward( - at::Tensor input, at::Tensor weight, at::Tensor bias, at::Tensor ones, - at::Tensor offset, at::Tensor mask, at::Tensor columns, - at::Tensor grad_input, at::Tensor grad_weight, at::Tensor grad_bias, - at::Tensor grad_offset, at::Tensor grad_mask, at::Tensor grad_output, - int kernel_h, int kernel_w, int stride_h, int stride_w, int pad_h, - int pad_w, int dilation_h, int dilation_w, int group, int deformable_group, - const bool with_bias) { - if (input.device().is_cuda()) { -#ifdef WITH_CUDA - return modulated_deform_conv_cuda_backward(input, weight, bias, ones, - offset, mask, columns, grad_input, grad_weight, grad_bias, grad_offset, - grad_mask, grad_output, kernel_h, kernel_w, stride_h, stride_w, - pad_h, pad_w, dilation_h, dilation_w, group, deformable_group, - with_bias); -#else - AT_ERROR("modulated deform conv is not compiled with GPU support"); -#endif - } - AT_ERROR("modulated deform conv is not implemented on CPU"); -} - - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { - m.def("deform_conv_forward", &deform_conv_forward, - "deform forward"); - m.def("deform_conv_backward_input", &deform_conv_backward_input, - "deform_conv_backward_input"); - m.def("deform_conv_backward_parameters", - &deform_conv_backward_parameters, - "deform_conv_backward_parameters"); - m.def("modulated_deform_conv_forward", - &modulated_deform_conv_forward, - "modulated deform conv forward"); - m.def("modulated_deform_conv_backward", - &modulated_deform_conv_backward, - "modulated deform conv backward"); -} diff --git a/spaces/Fernando22/freegpt-webui/client/css/button.css b/spaces/Fernando22/freegpt-webui/client/css/button.css deleted file mode 100644 index 5f604a8460d048458249f78be9dc544ade84801e..0000000000000000000000000000000000000000 --- a/spaces/Fernando22/freegpt-webui/client/css/button.css +++ /dev/null @@ -1,26 +0,0 @@ -.button { - display: flex; - padding: 8px 12px; - align-items: center; - justify-content: center; - border: 1px solid var(--conversations); - border-radius: var(--border-radius-1); - width: 100%; - background: transparent; - cursor: pointer; -} - -.button span { - color: var(--colour-3); - font-size: 0.875rem; -} - -.button i::before { - margin-right: 8px; -} - -@media screen and (max-width: 990px) { - .button span { - font-size: 0.75rem; - } -} diff --git a/spaces/FridaZuley/RVC_HFKawaii/infer/lib/uvr5_pack/lib_v5/nets_33966KB.py b/spaces/FridaZuley/RVC_HFKawaii/infer/lib/uvr5_pack/lib_v5/nets_33966KB.py deleted file mode 100644 index 73a5b836177b706c306e27875f8391c1aed4b948..0000000000000000000000000000000000000000 --- a/spaces/FridaZuley/RVC_HFKawaii/infer/lib/uvr5_pack/lib_v5/nets_33966KB.py +++ /dev/null @@ -1,122 +0,0 @@ -import torch -import torch.nn.functional as F -from torch import nn - -from . 
import layers_33966KB as layers - - -class BaseASPPNet(nn.Module): - def __init__(self, nin, ch, dilations=(4, 8, 16, 32)): - super(BaseASPPNet, self).__init__() - self.enc1 = layers.Encoder(nin, ch, 3, 2, 1) - self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1) - self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1) - self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1) - - self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations) - - self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1) - self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1) - self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1) - self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1) - - def __call__(self, x): - h, e1 = self.enc1(x) - h, e2 = self.enc2(h) - h, e3 = self.enc3(h) - h, e4 = self.enc4(h) - - h = self.aspp(h) - - h = self.dec4(h, e4) - h = self.dec3(h, e3) - h = self.dec2(h, e2) - h = self.dec1(h, e1) - - return h - - -class CascadedASPPNet(nn.Module): - def __init__(self, n_fft): - super(CascadedASPPNet, self).__init__() - self.stg1_low_band_net = BaseASPPNet(2, 16) - self.stg1_high_band_net = BaseASPPNet(2, 16) - - self.stg2_bridge = layers.Conv2DBNActiv(18, 8, 1, 1, 0) - self.stg2_full_band_net = BaseASPPNet(8, 16) - - self.stg3_bridge = layers.Conv2DBNActiv(34, 16, 1, 1, 0) - self.stg3_full_band_net = BaseASPPNet(16, 32) - - self.out = nn.Conv2d(32, 2, 1, bias=False) - self.aux1_out = nn.Conv2d(16, 2, 1, bias=False) - self.aux2_out = nn.Conv2d(16, 2, 1, bias=False) - - self.max_bin = n_fft // 2 - self.output_bin = n_fft // 2 + 1 - - self.offset = 128 - - def forward(self, x, aggressiveness=None): - mix = x.detach() - x = x.clone() - - x = x[:, :, : self.max_bin] - - bandw = x.size()[2] // 2 - aux1 = torch.cat( - [ - self.stg1_low_band_net(x[:, :, :bandw]), - self.stg1_high_band_net(x[:, :, bandw:]), - ], - dim=2, - ) - - h = torch.cat([x, aux1], dim=1) - aux2 = self.stg2_full_band_net(self.stg2_bridge(h)) - - h = torch.cat([x, aux1, aux2], dim=1) - h = self.stg3_full_band_net(self.stg3_bridge(h)) - - mask = torch.sigmoid(self.out(h)) - mask = F.pad( - input=mask, - pad=(0, 0, 0, self.output_bin - mask.size()[2]), - mode="replicate", - ) - - if self.training: - aux1 = torch.sigmoid(self.aux1_out(aux1)) - aux1 = F.pad( - input=aux1, - pad=(0, 0, 0, self.output_bin - aux1.size()[2]), - mode="replicate", - ) - aux2 = torch.sigmoid(self.aux2_out(aux2)) - aux2 = F.pad( - input=aux2, - pad=(0, 0, 0, self.output_bin - aux2.size()[2]), - mode="replicate", - ) - return mask * mix, aux1 * mix, aux2 * mix - else: - if aggressiveness: - mask[:, :, : aggressiveness["split_bin"]] = torch.pow( - mask[:, :, : aggressiveness["split_bin"]], - 1 + aggressiveness["value"] / 3, - ) - mask[:, :, aggressiveness["split_bin"] :] = torch.pow( - mask[:, :, aggressiveness["split_bin"] :], - 1 + aggressiveness["value"], - ) - - return mask * mix - - def predict(self, x_mag, aggressiveness=None): - h = self.forward(x_mag, aggressiveness) - - if self.offset > 0: - h = h[:, :, :, self.offset : -self.offset] - assert h.size()[3] > 0 - - return h diff --git a/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/color_ordered_blocks_on_pallet.py b/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/color_ordered_blocks_on_pallet.py deleted file mode 100644 index b89e13ee73e9fa43d9f9b8efaf7c127d8c3cc5eb..0000000000000000000000000000000000000000 --- a/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/color_ordered_blocks_on_pallet.py +++ /dev/null @@ -1,58 +0,0 @@ -import numpy as np -import os -import pybullet as p -import random -from 
cliport.tasks import primitives -from cliport.tasks.grippers import Spatula -from cliport.tasks.task import Task -from cliport.utils import utils -import numpy as np -from cliport.tasks.task import Task -from cliport.utils import utils - -class ColorOrderedBlocksOnPallet(Task): - """Pick up each colored block and place it onto the pallet in specific color sequence: red, blue, green, yellow, orange, and finally purple.""" - - def __init__(self): - super().__init__() - self.max_steps = 20 - self.lang_template = "place the colored blocks onto the pallet in the following order: red, blue, green, yellow, orange, and purple" - self.task_completed_desc = "done placing blocks on the pallet." - self.additional_reset() - - def reset(self, env): - super().reset(env) - - # Add pallet. - # x, y, z dimensions for the asset size - pallet_size = (0.15, 0.15, 0.02) - pallet_urdf = 'pallet/pallet.urdf' - pallet_pose = self.get_random_pose(env, pallet_size) - env.add_object(pallet_urdf, pallet_pose, 'fixed') - - # Block colors. - colors = [ - utils.COLORS['red'], utils.COLORS['blue'], utils.COLORS['green'], - utils.COLORS['yellow'], utils.COLORS['orange'], utils.COLORS['purple'] - ] - - # Add blocks. - # x, y, z dimensions for the asset size - block_size = (0.04, 0.04, 0.04) - block_urdf = 'block/block.urdf' - blocks = [] - for i in range(6): - block_pose = self.get_random_pose(env, block_size) - block_id = env.add_object(block_urdf, block_pose, color=colors[i]) - blocks.append(block_id) - - # Associate placement locations for goals. - place_pos = [(0, -0.05, 0.03), (0, 0, 0.03), - (0, 0.05, 0.03), (0, -0.025, 0.08), - (0, 0.025, 0.08), (0, 0, 0.13)] - targs = [(utils.apply(pallet_pose, i), pallet_pose[1]) for i in place_pos] - - # Goal: blocks are placed on the pallet in the order of red, blue, green, yellow, orange, purple. - for i in range(6): - self.add_goal(objs=[blocks[i]], matches=np.ones((1, 1)), targ_poses=[targs[i]], replace=False, - rotations=True, metric='pose', params=None, step_max_reward=1 / 6, symmetries=[np.pi/2], language_goal=self.lang_template) \ No newline at end of file diff --git a/spaces/Gradio-Blocks/Anime-BigGAN/README.md b/spaces/Gradio-Blocks/Anime-BigGAN/README.md deleted file mode 100644 index e1cece3f852485e4f62eb78fa8e423ecac1bfe48..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/Anime-BigGAN/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Anime BigGAN -emoji: 👸🏼 -colorFrom: pink -colorTo: indigo -sdk: gradio -sdk_version: 3.0.3 -app_file: app.py -pinned: false -license: mit ---- - -

-Anime-BigGAN

    -This is a Gradio Blocks app of HighCWu/anime_biggan_toy in github. \ No newline at end of file diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/_base_/models/fpn_uniformer.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/_base_/models/fpn_uniformer.py deleted file mode 100644 index 8aae98c5991055bfcc08e82ccdc09f8b1d9f8a8d..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/_base_/models/fpn_uniformer.py +++ /dev/null @@ -1,35 +0,0 @@ -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='EncoderDecoder', - backbone=dict( - type='UniFormer', - embed_dim=[64, 128, 320, 512], - layers=[3, 4, 8, 3], - head_dim=64, - mlp_ratio=4., - qkv_bias=True, - drop_rate=0., - attn_drop_rate=0., - drop_path_rate=0.1), - neck=dict( - type='FPN', - in_channels=[64, 128, 320, 512], - out_channels=256, - num_outs=4), - decode_head=dict( - type='FPNHead', - in_channels=[256, 256, 256, 256], - in_index=[0, 1, 2, 3], - feature_strides=[4, 8, 16, 32], - channels=128, - dropout_ratio=0.1, - num_classes=150, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), - # model training and testing settings - train_cfg=dict(), - test_cfg=dict(mode='whole') -) diff --git a/spaces/Grezz/generate_human_motion/pyrender/setup.py b/spaces/Grezz/generate_human_motion/pyrender/setup.py deleted file mode 100644 index c3b5ba0da2b0f17b759e5556597981096a80bda8..0000000000000000000000000000000000000000 --- a/spaces/Grezz/generate_human_motion/pyrender/setup.py +++ /dev/null @@ -1,76 +0,0 @@ -""" -Setup of pyrender Python codebase. - -Author: Matthew Matl -""" -import sys -from setuptools import setup - -# load __version__ -exec(open('pyrender/version.py').read()) - -def get_imageio_dep(): - if sys.version[0] == "2": - return 'imageio<=2.6.1' - return 'imageio' - -requirements = [ - 'freetype-py', # For font loading - get_imageio_dep(), # For Image I/O - 'networkx', # For the scene graph - 'numpy', # Numpy - 'Pillow', # For Trimesh texture conversions - 'pyglet>=1.4.10', # For the pyglet viewer - 'PyOpenGL~=3.1.0', # For OpenGL -# 'PyOpenGL_accelerate~=3.1.0', # For OpenGL - 'scipy', # Because of trimesh missing dep - 'six', # For Python 2/3 interop - 'trimesh', # For meshes -] - -dev_requirements = [ - 'flake8', # Code formatting checker - 'pre-commit', # Pre-commit hooks - 'pytest', # Code testing - 'pytest-cov', # Coverage testing - 'tox', # Automatic virtualenv testing -] - -docs_requirements = [ - 'sphinx', # General doc library - 'sphinx_rtd_theme', # RTD theme for sphinx - 'sphinx-automodapi' # For generating nice tables -] - - -setup( - name = 'pyrender', - version=__version__, - description='Easy-to-use Python renderer for 3D visualization', - long_description='A simple implementation of Physically-Based Rendering ' - '(PBR) in Python. 
Compliant with the glTF 2.0 standard.', - author='Matthew Matl', - author_email='matthewcmatl@gmail.com', - license='MIT License', - url = 'https://github.com/mmatl/pyrender', - classifiers = [ - 'Development Status :: 4 - Beta', - 'License :: OSI Approved :: MIT License', - 'Operating System :: POSIX :: Linux', - 'Operating System :: MacOS :: MacOS X', - 'Programming Language :: Python :: 2.7', - 'Programming Language :: Python :: 3.5', - 'Programming Language :: Python :: 3.6', - 'Natural Language :: English', - 'Topic :: Scientific/Engineering' - ], - keywords = 'rendering graphics opengl 3d visualization pbr gltf', - packages = ['pyrender', 'pyrender.platforms'], - setup_requires = requirements, - install_requires = requirements, - extras_require={ - 'dev': dev_requirements, - 'docs': docs_requirements, - }, - include_package_data=True -) diff --git a/spaces/Haitangtangtangtang/AnimeBackgroundGAN/network/Transformer.py b/spaces/Haitangtangtangtang/AnimeBackgroundGAN/network/Transformer.py deleted file mode 100644 index 966c1c3aa654fbeb4650d361b4fc803695de5369..0000000000000000000000000000000000000000 --- a/spaces/Haitangtangtangtang/AnimeBackgroundGAN/network/Transformer.py +++ /dev/null @@ -1,180 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F - - -class Transformer(nn.Module): - def __init__(self): - super(Transformer, self).__init__() - # - self.refpad01_1 = nn.ReflectionPad2d(3) - self.conv01_1 = nn.Conv2d(3, 64, 7) - self.in01_1 = InstanceNormalization(64) - # relu - self.conv02_1 = nn.Conv2d(64, 128, 3, 2, 1) - self.conv02_2 = nn.Conv2d(128, 128, 3, 1, 1) - self.in02_1 = InstanceNormalization(128) - # relu - self.conv03_1 = nn.Conv2d(128, 256, 3, 2, 1) - self.conv03_2 = nn.Conv2d(256, 256, 3, 1, 1) - self.in03_1 = InstanceNormalization(256) - # relu - - ## res block 1 - self.refpad04_1 = nn.ReflectionPad2d(1) - self.conv04_1 = nn.Conv2d(256, 256, 3) - self.in04_1 = InstanceNormalization(256) - # relu - self.refpad04_2 = nn.ReflectionPad2d(1) - self.conv04_2 = nn.Conv2d(256, 256, 3) - self.in04_2 = InstanceNormalization(256) - # + input - - ## res block 2 - self.refpad05_1 = nn.ReflectionPad2d(1) - self.conv05_1 = nn.Conv2d(256, 256, 3) - self.in05_1 = InstanceNormalization(256) - # relu - self.refpad05_2 = nn.ReflectionPad2d(1) - self.conv05_2 = nn.Conv2d(256, 256, 3) - self.in05_2 = InstanceNormalization(256) - # + input - - ## res block 3 - self.refpad06_1 = nn.ReflectionPad2d(1) - self.conv06_1 = nn.Conv2d(256, 256, 3) - self.in06_1 = InstanceNormalization(256) - # relu - self.refpad06_2 = nn.ReflectionPad2d(1) - self.conv06_2 = nn.Conv2d(256, 256, 3) - self.in06_2 = InstanceNormalization(256) - # + input - - ## res block 4 - self.refpad07_1 = nn.ReflectionPad2d(1) - self.conv07_1 = nn.Conv2d(256, 256, 3) - self.in07_1 = InstanceNormalization(256) - # relu - self.refpad07_2 = nn.ReflectionPad2d(1) - self.conv07_2 = nn.Conv2d(256, 256, 3) - self.in07_2 = InstanceNormalization(256) - # + input - - ## res block 5 - self.refpad08_1 = nn.ReflectionPad2d(1) - self.conv08_1 = nn.Conv2d(256, 256, 3) - self.in08_1 = InstanceNormalization(256) - # relu - self.refpad08_2 = nn.ReflectionPad2d(1) - self.conv08_2 = nn.Conv2d(256, 256, 3) - self.in08_2 = InstanceNormalization(256) - # + input - - ## res block 6 - self.refpad09_1 = nn.ReflectionPad2d(1) - self.conv09_1 = nn.Conv2d(256, 256, 3) - self.in09_1 = InstanceNormalization(256) - # relu - self.refpad09_2 = nn.ReflectionPad2d(1) - self.conv09_2 = nn.Conv2d(256, 256, 3) - self.in09_2 = 
InstanceNormalization(256) - # + input - - ## res block 7 - self.refpad10_1 = nn.ReflectionPad2d(1) - self.conv10_1 = nn.Conv2d(256, 256, 3) - self.in10_1 = InstanceNormalization(256) - # relu - self.refpad10_2 = nn.ReflectionPad2d(1) - self.conv10_2 = nn.Conv2d(256, 256, 3) - self.in10_2 = InstanceNormalization(256) - # + input - - ## res block 8 - self.refpad11_1 = nn.ReflectionPad2d(1) - self.conv11_1 = nn.Conv2d(256, 256, 3) - self.in11_1 = InstanceNormalization(256) - # relu - self.refpad11_2 = nn.ReflectionPad2d(1) - self.conv11_2 = nn.Conv2d(256, 256, 3) - self.in11_2 = InstanceNormalization(256) - # + input - - ##------------------------------------## - self.deconv01_1 = nn.ConvTranspose2d(256, 128, 3, 2, 1, 1) - self.deconv01_2 = nn.Conv2d(128, 128, 3, 1, 1) - self.in12_1 = InstanceNormalization(128) - # relu - self.deconv02_1 = nn.ConvTranspose2d(128, 64, 3, 2, 1, 1) - self.deconv02_2 = nn.Conv2d(64, 64, 3, 1, 1) - self.in13_1 = InstanceNormalization(64) - # relu - self.refpad12_1 = nn.ReflectionPad2d(3) - self.deconv03_1 = nn.Conv2d(64, 3, 7) - # tanh - - def forward(self, x): - y = F.relu(self.in01_1(self.conv01_1(self.refpad01_1(x)))) - y = F.relu(self.in02_1(self.conv02_2(self.conv02_1(y)))) - t04 = F.relu(self.in03_1(self.conv03_2(self.conv03_1(y)))) - - ## - y = F.relu(self.in04_1(self.conv04_1(self.refpad04_1(t04)))) - t05 = self.in04_2(self.conv04_2(self.refpad04_2(y))) + t04 - - y = F.relu(self.in05_1(self.conv05_1(self.refpad05_1(t05)))) - t06 = self.in05_2(self.conv05_2(self.refpad05_2(y))) + t05 - - y = F.relu(self.in06_1(self.conv06_1(self.refpad06_1(t06)))) - t07 = self.in06_2(self.conv06_2(self.refpad06_2(y))) + t06 - - y = F.relu(self.in07_1(self.conv07_1(self.refpad07_1(t07)))) - t08 = self.in07_2(self.conv07_2(self.refpad07_2(y))) + t07 - - y = F.relu(self.in08_1(self.conv08_1(self.refpad08_1(t08)))) - t09 = self.in08_2(self.conv08_2(self.refpad08_2(y))) + t08 - - y = F.relu(self.in09_1(self.conv09_1(self.refpad09_1(t09)))) - t10 = self.in09_2(self.conv09_2(self.refpad09_2(y))) + t09 - - y = F.relu(self.in10_1(self.conv10_1(self.refpad10_1(t10)))) - t11 = self.in10_2(self.conv10_2(self.refpad10_2(y))) + t10 - - y = F.relu(self.in11_1(self.conv11_1(self.refpad11_1(t11)))) - y = self.in11_2(self.conv11_2(self.refpad11_2(y))) + t11 - ## - - y = F.relu(self.in12_1(self.deconv01_2(self.deconv01_1(y)))) - y = F.relu(self.in13_1(self.deconv02_2(self.deconv02_1(y)))) - y = torch.tanh(self.deconv03_1(self.refpad12_1(y))) - - return y - - -class InstanceNormalization(nn.Module): - def __init__(self, dim, eps=1e-9): - super(InstanceNormalization, self).__init__() - self.scale = nn.Parameter(torch.FloatTensor(dim)) - self.shift = nn.Parameter(torch.FloatTensor(dim)) - self.eps = eps - self._reset_parameters() - - def _reset_parameters(self): - self.scale.data.uniform_() - self.shift.data.zero_() - - def __call__(self, x): - n = x.size(2) * x.size(3) - t = x.view(x.size(0), x.size(1), n) - mean = torch.mean(t, 2).unsqueeze(2).unsqueeze(3).expand_as(x) - # Calculate the biased var. 
torch.var returns unbiased var - var = torch.var(t, 2).unsqueeze(2).unsqueeze(3).expand_as(x) * ( - (n - 1) / float(n) - ) - scale_broadcast = self.scale.unsqueeze(1).unsqueeze(1).unsqueeze(0) - scale_broadcast = scale_broadcast.expand_as(x) - shift_broadcast = self.shift.unsqueeze(1).unsqueeze(1).unsqueeze(0) - shift_broadcast = shift_broadcast.expand_as(x) - out = (x - mean) / torch.sqrt(var + self.eps) - out = out * scale_broadcast + shift_broadcast - return out diff --git a/spaces/HaloMaster/chinesesummary/fengshen/examples/classification/demo_classification_afqmc_roberta_deepspeed.sh b/spaces/HaloMaster/chinesesummary/fengshen/examples/classification/demo_classification_afqmc_roberta_deepspeed.sh deleted file mode 100644 index 48b003940a960454912a62731e5aec3b9046a6df..0000000000000000000000000000000000000000 --- a/spaces/HaloMaster/chinesesummary/fengshen/examples/classification/demo_classification_afqmc_roberta_deepspeed.sh +++ /dev/null @@ -1,90 +0,0 @@ -MODEL_NAME="IDEA-CCNL/Erlangshen-Roberta-110M-NLI" - -TEXTA_NAME=sentence1 -TEXTB_NAME=sentence2 -LABEL_NAME=label -ID_NAME=id - -BATCH_SIZE=32 -VAL_BATCH_SIZE=32 -ZERO_STAGE=1 -config_json="./ds_config.json" - -cat < $config_json -{ - "train_micro_batch_size_per_gpu": $BATCH_SIZE, - "steps_per_print": 1000, - "gradient_clipping": 0.1, - "zero_optimization": { - "stage": ${ZERO_STAGE} - }, - "zero_allow_untested_optimizer": false, - "fp16": { - "enabled": true, - "loss_scale": 0, - "loss_scale_window": 1000, - "hysteresis": 2, - "min_loss_scale": 1 - }, - "activation_checkpointing": { - "partition_activations": false, - "contiguous_memory_optimization": false - }, - "wall_clock_breakdown": false -} -EOT - -export PL_DEEPSPEED_CONFIG_PATH=$config_json - -DATA_ARGS="\ - --dataset_name IDEA-CCNL/AFQMC \ - --train_batchsize $BATCH_SIZE \ - --valid_batchsize $VAL_BATCH_SIZE \ - --max_length 128 \ - --texta_name $TEXTA_NAME \ - --textb_name $TEXTB_NAME \ - --label_name $LABEL_NAME \ - --id_name $ID_NAME \ - " - -MODEL_ARGS="\ - --learning_rate 1e-5 \ - --weight_decay 1e-2 \ - --warmup_ratio 0.01 \ - --num_labels 2 \ - --model_type huggingface-auto \ - " - -MODEL_CHECKPOINT_ARGS="\ - --monitor val_acc \ - --save_top_k 3 \ - --mode max \ - --every_n_train_steps 0 \ - --save_weights_only True \ - --dirpath . \ - --filename model-{epoch:02d}-{val_acc:.4f} \ - " - - -TRAINER_ARGS="\ - --max_epochs 67 \ - --gpus 1 \ - --num_nodes 1 \ - --strategy deepspeed_stage_${ZERO_STAGE} \ - --gradient_clip_val 1.0 \ - --check_val_every_n_epoch 1 \ - --val_check_interval 1.0 \ - --precision 16 \ - --default_root_dir . 
\ - " - -options=" \ - --pretrained_model_path $MODEL_NAME \ - $DATA_ARGS \ - $MODEL_ARGS \ - $MODEL_CHECKPOINT_ARGS \ - $TRAINER_ARGS \ - " - -python3 finetune_classification.py $options - diff --git a/spaces/HaloMaster/chinesesummary/fengshen/examples/zen2_finetune/fs_zen2_base_iflytek.sh b/spaces/HaloMaster/chinesesummary/fengshen/examples/zen2_finetune/fs_zen2_base_iflytek.sh deleted file mode 100644 index 9171a7c3264a856915fd9147096f097b8ebd43c8..0000000000000000000000000000000000000000 --- a/spaces/HaloMaster/chinesesummary/fengshen/examples/zen2_finetune/fs_zen2_base_iflytek.sh +++ /dev/null @@ -1,93 +0,0 @@ -#!/bin/bash -#SBATCH --job-name=zen2_base_iflytek # create a short name for your job -#SBATCH --nodes=1 # node count -#SBATCH --ntasks=1 # total number of tasks across all nodes -#SBATCH --cpus-per-task=30 # cpu-cores per task (>1 if multi-threaded tasks) -#SBATCH --gres=gpu:1 # number of gpus per node -#SBATCH --mail-type=ALL # send email when job begins, ends or failed etc. -#SBATCH -o %x-%j.log # output and error file name (%x=job name, %j=job id) - - -export CUDA_VISIBLE_DEVICES='0' -export TORCH_EXTENSIONS_DIR=/cognitive_comp/ganruyi/tmp/torch_extendsions - -MODEL_NAME=zen2_base - -TASK=iflytek - -ZERO_STAGE=1 -STRATEGY=deepspeed_stage_${ZERO_STAGE} - -ROOT_DIR=/cognitive_comp/ganruyi/experiments/classification_finetune/${MODEL_NAME}_${TASK} -if [ ! -d ${ROOT_DIR} ];then - mkdir -p ${ROOT_DIR} - echo ${ROOT_DIR} created!!!!!!!!!!!!!! -else - echo ${ROOT_DIR} exist!!!!!!!!!!!!!!! -fi - -DATA_DIR=/cognitive_comp/yangping/data/ChineseCLUE_DATA/${TASK}_public/ -PRETRAINED_MODEL_PATH=/cognitive_comp/ganruyi/hf_models/zen/zh_zen_base_2.0 - -CHECKPOINT_PATH=${ROOT_DIR}/ckpt/ -OUTPUT_PATH=${ROOT_DIR}/predict.json - -DATA_ARGS="\ - --data_dir $DATA_DIR \ - --train_data train.json \ - --valid_data dev.json \ - --test_data test.json \ - --train_batchsize 32 \ - --valid_batchsize 16 \ - --max_seq_length 128 \ - --texta_name sentence \ - --label_name label \ - --id_name id \ - --task_name iflytek \ - " - -MODEL_ARGS="\ - --learning_rate 2e-5 \ - --weight_decay 0.1 \ - --warmup_ratio 0.01 \ - --num_labels 119 \ - " - -MODEL_CHECKPOINT_ARGS="\ - --monitor val_acc \ - --save_top_k 3 \ - --mode max \ - --every_n_train_steps 100 \ - --save_weights_only True \ - --dirpath $CHECKPOINT_PATH \ - --filename model-{epoch:02d}-{val_acc:.4f} \ - " - -TRAINER_ARGS="\ - --max_epochs 7 \ - --gpus 1 \ - --check_val_every_n_epoch 1 \ - --val_check_interval 100 \ - --default_root_dir $ROOT_DIR \ - " - - -options=" \ - --pretrained_model_path $PRETRAINED_MODEL_PATH \ - --vocab_file $PRETRAINED_MODEL_PATH/vocab.txt \ - --do_lower_case \ - --output_save_path $OUTPUT_PATH \ - $DATA_ARGS \ - $MODEL_ARGS \ - $MODEL_CHECKPOINT_ARGS \ - $TRAINER_ARGS \ -" -SCRIPT_PATH=/cognitive_comp/ganruyi/Fengshenbang-LM/fengshen/examples/zen2_finetune/fengshen_sequence_level_ft_task.py -/home/ganruyi/anaconda3/bin/python $SCRIPT_PATH $options - -# SINGULARITY_PATH=/cognitive_comp/ganruyi/pytorch21_06_py3_docker_image_v2.sif -# python3 $SCRIPT_PATH $options -# source activate base -# singularity exec --nv -B /cognitive_comp/:/cognitive_comp/ $SINGULARITY_PATH /home/ganruyi/anaconda3/bin/python $SCRIPT_PATH $options -# /home/ganruyi/anaconda3/bin/python $SCRIPT_PATH $options - diff --git a/spaces/Haokko/AronaTTS/modules.py b/spaces/Haokko/AronaTTS/modules.py deleted file mode 100644 index 9c7fd9cd6eb8b7e0ec0e08957e970744a374a924..0000000000000000000000000000000000000000 --- a/spaces/Haokko/AronaTTS/modules.py +++ /dev/null @@ 
-1,390 +0,0 @@ -import copy -import math -import numpy as np -import scipy -import torch -from torch import nn -from torch.nn import functional as F - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm - -import commons -from commons import init_weights, get_padding -from transforms import piecewise_rational_quadratic_transform - - -LRELU_SLOPE = 0.1 - - -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - - -class ConvReluNorm(nn.Module): - def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout): - super().__init__() - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.out_channels = out_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - assert n_layers > 1, "Number of layers should be larger than 0." - - self.conv_layers = nn.ModuleList() - self.norm_layers = nn.ModuleList() - self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential( - nn.ReLU(), - nn.Dropout(p_dropout)) - for _ in range(n_layers-1): - self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask): - x_org = x - for i in range(self.n_layers): - x = self.conv_layers[i](x * x_mask) - x = self.norm_layers[i](x) - x = self.relu_drop(x) - x = x_org + self.proj(x) - return x * x_mask - - -class DDSConv(nn.Module): - """ - Dialted and Depth-Separable Convolution - """ - def __init__(self, channels, kernel_size, n_layers, p_dropout=0.): - super().__init__() - self.channels = channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - - self.drop = nn.Dropout(p_dropout) - self.convs_sep = nn.ModuleList() - self.convs_1x1 = nn.ModuleList() - self.norms_1 = nn.ModuleList() - self.norms_2 = nn.ModuleList() - for i in range(n_layers): - dilation = kernel_size ** i - padding = (kernel_size * dilation - dilation) // 2 - self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size, - groups=channels, dilation=dilation, padding=padding - )) - self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) - self.norms_1.append(LayerNorm(channels)) - self.norms_2.append(LayerNorm(channels)) - - def forward(self, x, x_mask, g=None): - if g is not None: - x = x + g - for i in range(self.n_layers): - y = self.convs_sep[i](x * x_mask) - y = self.norms_1[i](y) - y = F.gelu(y) - y = self.convs_1x1[i](y) - y = self.norms_2[i](y) - y = F.gelu(y) - y = self.drop(y) - x = x + y - return x * x_mask - - -class WN(torch.nn.Module): - def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0): - super(WN, self).__init__() - assert(kernel_size % 2 == 1) - self.hidden_channels =hidden_channels - self.kernel_size = kernel_size, - self.dilation_rate = dilation_rate - self.n_layers = 
n_layers - self.gin_channels = gin_channels - self.p_dropout = p_dropout - - self.in_layers = torch.nn.ModuleList() - self.res_skip_layers = torch.nn.ModuleList() - self.drop = nn.Dropout(p_dropout) - - if gin_channels != 0: - cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight') - - for i in range(n_layers): - dilation = dilation_rate ** i - padding = int((kernel_size * dilation - dilation) / 2) - in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size, - dilation=dilation, padding=padding) - in_layer = torch.nn.utils.weight_norm(in_layer, name='weight') - self.in_layers.append(in_layer) - - # last one is not necessary - if i < n_layers - 1: - res_skip_channels = 2 * hidden_channels - else: - res_skip_channels = hidden_channels - - res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight') - self.res_skip_layers.append(res_skip_layer) - - def forward(self, x, x_mask, g=None, **kwargs): - output = torch.zeros_like(x) - n_channels_tensor = torch.IntTensor([self.hidden_channels]) - - if g is not None: - g = self.cond_layer(g) - - for i in range(self.n_layers): - x_in = self.in_layers[i](x) - if g is not None: - cond_offset = i * 2 * self.hidden_channels - g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:] - else: - g_l = torch.zeros_like(x_in) - - acts = commons.fused_add_tanh_sigmoid_multiply( - x_in, - g_l, - n_channels_tensor) - acts = self.drop(acts) - - res_skip_acts = self.res_skip_layers[i](acts) - if i < self.n_layers - 1: - res_acts = res_skip_acts[:,:self.hidden_channels,:] - x = (x + res_acts) * x_mask - output = output + res_skip_acts[:,self.hidden_channels:,:] - else: - output = output + res_skip_acts - return output * x_mask - - def remove_weight_norm(self): - if self.gin_channels != 0: - torch.nn.utils.remove_weight_norm(self.cond_layer) - for l in self.in_layers: - torch.nn.utils.remove_weight_norm(l) - for l in self.res_skip_layers: - torch.nn.utils.remove_weight_norm(l) - - -class ResBlock1(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.convs1 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]))) - ]) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))) - ]) - self.convs2.apply(init_weights) - - def forward(self, x, x_mask=None): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c2(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs1: - 
remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.convs = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))) - ]) - self.convs.apply(init_weights) - - def forward(self, x, x_mask=None): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class Log(nn.Module): - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask - logdet = torch.sum(-y, [1, 2]) - return y, logdet - else: - x = torch.exp(x) * x_mask - return x - - -class Flip(nn.Module): - def forward(self, x, *args, reverse=False, **kwargs): - x = torch.flip(x, [1]) - if not reverse: - logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) - return x, logdet - else: - return x - - -class ElementwiseAffine(nn.Module): - def __init__(self, channels): - super().__init__() - self.channels = channels - self.m = nn.Parameter(torch.zeros(channels,1)) - self.logs = nn.Parameter(torch.zeros(channels,1)) - - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = self.m + torch.exp(self.logs) * x - y = y * x_mask - logdet = torch.sum(self.logs * x_mask, [1,2]) - return y, logdet - else: - x = (x - self.m) * torch.exp(-self.logs) * x_mask - return x - - -class ResidualCouplingLayer(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=0, - gin_channels=0, - mean_only=False): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels) - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels]*2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1,2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x - - -class ConvFlow(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0): - super().__init__() - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.num_bins = num_bins - 
self.tail_bound = tail_bound - self.half_channels = in_channels // 2 - - self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) - self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.) - self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) - h = self.convs(h, x_mask, g=g) - h = self.proj(h) * x_mask - - b, c, t = x0.shape - h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] - - unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_derivatives = h[..., 2 * self.num_bins:] - - x1, logabsdet = piecewise_rational_quadratic_transform(x1, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=reverse, - tails='linear', - tail_bound=self.tail_bound - ) - - x = torch.cat([x0, x1], 1) * x_mask - logdet = torch.sum(logabsdet * x_mask, [1,2]) - if not reverse: - return x, logdet - else: - return x diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/speech_synthesis/preprocessing/get_feature_manifest.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/speech_synthesis/preprocessing/get_feature_manifest.py deleted file mode 100644 index 516f2cc469af9b417126dea1988698adac41d8ab..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/speech_synthesis/preprocessing/get_feature_manifest.py +++ /dev/null @@ -1,233 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import argparse -import logging -from pathlib import Path -import shutil -from tempfile import NamedTemporaryFile -from collections import Counter, defaultdict - -import pandas as pd -import torchaudio -from tqdm import tqdm - -from fairseq.data.audio.audio_utils import convert_waveform -from examples.speech_to_text.data_utils import ( - create_zip, - gen_config_yaml, - gen_vocab, - get_zip_manifest, - load_tsv_to_dicts, - save_df_to_tsv -) -from examples.speech_synthesis.data_utils import ( - extract_logmel_spectrogram, extract_pitch, extract_energy, get_global_cmvn, - ipa_phonemize, get_mfa_alignment, get_unit_alignment -) - - -log = logging.getLogger(__name__) - - -def process(args): - assert "train" in args.splits - out_root = Path(args.output_root).absolute() - out_root.mkdir(exist_ok=True) - - print("Fetching data...") - audio_manifest_root = Path(args.audio_manifest_root).absolute() - samples = [] - for s in args.splits: - for e in load_tsv_to_dicts(audio_manifest_root / f"{s}.audio.tsv"): - e["split"] = s - samples.append(e) - sample_ids = [s["id"] for s in samples] - - # Get alignment info - id_to_alignment = None - if args.textgrid_zip is not None: - assert args.id_to_units_tsv is None - id_to_alignment = get_mfa_alignment( - args.textgrid_zip, sample_ids, args.sample_rate, args.hop_length - ) - elif args.id_to_units_tsv is not None: - # assume identical hop length on the unit sequence - id_to_alignment = get_unit_alignment(args.id_to_units_tsv, sample_ids) - - # Extract features and pack features into ZIP - feature_name = "logmelspec80" - zip_path = out_root / f"{feature_name}.zip" - pitch_zip_path = out_root / "pitch.zip" - energy_zip_path = out_root / "energy.zip" - gcmvn_npz_path = out_root / "gcmvn_stats.npz" - if zip_path.exists() and gcmvn_npz_path.exists(): - print(f"{zip_path} and {gcmvn_npz_path} exist.") - else: - feature_root = out_root / feature_name - feature_root.mkdir(exist_ok=True) - pitch_root = out_root / "pitch" - energy_root = out_root / "energy" - if args.add_fastspeech_targets: - pitch_root.mkdir(exist_ok=True) - energy_root.mkdir(exist_ok=True) - print("Extracting Mel spectrogram features...") - for sample in tqdm(samples): - waveform, sample_rate = torchaudio.load(sample["audio"]) - waveform, sample_rate = convert_waveform( - waveform, sample_rate, normalize_volume=args.normalize_volume, - to_sample_rate=args.sample_rate - ) - sample_id = sample["id"] - target_length = None - if id_to_alignment is not None: - a = id_to_alignment[sample_id] - target_length = sum(a.frame_durations) - if a.start_sec is not None and a.end_sec is not None: - start_frame = int(a.start_sec * sample_rate) - end_frame = int(a.end_sec * sample_rate) - waveform = waveform[:, start_frame: end_frame] - extract_logmel_spectrogram( - waveform, sample_rate, feature_root / f"{sample_id}.npy", - win_length=args.win_length, hop_length=args.hop_length, - n_fft=args.n_fft, n_mels=args.n_mels, f_min=args.f_min, - f_max=args.f_max, target_length=target_length - ) - if args.add_fastspeech_targets: - assert id_to_alignment is not None - extract_pitch( - waveform, sample_rate, pitch_root / f"{sample_id}.npy", - hop_length=args.hop_length, log_scale=True, - phoneme_durations=id_to_alignment[sample_id].frame_durations - ) - extract_energy( - waveform, energy_root / f"{sample_id}.npy", - hop_length=args.hop_length, n_fft=args.n_fft, - log_scale=True, - phoneme_durations=id_to_alignment[sample_id].frame_durations - ) - print("ZIPing features...") - create_zip(feature_root, zip_path) - 
get_global_cmvn(feature_root, gcmvn_npz_path) - shutil.rmtree(feature_root) - if args.add_fastspeech_targets: - create_zip(pitch_root, pitch_zip_path) - shutil.rmtree(pitch_root) - create_zip(energy_root, energy_zip_path) - shutil.rmtree(energy_root) - - print("Fetching ZIP manifest...") - audio_paths, audio_lengths = get_zip_manifest(zip_path) - pitch_paths, pitch_lengths, energy_paths, energy_lengths = [None] * 4 - if args.add_fastspeech_targets: - pitch_paths, pitch_lengths = get_zip_manifest(pitch_zip_path) - energy_paths, energy_lengths = get_zip_manifest(energy_zip_path) - # Generate TSV manifest - print("Generating manifest...") - manifest_by_split = {split: defaultdict(list) for split in args.splits} - for sample in tqdm(samples): - sample_id, split = sample["id"], sample["split"] - normalized_utt = sample["tgt_text"] - if id_to_alignment is not None: - normalized_utt = " ".join(id_to_alignment[sample_id].tokens) - elif args.ipa_vocab: - normalized_utt = ipa_phonemize( - normalized_utt, lang=args.lang, use_g2p=args.use_g2p - ) - manifest_by_split[split]["id"].append(sample_id) - manifest_by_split[split]["audio"].append(audio_paths[sample_id]) - manifest_by_split[split]["n_frames"].append(audio_lengths[sample_id]) - manifest_by_split[split]["tgt_text"].append(normalized_utt) - manifest_by_split[split]["speaker"].append(sample["speaker"]) - manifest_by_split[split]["src_text"].append(sample["src_text"]) - if args.add_fastspeech_targets: - assert id_to_alignment is not None - duration = " ".join( - str(d) for d in id_to_alignment[sample_id].frame_durations - ) - manifest_by_split[split]["duration"].append(duration) - manifest_by_split[split]["pitch"].append(pitch_paths[sample_id]) - manifest_by_split[split]["energy"].append(energy_paths[sample_id]) - for split in args.splits: - save_df_to_tsv( - pd.DataFrame.from_dict(manifest_by_split[split]), - out_root / f"{split}.tsv" - ) - # Generate vocab - vocab_name, spm_filename = None, None - if id_to_alignment is not None or args.ipa_vocab: - vocab = Counter() - for t in manifest_by_split["train"]["tgt_text"]: - vocab.update(t.split(" ")) - vocab_name = "vocab.txt" - with open(out_root / vocab_name, "w") as f: - for s, c in vocab.most_common(): - f.write(f"{s} {c}\n") - else: - spm_filename_prefix = "spm_char" - spm_filename = f"{spm_filename_prefix}.model" - with NamedTemporaryFile(mode="w") as f: - for t in manifest_by_split["train"]["tgt_text"]: - f.write(t + "\n") - f.flush() # needed to ensure gen_vocab sees dumped text - gen_vocab(Path(f.name), out_root / spm_filename_prefix, "char") - # Generate speaker list - speakers = sorted({sample["speaker"] for sample in samples}) - speakers_path = out_root / "speakers.txt" - with open(speakers_path, "w") as f: - for speaker in speakers: - f.write(f"{speaker}\n") - # Generate config YAML - win_len_t = args.win_length / args.sample_rate - hop_len_t = args.hop_length / args.sample_rate - extra = { - "sample_rate": args.sample_rate, - "features": { - "type": "spectrogram+melscale+log", - "eps": 1e-2, "n_mels": args.n_mels, "n_fft": args.n_fft, - "window_fn": "hann", "win_length": args.win_length, - "hop_length": args.hop_length, "sample_rate": args.sample_rate, - "win_len_t": win_len_t, "hop_len_t": hop_len_t, - "f_min": args.f_min, "f_max": args.f_max, - "n_stft": args.n_fft // 2 + 1 - } - } - if len(speakers) > 1: - extra["speaker_set_filename"] = "speakers.txt" - gen_config_yaml( - out_root, spm_filename=spm_filename, vocab_name=vocab_name, - audio_root=out_root.as_posix(), input_channels=None, 
- input_feat_per_channel=None, specaugment_policy=None, - cmvn_type="global", gcmvn_path=gcmvn_npz_path, extra=extra - ) - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument("--audio-manifest-root", "-m", required=True, type=str) - parser.add_argument("--output-root", "-o", required=True, type=str) - parser.add_argument("--splits", "-s", type=str, nargs="+", - default=["train", "dev", "test"]) - parser.add_argument("--ipa-vocab", action="store_true") - parser.add_argument("--use-g2p", action="store_true") - parser.add_argument("--lang", type=str, default="en-us") - parser.add_argument("--win-length", type=int, default=1024) - parser.add_argument("--hop-length", type=int, default=256) - parser.add_argument("--n-fft", type=int, default=1024) - parser.add_argument("--n-mels", type=int, default=80) - parser.add_argument("--f-min", type=int, default=20) - parser.add_argument("--f-max", type=int, default=8000) - parser.add_argument("--sample-rate", type=int, default=22050) - parser.add_argument("--normalize-volume", "-n", action="store_true") - parser.add_argument("--textgrid-zip", type=str, default=None) - parser.add_argument("--id-to-units-tsv", type=str, default=None) - parser.add_argument("--add-fastspeech-targets", action="store_true") - args = parser.parse_args() - - process(args) - - -if __name__ == "__main__": - main() diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/wav2vec/wav2vec_manifest.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/wav2vec/wav2vec_manifest.py deleted file mode 100644 index 9b8aa180e88d9ee98bdca7089aed5046ec0d9cb9..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/wav2vec/wav2vec_manifest.py +++ /dev/null @@ -1,87 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. -""" -Data pre-processing: build vocabularies and binarize training data. -""" - -import argparse -import glob -import os -import random - -import soundfile - - -def get_parser(): - parser = argparse.ArgumentParser() - parser.add_argument( - "root", metavar="DIR", help="root directory containing flac files to index" - ) - parser.add_argument( - "--valid-percent", - default=0.01, - type=float, - metavar="D", - help="percentage of data to use as validation set (between 0 and 1)", - ) - parser.add_argument( - "--dest", default=".", type=str, metavar="DIR", help="output directory" - ) - parser.add_argument( - "--ext", default="flac", type=str, metavar="EXT", help="extension to look for" - ) - parser.add_argument("--seed", default=42, type=int, metavar="N", help="random seed") - parser.add_argument( - "--path-must-contain", - default=None, - type=str, - metavar="FRAG", - help="if set, path must contain this substring for a file to be included in the manifest", - ) - return parser - - -def main(args): - assert args.valid_percent >= 0 and args.valid_percent <= 1.0 - - if not os.path.exists(args.dest): - os.makedirs(args.dest) - - dir_path = os.path.realpath(args.root) - search_path = os.path.join(dir_path, "**/*." 
+ args.ext) - rand = random.Random(args.seed) - - valid_f = ( - open(os.path.join(args.dest, "valid.tsv"), "w") - if args.valid_percent > 0 - else None - ) - - with open(os.path.join(args.dest, "train.tsv"), "w") as train_f: - print(dir_path, file=train_f) - - if valid_f is not None: - print(dir_path, file=valid_f) - - for fname in glob.iglob(search_path, recursive=True): - file_path = os.path.realpath(fname) - - if args.path_must_contain and args.path_must_contain not in file_path: - continue - - frames = soundfile.info(fname).frames - dest = train_f if rand.random() > args.valid_percent else valid_f - print( - "{}\t{}".format(os.path.relpath(file_path, dir_path), frames), file=dest - ) - if valid_f is not None: - valid_f.close() - - -if __name__ == "__main__": - parser = get_parser() - args = parser.parse_args() - main(args) diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/tests/distributed/utils.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/tests/distributed/utils.py deleted file mode 100644 index c8040392a8e27eb4c3a74032c702643a91d11a3e..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/tests/distributed/utils.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import functools -import tempfile - -import torch - - -def spawn_and_init(fn, world_size, args=None): - if args is None: - args = () - with tempfile.NamedTemporaryFile(delete=False) as tmp_file: - torch.multiprocessing.spawn( - fn=functools.partial(init_and_run, fn, args), - args=(world_size, tmp_file.name,), - nprocs=world_size, - join=True, - ) - - -def distributed_init(rank, world_size, tmp_file): - torch.distributed.init_process_group( - backend="nccl", - init_method="file://{}".format(tmp_file), - world_size=world_size, - rank=rank, - ) - torch.cuda.set_device(rank) - - -def init_and_run(fn, args, rank, world_size, tmp_file): - distributed_init(rank, world_size, tmp_file) - group = torch.distributed.new_group() - fn(rank, group, *args) - - -def objects_are_equal(a, b) -> bool: - if type(a) is not type(b): - return False - if isinstance(a, dict): - if set(a.keys()) != set(b.keys()): - return False - for k in a.keys(): - if not objects_are_equal(a[k], b[k]): - return False - return True - elif isinstance(a, (list, tuple, set)): - if len(a) != len(b): - return False - return all(objects_are_equal(x, y) for x, y in zip(a, b)) - elif torch.is_tensor(a): - return ( - a.size() == b.size() - and a.dtype == b.dtype - and a.device == b.device - and torch.all(a == b) - ) - else: - return a == b diff --git a/spaces/Harveenchadha/Vakyansh-Hindi-TTS/ttsv/src/glow_tts/t2s_fastapi.py b/spaces/Harveenchadha/Vakyansh-Hindi-TTS/ttsv/src/glow_tts/t2s_fastapi.py deleted file mode 100644 index e034fc01a4a5bcd54b365a49dad2e907b57504a1..0000000000000000000000000000000000000000 --- a/spaces/Harveenchadha/Vakyansh-Hindi-TTS/ttsv/src/glow_tts/t2s_fastapi.py +++ /dev/null @@ -1,63 +0,0 @@ -from starlette.responses import StreamingResponse -from texttospeech import MelToWav, TextToMel -from typing import Optional -from pydantic import BaseModel -from fastapi import FastAPI, HTTPException -import uvicorn -import base64 - -app = FastAPI() - - -class TextJson(BaseModel): - text: str - lang: Optional[str] = "hi" - gender: Optional[str] = "male" - - -glow_hi_male = TextToMel(glow_model_dir="", device="") -glow_hi_female = 
TextToMel(glow_model_dir="", device="") -hifi_hi = MelToWav(hifi_model_dir="", device="") - - -available_choice = { - "hi_male": [glow_hi_male, hifi_hi], - "hi_female": [glow_hi_female, hifi_hi], -} - - -@app.post("/TTS/") -async def tts(input: TextJson): - text = input.text - lang = input.lang - gender = input.gender - - choice = lang + "_" + gender - if choice in available_choice.keys(): - t2s = available_choice[choice] - else: - raise HTTPException( - status_code=400, detail={"error": "Requested model not found"} - ) - - if text: - mel = t2s[0].generate_mel(text) - data, sr = t2s[1].generate_wav(mel) - t2s.save_audio("out.wav", data, sr) - else: - raise HTTPException(status_code=400, detail={"error": "No text"}) - - ## to return outpur as a file - # audio = open('out.wav', mode='rb') - # return StreamingResponse(audio, media_type="audio/wav") - - with open("out.wav", "rb") as audio_file: - encoded_bytes = base64.b64encode(audio_file.read()) - encoded_string = encoded_bytes.decode() - return {"encoding": "base64", "data": encoded_string, "sr": sr} - - -if __name__ == "__main__": - uvicorn.run( - "t2s_fastapi:app", host="127.0.0.1", port=5000, log_level="info", reload=True - ) diff --git a/spaces/Harveenchadha/hindi-speech-recognition-vakyansh-wav2vec2/app.py b/spaces/Harveenchadha/hindi-speech-recognition-vakyansh-wav2vec2/app.py deleted file mode 100644 index cd94b9ef0a51d8003812f21034c8e2d904f38106..0000000000000000000000000000000000000000 --- a/spaces/Harveenchadha/hindi-speech-recognition-vakyansh-wav2vec2/app.py +++ /dev/null @@ -1,65 +0,0 @@ -import soundfile as sf -import torch -from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor,Wav2Vec2ProcessorWithLM -import gradio as gr -import sox -import subprocess - - -def read_file_and_process(wav_file): - filename = wav_file.split('.')[0] - filename_16k = filename + "16k.wav" - resampler(wav_file, filename_16k) - speech, _ = sf.read(filename_16k) - inputs = processor(speech, sampling_rate=16_000, return_tensors="pt", padding=True) - - return inputs - - -def resampler(input_file_path, output_file_path): - command = ( - f"ffmpeg -hide_banner -loglevel panic -i {input_file_path} -ar 16000 -ac 1 -bits_per_raw_sample 16 -vn " - f"{output_file_path}" - ) - subprocess.call(command, shell=True) - - -def parse_transcription_with_lm(logits): - result = processor_with_LM.batch_decode(logits.cpu().numpy()) - text = result.text - transcription = text[0].replace('','') - return transcription - -def parse_transcription(logits): - predicted_ids = torch.argmax(logits, dim=-1) - transcription = processor.decode(predicted_ids[0], skip_special_tokens=True) - return transcription - -def parse(wav_file, applyLM): - input_values = read_file_and_process(wav_file) - with torch.no_grad(): - logits = model(**input_values).logits - - if applyLM: - return parse_transcription_with_lm(logits) - else: - return parse_transcription(logits) - - -model_id = "Harveenchadha/vakyansh-wav2vec2-hindi-him-4200" -processor = Wav2Vec2Processor.from_pretrained(model_id) -processor_with_LM = Wav2Vec2ProcessorWithLM.from_pretrained(model_id) -model = Wav2Vec2ForCTC.from_pretrained(model_id) - - -input_ = gr.Audio(source="microphone", type="filepath") -txtbox = gr.Textbox( - label="Output from model will appear here:", - lines=5 - ) -chkbox = gr.Checkbox(label="Apply LM", value=False) - - -gr.Interface(parse, inputs = [input_, chkbox], outputs=txtbox, - streaming=True, interactive=True, - analytics_enabled=False, show_tips=False, enable_queue=True).launch(inline=False); \ No 
newline at end of file diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/clib/libbase/balanced_assignment.cpp b/spaces/ICML2022/OFA/fairseq/fairseq/clib/libbase/balanced_assignment.cpp deleted file mode 100644 index 1a5a1061f3892be5a17e49192f744c39e0d395e8..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/fairseq/clib/libbase/balanced_assignment.cpp +++ /dev/null @@ -1,109 +0,0 @@ -/** - * Copyright 2017-present, Facebook, Inc. - * All rights reserved. - * - * This source code is licensed under the license found in the - * LICENSE file in the root directory of this source tree. - */ - -/* -C++ code for solving the linear assignment problem. -Based on the Auction Algorithm from -https://dspace.mit.edu/bitstream/handle/1721.1/3265/P-2108-26912652.pdf and the -implementation from: https://github.com/bkj/auction-lap Adapted to be more -efficient when each worker is looking for k jobs instead of 1. -*/ -#include -#include -using namespace torch::indexing; -torch::Tensor balanced_assignment(torch::Tensor job_and_worker_to_score) { - int max_iterations = 100; - torch::Tensor epsilon = - (job_and_worker_to_score.max() - job_and_worker_to_score.min()) / 50; - epsilon.clamp_min_(1e-04); - torch::Tensor worker_and_job_to_score = - job_and_worker_to_score.detach().transpose(0, 1).contiguous(); - int num_workers = worker_and_job_to_score.size(0); - int num_jobs = worker_and_job_to_score.size(1); - auto device = worker_and_job_to_score.device(); - int jobs_per_worker = num_jobs / num_workers; - torch::Tensor value = worker_and_job_to_score.clone(); - int counter = 0; - torch::Tensor max_value = worker_and_job_to_score.max(); - - torch::Tensor bid_indices; - torch::Tensor cost = worker_and_job_to_score.new_zeros({1, num_jobs}); - torch::Tensor bids = - worker_and_job_to_score.new_empty({num_workers, num_jobs}); - torch::Tensor bid_increments = - worker_and_job_to_score.new_empty({num_workers, jobs_per_worker}); - torch::Tensor top_values = - worker_and_job_to_score.new_empty({num_workers, jobs_per_worker + 1}); - torch::Tensor high_bids = worker_and_job_to_score.new_empty({num_jobs}); - - torch::Tensor top_index = top_values.to(torch::kLong); - torch::Tensor high_bidders = top_index.new_empty({num_jobs}); - torch::Tensor have_bids = high_bidders.to(torch::kBool); - torch::Tensor jobs_indices = - torch::arange({num_jobs}, torch::dtype(torch::kLong).device(device)); - torch::Tensor true_tensor = - torch::ones({1}, torch::dtype(torch::kBool).device(device)); - - while (true) { - bids.zero_(); - torch::topk_out(top_values, top_index, value, jobs_per_worker + 1, 1); - - // Each worker bids the difference in value between that job and the k+1th - // job - torch::sub_out( - bid_increments, - top_values.index({Slice(None, None), Slice(0, jobs_per_worker)}), - top_values.index({Slice(None, None), jobs_per_worker}).unsqueeze(1)); - - bid_increments.add_(epsilon); - bids.scatter_( - 1, - top_index.index({Slice(None, None), Slice(0, jobs_per_worker)}), - bid_increments); - - if (counter < max_iterations && counter > 0) { - // Put in a minimal bid to retain items from the last round if no-one else - // bids for them this round - bids.view(-1).index_put_({bid_indices}, epsilon); - } - - // Find the highest bidding worker per job - torch::max_out(high_bids, high_bidders, bids, 0); - torch::gt_out(have_bids, high_bids, 0); - - if (have_bids.all().item()) { - // All jobs were bid for - break; - } - - // Make popular items more expensive - cost.add_(high_bids); - torch::sub_out(value, 
worker_and_job_to_score, cost); - - bid_indices = ((high_bidders * num_jobs) + jobs_indices).index({have_bids}); - - if (counter < max_iterations) { - // Make sure that this item will be in the winning worker's top-k next - // time. - value.view(-1).index_put_({bid_indices}, max_value); - } else { - // Suboptimal approximation that converges quickly from current solution - value.view(-1).index_put_( - {bid_indices}, worker_and_job_to_score.view(-1).index({bid_indices})); - } - - counter += 1; - } - - return top_index.index({Slice(None, None), Slice(0, jobs_per_worker)}) - .reshape(-1); -} - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { - m.def("balanced_assignment", &balanced_assignment, "Balanced Assignment"); -} diff --git a/spaces/ICML2022/resefa/third_party/stylegan3_official_ops/grid_sample_gradfix.py b/spaces/ICML2022/resefa/third_party/stylegan3_official_ops/grid_sample_gradfix.py deleted file mode 100644 index c3d9cd591a13e146eeeedddbef28871d7c3a0742..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/resefa/third_party/stylegan3_official_ops/grid_sample_gradfix.py +++ /dev/null @@ -1,92 +0,0 @@ -# python3.7 - -# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -"""Custom replacement for `torch.nn.functional.grid_sample`. - -This is useful for differentiable augmentation. This customized operator -supports arbitrarily high order gradients between the input and output. Only -works on 2D images and assumes `mode=bilinear`, `padding_mode=zeros`, and -`align_corners=False`. - -Please refer to https://github.com/NVlabs/stylegan3 -""" - -# pylint: disable=redefined-builtin -# pylint: disable=arguments-differ -# pylint: disable=protected-access -# pylint: disable=line-too-long -# pylint: disable=missing-function-docstring - -import torch - -#---------------------------------------------------------------------------- - -enabled = True # Enable the custom op by setting this to true. 
- -#---------------------------------------------------------------------------- - -def grid_sample(input, grid, impl='cuda'): - if impl == 'cuda' and _should_use_custom_op(): - return _GridSample2dForward.apply(input, grid) - return torch.nn.functional.grid_sample(input=input, grid=grid, mode='bilinear', padding_mode='zeros', align_corners=False) - -#---------------------------------------------------------------------------- - -def _should_use_custom_op(): - return enabled - -#---------------------------------------------------------------------------- - -class _GridSample2dForward(torch.autograd.Function): - @staticmethod - def forward(ctx, input, grid): - assert input.ndim == 4 - assert grid.ndim == 4 - output = torch.nn.functional.grid_sample(input=input, grid=grid, mode='bilinear', padding_mode='zeros', align_corners=False) - ctx.save_for_backward(input, grid) - return output - - @staticmethod - def backward(ctx, grad_output): - input, grid = ctx.saved_tensors - grad_input, grad_grid = _GridSample2dBackward.apply(grad_output, input, grid) - return grad_input, grad_grid - -#---------------------------------------------------------------------------- - -class _GridSample2dBackward(torch.autograd.Function): - @staticmethod - def forward(ctx, grad_output, input, grid): - op = torch._C._jit_get_operation('aten::grid_sampler_2d_backward') - grad_input, grad_grid = op(grad_output, input, grid, 0, 0, False) - ctx.save_for_backward(grid) - return grad_input, grad_grid - - @staticmethod - def backward(ctx, grad2_grad_input, grad2_grad_grid): - _ = grad2_grad_grid # unused - grid, = ctx.saved_tensors - grad2_grad_output = None - grad2_input = None - grad2_grid = None - - if ctx.needs_input_grad[0]: - grad2_grad_output = _GridSample2dForward.apply(grad2_grad_input, grid) - - assert not ctx.needs_input_grad[2] - return grad2_grad_output, grad2_input, grad2_grid - -#---------------------------------------------------------------------------- - -# pylint: enable=redefined-builtin -# pylint: enable=arguments-differ -# pylint: enable=protected-access -# pylint: enable=line-too-long -# pylint: enable=missing-function-docstring diff --git a/spaces/Ikaros521/so-vits-svc-4.0-ikaros/inference/__init__.py b/spaces/Ikaros521/so-vits-svc-4.0-ikaros/inference/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Intae/deepfake/training/datasets/classifier_dataset.py b/spaces/Intae/deepfake/training/datasets/classifier_dataset.py deleted file mode 100644 index 28bcb6c6418ea0d0da05366704af5d0945a0f953..0000000000000000000000000000000000000000 --- a/spaces/Intae/deepfake/training/datasets/classifier_dataset.py +++ /dev/null @@ -1,378 +0,0 @@ -import math -import os -import random -import sys -import traceback - -import cv2 -import numpy as np -import pandas as pd -import skimage.draw -from albumentations import ImageCompression, OneOf, GaussianBlur, Blur -from albumentations.augmentations.functional import image_compression, rot90 -from albumentations.pytorch.functional import img_to_tensor -from scipy.ndimage import binary_erosion, binary_dilation -from skimage import measure -from torch.utils.data import Dataset -import dlib - -from training.datasets.validation_set import PUBLIC_SET - - -def prepare_bit_masks(mask): - h, w = mask.shape - mid_w = w // 2 - mid_h = w // 2 - masks = [] - ones = np.ones_like(mask) - ones[:mid_h] = 0 - masks.append(ones) - ones = np.ones_like(mask) - ones[mid_h:] = 0 - masks.append(ones) - 
ones = np.ones_like(mask) - ones[:, :mid_w] = 0 - masks.append(ones) - ones = np.ones_like(mask) - ones[:, mid_w:] = 0 - masks.append(ones) - ones = np.ones_like(mask) - ones[:mid_h, :mid_w] = 0 - ones[mid_h:, mid_w:] = 0 - masks.append(ones) - ones = np.ones_like(mask) - ones[:mid_h, mid_w:] = 0 - ones[mid_h:, :mid_w] = 0 - masks.append(ones) - return masks - - -detector = dlib.get_frontal_face_detector() -predictor = dlib.shape_predictor('libs/shape_predictor_68_face_landmarks.dat') - - -def blackout_convex_hull(img): - try: - rect = detector(img)[0] - sp = predictor(img, rect) - landmarks = np.array([[p.x, p.y] for p in sp.parts()]) - outline = landmarks[[*range(17), *range(26, 16, -1)]] - Y, X = skimage.draw.polygon(outline[:, 1], outline[:, 0]) - cropped_img = np.zeros(img.shape[:2], dtype=np.uint8) - cropped_img[Y, X] = 1 - # if random.random() > 0.5: - # img[cropped_img == 0] = 0 - # #leave only face - # return img - - y, x = measure.centroid(cropped_img) - y = int(y) - x = int(x) - first = random.random() > 0.5 - if random.random() > 0.5: - if first: - cropped_img[:y, :] = 0 - else: - cropped_img[y:, :] = 0 - else: - if first: - cropped_img[:, :x] = 0 - else: - cropped_img[:, x:] = 0 - - img[cropped_img > 0] = 0 - except Exception as e: - pass - - -def dist(p1, p2): - return math.sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2) - - -def remove_eyes(image, landmarks): - image = image.copy() - (x1, y1), (x2, y2) = landmarks[:2] - mask = np.zeros_like(image[..., 0]) - line = cv2.line(mask, (x1, y1), (x2, y2), color=(1), thickness=2) - w = dist((x1, y1), (x2, y2)) - dilation = int(w // 4) - line = binary_dilation(line, iterations=dilation) - image[line, :] = 0 - return image - - -def remove_nose(image, landmarks): - image = image.copy() - (x1, y1), (x2, y2) = landmarks[:2] - x3, y3 = landmarks[2] - mask = np.zeros_like(image[..., 0]) - x4 = int((x1 + x2) / 2) - y4 = int((y1 + y2) / 2) - line = cv2.line(mask, (x3, y3), (x4, y4), color=(1), thickness=2) - w = dist((x1, y1), (x2, y2)) - dilation = int(w // 4) - line = binary_dilation(line, iterations=dilation) - image[line, :] = 0 - return image - - -def remove_mouth(image, landmarks): - image = image.copy() - (x1, y1), (x2, y2) = landmarks[-2:] - mask = np.zeros_like(image[..., 0]) - line = cv2.line(mask, (x1, y1), (x2, y2), color=(1), thickness=2) - w = dist((x1, y1), (x2, y2)) - dilation = int(w // 3) - line = binary_dilation(line, iterations=dilation) - image[line, :] = 0 - return image - - -def remove_landmark(image, landmarks): - if random.random() > 0.5: - image = remove_eyes(image, landmarks) - elif random.random() > 0.5: - image = remove_mouth(image, landmarks) - elif random.random() > 0.5: - image = remove_nose(image, landmarks) - return image - - -def change_padding(image, part=5): - h, w = image.shape[:2] - # original padding was done with 1/3 from each side, too much - pad_h = int(((3 / 5) * h) / part) - pad_w = int(((3 / 5) * w) / part) - image = image[h // 5 - pad_h:-h // 5 + pad_h, w // 5 - pad_w:-w // 5 + pad_w] - return image - - -def blackout_random(image, mask, label): - binary_mask = mask > 0.4 * 255 - h, w = binary_mask.shape[:2] - - tries = 50 - current_try = 1 - while current_try < tries: - first = random.random() < 0.5 - if random.random() < 0.5: - pivot = random.randint(h // 2 - h // 5, h // 2 + h // 5) - bitmap_msk = np.ones_like(binary_mask) - if first: - bitmap_msk[:pivot, :] = 0 - else: - bitmap_msk[pivot:, :] = 0 - else: - pivot = random.randint(w // 2 - w // 5, w // 2 + w // 5) - bitmap_msk = 
np.ones_like(binary_mask) - if first: - bitmap_msk[:, :pivot] = 0 - else: - bitmap_msk[:, pivot:] = 0 - - if label < 0.5 and np.count_nonzero(image * np.expand_dims(bitmap_msk, axis=-1)) / 3 > (h * w) / 5 \ - or np.count_nonzero(binary_mask * bitmap_msk) > 40: - mask *= bitmap_msk - image *= np.expand_dims(bitmap_msk, axis=-1) - break - current_try += 1 - return image - - -def blend_original(img): - img = img.copy() - h, w = img.shape[:2] - rect = detector(img) - if len(rect) == 0: - return img - else: - rect = rect[0] - sp = predictor(img, rect) - landmarks = np.array([[p.x, p.y] for p in sp.parts()]) - outline = landmarks[[*range(17), *range(26, 16, -1)]] - Y, X = skimage.draw.polygon(outline[:, 1], outline[:, 0]) - raw_mask = np.zeros(img.shape[:2], dtype=np.uint8) - raw_mask[Y, X] = 1 - face = img * np.expand_dims(raw_mask, -1) - - # add warping - h1 = random.randint(h - h // 2, h + h // 2) - w1 = random.randint(w - w // 2, w + w // 2) - while abs(h1 - h) < h // 3 and abs(w1 - w) < w // 3: - h1 = random.randint(h - h // 2, h + h // 2) - w1 = random.randint(w - w // 2, w + w // 2) - face = cv2.resize(face, (w1, h1), interpolation=random.choice([cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC])) - face = cv2.resize(face, (w, h), interpolation=random.choice([cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC])) - - raw_mask = binary_erosion(raw_mask, iterations=random.randint(4, 10)) - img[raw_mask, :] = face[raw_mask, :] - if random.random() < 0.2: - img = OneOf([GaussianBlur(), Blur()], p=0.5)(image=img)["image"] - # image compression - if random.random() < 0.5: - img = ImageCompression(quality_lower=40, quality_upper=95)(image=img)["image"] - return img - - -class DeepFakeClassifierDataset(Dataset): - - def __init__(self, - data_path="/mnt/sota/datasets/deepfake", - fold=0, - label_smoothing=0.01, - padding_part=3, - hardcore=True, - crops_dir="crops", - folds_csv="folds.csv", - normalize={"mean": [0.485, 0.456, 0.406], - "std": [0.229, 0.224, 0.225]}, - rotation=False, - mode="train", - reduce_val=True, - oversample_real=True, - transforms=None - ): - super().__init__() - self.data_root = data_path - self.fold = fold - self.folds_csv = folds_csv - self.mode = mode - self.rotation = rotation - self.padding_part = padding_part - self.hardcore = hardcore - self.crops_dir = crops_dir - self.label_smoothing = label_smoothing - self.normalize = normalize - self.transforms = transforms - self.df = pd.read_csv(self.folds_csv) - self.oversample_real = oversample_real - self.reduce_val = reduce_val - - def __getitem__(self, index: int): - - while True: - video, img_file, label, ori_video, frame, fold = self.data[index] - try: - if self.mode == "train": - label = np.clip(label, self.label_smoothing, 1 - self.label_smoothing) - img_path = os.path.join(self.data_root, self.crops_dir, video, img_file) - image = cv2.imread(img_path, cv2.IMREAD_COLOR) - image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) - mask = np.zeros(image.shape[:2], dtype=np.uint8) - diff_path = os.path.join(self.data_root, "diffs", video, img_file[:-4] + "_diff.png") - try: - msk = cv2.imread(diff_path, cv2.IMREAD_GRAYSCALE) - if msk is not None: - mask = msk - except: - print("not found mask", diff_path) - pass - if self.mode == "train" and self.hardcore and not self.rotation: - landmark_path = os.path.join(self.data_root, "landmarks", ori_video, img_file[:-4] + ".npy") - if os.path.exists(landmark_path) and random.random() < 0.7: - landmarks = np.load(landmark_path) - image = remove_landmark(image, landmarks) - elif 
random.random() < 0.2: - blackout_convex_hull(image) - elif random.random() < 0.1: - binary_mask = mask > 0.4 * 255 - masks = prepare_bit_masks((binary_mask * 1).astype(np.uint8)) - tries = 6 - current_try = 1 - while current_try < tries: - bitmap_msk = random.choice(masks) - if label < 0.5 or np.count_nonzero(mask * bitmap_msk) > 20: - mask *= bitmap_msk - image *= np.expand_dims(bitmap_msk, axis=-1) - break - current_try += 1 - if self.mode == "train" and self.padding_part > 3: - image = change_padding(image, self.padding_part) - valid_label = np.count_nonzero(mask[mask > 20]) > 32 or label < 0.5 - valid_label = 1 if valid_label else 0 - rotation = 0 - if self.transforms: - data = self.transforms(image=image, mask=mask) - image = data["image"] - mask = data["mask"] - if self.mode == "train" and self.hardcore and self.rotation: - # landmark_path = os.path.join(self.data_root, "landmarks", ori_video, img_file[:-4] + ".npy") - dropout = 0.8 if label > 0.5 else 0.6 - if self.rotation: - dropout *= 0.7 - elif random.random() < dropout: - blackout_random(image, mask, label) - - # - # os.makedirs("../images", exist_ok=True) - # cv2.imwrite(os.path.join("../images", video+ "_" + str(1 if label > 0.5 else 0) + "_"+img_file), image[...,::-1]) - - if self.mode == "train" and self.rotation: - rotation = random.randint(0, 3) - image = rot90(image, rotation) - - image = img_to_tensor(image, self.normalize) - return {"image": image, "labels": np.array((label,)), "img_name": os.path.join(video, img_file), - "valid": valid_label, "rotations": rotation} - except Exception as e: - traceback.print_exc(file=sys.stdout) - print("Broken image", os.path.join(self.data_root, self.crops_dir, video, img_file)) - index = random.randint(0, len(self.data) - 1) - - def random_blackout_landmark(self, image, mask, landmarks): - x, y = random.choice(landmarks) - first = random.random() > 0.5 - # crop half face either vertically or horizontally - if random.random() > 0.5: - # width - if first: - image[:, :x] = 0 - mask[:, :x] = 0 - else: - image[:, x:] = 0 - mask[:, x:] = 0 - else: - # height - if first: - image[:y, :] = 0 - mask[:y, :] = 0 - else: - image[y:, :] = 0 - mask[y:, :] = 0 - - def reset(self, epoch, seed): - self.data = self._prepare_data(epoch, seed) - - def __len__(self) -> int: - return len(self.data) - - def _prepare_data(self, epoch, seed): - df = self.df - if self.mode == "train": - rows = df[df["fold"] != self.fold] - else: - rows = df[df["fold"] == self.fold] - seed = (epoch + 1) * seed - if self.oversample_real: - rows = self._oversample(rows, seed) - if self.mode == "val" and self.reduce_val: - # every 2nd frame, to speed up validation - rows = rows[rows["frame"] % 20 == 0] - # another option is to use public validation set - #rows = rows[rows["video"].isin(PUBLIC_SET)] - - print( - "real {} fakes {} mode {}".format(len(rows[rows["label"] == 0]), len(rows[rows["label"] == 1]), self.mode)) - data = rows.values - - np.random.seed(seed) - np.random.shuffle(data) - return data - - def _oversample(self, rows: pd.DataFrame, seed): - real = rows[rows["label"] == 0] - fakes = rows[rows["label"] == 1] - num_real = real["video"].count() - if self.mode == "train": - fakes = fakes.sample(n=num_real, replace=False, random_state=seed) - return pd.concat([real, fakes]) diff --git a/spaces/Intel/Stable-Diffusion-Side-by-Side/app.py b/spaces/Intel/Stable-Diffusion-Side-by-Side/app.py deleted file mode 100644 index c6396abaf17156b87f77eb6e527b63e89c446df1..0000000000000000000000000000000000000000 --- 
a/spaces/Intel/Stable-Diffusion-Side-by-Side/app.py +++ /dev/null @@ -1,303 +0,0 @@ -import os -import gradio as gr -import numpy as np -import random -import torch -import subprocess -import time -import requests -import json -import threading - -import base64 -from io import BytesIO -from PIL import Image -from huggingface_hub import login - - -myip_spr = os.environ["myip_spr"] -myip_clx = os.environ["myip_clx"] -myport = os.environ["myport"] - -SPR = f"http://{myip_spr}:{myport}" -CLX = f"http://{myip_clx}:{myport}" - - -print('=='*20) -print(os.system("hostname -i")) -print(SPR) -print(CLX) - -prompt_examples_list = [ - ['A cascading waterfall tumbles down moss-covered rocks, surrounded by a lush and vibrant forest.'], - ['In a serene garden, delicate cherry blossoms fall like pink snowflakes.'], - ['A breathtaking mountain range towers above a picturesque valley, with a winding river reflecting the surrounding beauty.'], - ['A serene beach scene with turquoise waters, palm trees swaying in the breeze, and a radiant sunset painting the sky in hues of orange and pink.'], - ['After the rain, sunlight breaks through the clouds, illuminating the verdant fields.'] - ] -CN_prompt_examples_list = [ - ['瀑布从长满苔藓的岩石上奔流而下,周围是一片茂密而充满活力的森林。'], - ['在一个宁静的花园里,精致的樱花像粉色的雪花一样飘落。'], - ['壮丽的山脉高耸在风景如画的山谷之上,一条蜿蜒的河流映衬着周围的美景。'], - ['一个宁静的海滩场景,湛蓝的海水,微风中摇曳的棕榈树,夺目的日落将天空染成橙色和粉红色的色调。'], - ['雨后,阳光穿过云层,照亮了青翠的田野。'] -] - -def update_language(value): - if value == "zh-CN": - return [gr.update(visible=False), gr.update(visible=True)] - else: - return [gr.update(visible=True), gr.update(visible=False)] - -def url_requests(url, data): - resp = requests.post(url, data=json.dumps(data)) - img_str = json.loads(resp.text)["img_str"] - location = json.loads(resp.text)["ip"] - - img_byte = base64.b64decode(img_str) - img_io = BytesIO(img_byte) # convert image to file-like object - img = Image.open(img_io) # img is now PIL Image object - - return img, location - -def img2img_generate(url, source_img, prompt, steps=25, strength=0.75, seed=42, guidance_scale=7.5, hidden=""): - - if hidden != os.environ["front_token"]: - return None - - print('=*'*20) - print(type(source_img)) - print("prompt: ", prompt) - buffered = BytesIO() - source_img.save(buffered, format="JPEG") - img_b64 = base64.b64encode(buffered.getvalue()) - - data = {"source_img": img_b64.decode(), "prompt": prompt, "steps": steps, - "guidance_scale": guidance_scale, "seed": seed, "strength": strength, - "token": os.environ["access_token"]} - - start_time = time.time() - img, location = url_requests(url, data) - print("*="*20) - print("location: ", location) - print("cost: ", time.time() - start_time) - - return img - -def toggle_content(): - if toggle_content.collapsed: - toggle_content.collapsed = False - return "Content expanded" - else: - toggle_content.collapsed = True - return "Content collapsed" - -def txt2img_example_input(value): - print('6/12/2023', value) - return value - -def txt2img_generate(url, prompt, steps=25, seed=42, guidance_scale=7.5, hidden=""): - - if hidden != os.environ["front_token"]: - return None - - print("prompt: ", prompt) - print("steps: ", steps) - print("url: ", url) - data = {"prompt": prompt, - "steps": steps, "guidance_scale": guidance_scale, "seed": seed, - "token": os.environ["access_token"]} - start_time = time.time() - img, location = url_requests(url, data) - - print("*="*20) - print("location: ", location) - print("cost: ", time.time() - start_time) - - return img - -title = """ -# Stable Diffusion Inference Acceleration 
Comparison -""" -CN_title = """ -# Stable Diffusion 推理加速比较 -""" - -subtitle = """ -# between 4th Gen and 3rd Gen Intel Xeon Scalable Processor -""" -CN_subtitle = """ -## 第四代和第三代英特尔至强可扩展处理器 -""" - -md = """ -Have fun and try your own prompts and see a up to 9x performance acceleration on the new 4th Gen Intel Xeon using **Intel Extension for Transformers**. You may also want to try creating your own Stable Diffusion with few-shot fine-tuning. Please refer to our blog and code available in **Intel Neural Compressor** and **Hugging Face Diffusers**. -""" - -CN_md = """ -请尽情体验这些功能!利用**Intel Extension for Transformers**和新一代英特尔至强可扩展处理器可获得高达9倍的性能提升。您还可以使用少样本微调的方式来创建属于自己的稳定扩散模型。请参考我们的博客代码,这些资源可在**Intel Neural Compressor****Hugging Face Diffusers**的GitHub上找到。 -""" - -legal = """ -Performance varies by use, configuration and other factors. Learn more at www.Intel.com/PerformanceIndex. Performance results are based on testing as of dates shown in configurations and may not reflect all publicly available updates. See backup for configuration details. No product or component can be absolutely secure. -© Intel Corporation. Intel, the Intel logo, and other Intel marks are trademarks of Intel Corporation or its subsidiaries. Other names and brands may be claimed as the property of others. -""" - -CN_legal = """ -性能因使用、配置和其他因素而异。想要了解更多信息,请访问www.Intel.com/PerformanceIndex。 -- 性能结果基于所示配置的测试,可能不反映所有公开可用的更新。 -- 有关配置详细信息,请参考备份文件。没有任何产品/组件绝对安全。 -- © 英特尔公司。英特尔、英特尔标识和其他英特尔商标是英特尔公司或其子公司的商标。其他名称和品牌可能归他人所有。""" - -details = """ -- 4th Gen Intel Xeon Scalable Processor Inference. Test by Intel on 10/06/2023. Ubuntu 22.04.1 LTS, Intel Extension for Transformers(1.1.dev154+g448cc17e), Transformers 4.28.1, Diffusers 0.12.1, oneDNN v2.7.4. -- 3rd Gen Intel Xeon Scalable Processor Inference: Test by Intel on 10/06/2023. Ubuntu 22.04.1 LTS, PyTorch Nightly build (2.0.0.dev20230105+cpu), Transformers 4.25.1, Diffusers 0.11.1, oneDNN v2.7.2. -""" - -CN_details = """ -- 英特尔第四代至强可扩展处理器推理。由英特尔于2023年6月10日测试。Ubuntu 22.04.1 LTS,英特尔Transformer扩展(1.1.dev154+g448cc17e),Transformer 4.28.1,Diffusers 0.12.1,oneDNN v2.7.4。 -- 英特尔第三代至强可扩展处理器推理:由英特尔于2023年6月10日测试。Ubuntu 22.04.1 LTS,PyTorch Nightly构建(2.0.0.dev20230105+cpu),Transformer 4.25.1,Diffusers 0.11.1,oneDNN v2.7.2。 -""" - -# warining = """ -# ⚠ Upgrading, service temporarily paused. 
-# """ - -css = ''' - .instruction{position: absolute; top: 0;right: 0;margin-top: 0px !important} - .arrow{position: absolute;top: 0;right: -110px;margin-top: -8px !important} - #component-4, #component-3, #component-10{min-height: 0} - .duplicate-button img{margin: 0} - #img_1, #img_2, #img_3, #img_4{height:15rem} - #mdStyle{font-size: 0.7rem} - #titleCenter {text-align:center} - -''' - -random_seed = random.randint(0, 2147483647) - -with gr.Blocks(css=css) as demo: - # gr.Markdown(warining, elem_id="warning") - with gr.Box(visible=False) as zh: - gr.Markdown(CN_title, elem_id='titleCenter') - gr.Markdown(CN_subtitle, elem_id='titleCenter') - gr.Markdown(CN_md) - - with gr.Tab("文字转图片"): - with gr.Row() as text_to_image: - with gr.Column(): - prompt = gr.inputs.Textbox(label='提示词', default='a photo of an astronaut riding a horse on mars') - inference_steps = gr.inputs.Slider(1, 100, label='采样步数 - 步数越长质量越高 ', default=20, step=1) - seed = gr.inputs.Slider(0, 2147483647, label='随机种子', default=random_seed, step=1) - guidance_scale = gr.inputs.Slider(1.0, 20.0, label='引导程度 - 提示词对结果的影响程度', default=7.5, step=0.1) - hidden = gr.Textbox(label='hidden', value=os.environ["front_token"], visible=False) - txt2img_button = gr.Button("生成图片", variant="primary") - url_SPR_txt = gr.Textbox(label='url_SPR_txt', value=SPR, visible=False) - url_CLX_txt = gr.Textbox(label='url_CLX_txt', value=CLX, visible=False) - - with gr.Column(): - result_image_1 = gr.Image(label="第四代英特尔至强可扩展处理器 (SPR)", elem_id="img_1") - result_image_2 = gr.Image(label="第三代英特尔至强可扩展处理器 (ICX)", elem_id="img_2") - - txt2img_input = gr.Textbox(visible=False) - - gr.Examples( - examples=prompt_examples_list, - inputs=txt2img_input, - outputs=prompt, - fn=txt2img_example_input, - cache_examples=True, - label="示例" - ) - - with gr.Tab("图片转图片"): - with gr.Row() as image_to_image: - with gr.Column(): - source_img = gr.Image(source="upload", type="pil", value="https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg") - prompt_2 = gr.inputs.Textbox(label='提示词', default='A fantasy landscape, trending on artstation') - inference_steps_2 = gr.inputs.Slider(1, 100, label='采样步数 - 步数越长质量越高', default=20, step=1) - seed_2 = gr.inputs.Slider(0, 2147483647, label='随机种子', default=random_seed, step=1) - guidance_scale_2 = gr.inputs.Slider(1.0, 20.0, label='引导程度 - 提示词对结果的影响程度', default=7.5, step=0.1) - strength = gr.inputs.Slider(0.0, 1.0, label='强度级别 - 强度增加时噪声也变大', default=0.75, step=0.01) - hidden_2 = gr.Textbox(label='hidden', value=os.environ["front_token"], visible=False) - img2img_button = gr.Button("生成图片", variant="primary") - url_SPR = gr.Textbox(label='url_SPR', value=SPR, visible=False) - url_CLX = gr.Textbox(label='url_CLX', value=CLX, visible=False) - - with gr.Column(): - result_image_3 = gr.Image(label="第四代英特尔至强可扩展处理器 (SPR)", elem_id="img_3") - result_image_4 = gr.Image(label="第三代英特尔至强可扩展处理器 (ICX)", elem_id="img_4") - with gr.Accordion("附加信息" , open=False) as area_crazy_fn: - gr.Markdown("**测试配置详情:**", elem_id='mdStyle') - gr.Markdown(CN_details, elem_id='mdStyle') - - gr.Markdown("**注意事项和免责声明:**", elem_id='mdStyle') - gr.Markdown(CN_legal, elem_id='mdStyle') - - with gr.Box(visible=False) as Eng: - gr.Markdown(title) - gr.Markdown(subtitle) - gr.Markdown(md) - - with gr.Tab("Text-to-Image"): - with gr.Row() as text_to_image: - with gr.Column(): - prompt = gr.inputs.Textbox(label='Prompt', default='a photo of an astronaut riding a horse on mars') - inference_steps = gr.inputs.Slider(1, 
100, label='Inference Steps - increase the steps for better quality (e.g., avoiding black image) ', default=20, step=1) - seed = gr.inputs.Slider(0, 2147483647, label='Seed', default=random_seed, step=1) - guidance_scale = gr.inputs.Slider(1.0, 20.0, label='Guidance Scale - how much the prompt will influence the results', default=7.5, step=0.1) - hidden = gr.Textbox(label='hidden', value=os.environ["front_token"], visible=False) - txt2img_button = gr.Button("Generate Image", variant="primary") - url_SPR_txt = gr.Textbox(label='url_SPR_txt', value=SPR, visible=False) - url_CLX_txt = gr.Textbox(label='url_CLX_txt', value=CLX, visible=False) - - with gr.Column(): - result_image_1 = gr.Image(label="4th Gen Intel Xeon Scalable Processors (SPR)", elem_id="img_1") - result_image_2 = gr.Image(label="3rd Gen Intel Xeon Scalable Processors (ICX)", elem_id="img_2") - - txt2img_input = gr.Textbox(visible=False) - - gr.Examples( - examples=prompt_examples_list, - inputs=txt2img_input, - outputs=prompt, - fn=txt2img_example_input, - cache_examples=True, - ) - - with gr.Tab("Image-to-Image text-guided generation"): - with gr.Row() as image_to_image: - with gr.Column(): - source_img = gr.Image(source="upload", type="pil", value="https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg") - prompt_2 = gr.inputs.Textbox(label='Prompt', default='A fantasy landscape, trending on artstation') - inference_steps_2 = gr.inputs.Slider(1, 100, label='Inference Steps - increase the steps for better quality (e.g., avoiding black image) ', default=20, step=1) - seed_2 = gr.inputs.Slider(0, 2147483647, label='Seed', default=random_seed, step=1) - guidance_scale_2 = gr.inputs.Slider(1.0, 20.0, label='Guidance Scale - how much the prompt will influence the results', default=7.5, step=0.1) - strength = gr.inputs.Slider(0.0, 1.0, label='Strength - adding more noise to it the larger the strength', default=0.75, step=0.01) - hidden_2 = gr.Textbox(label='hidden', value=os.environ["front_token"], visible=False) - img2img_button = gr.Button("Generate Image", variant="primary") - url_SPR = gr.Textbox(label='url_SPR', value=SPR, visible=False) - url_CLX = gr.Textbox(label='url_CLX', value=CLX, visible=False) - - with gr.Column(): - result_image_3 = gr.Image(label="4th Gen Intel Xeon Scalable Processors (SPR)", elem_id="img_3") - result_image_4 = gr.Image(label="3rd Gen Intel Xeon Scalable Processors (ICX)", elem_id="img_4") - with gr.Accordion("Additional Info", open=False) as area_crazy_fn: - gr.Markdown("**Test Configuration Details:**", elem_id='mdStyle') - gr.Markdown(details, elem_id='mdStyle') - - gr.Markdown("**Notices and Disclaimers:**", elem_id='mdStyle') - gr.Markdown(legal, elem_id='mdStyle') - - - txt2img_button.click(fn=txt2img_generate, inputs=[url_SPR_txt, prompt, inference_steps, seed, guidance_scale, hidden], outputs=result_image_1, queue=False) - txt2img_button.click(fn=txt2img_generate, inputs=[url_CLX_txt, prompt, inference_steps, seed, guidance_scale, hidden], outputs=result_image_2, queue=False) - img2img_button.click(fn=img2img_generate, inputs=[url_SPR, source_img, prompt_2, inference_steps_2, strength, seed_2, guidance_scale_2, hidden_2], outputs=result_image_3, queue=False) - img2img_button.click(fn=img2img_generate, inputs=[url_CLX, source_img, prompt_2, inference_steps_2, strength, seed_2, guidance_scale_2, hidden_2], outputs=result_image_4, queue=False) - - dt = gr.Textbox(label="Current language", visible=False) - 
dt.change(update_language, inputs=dt, outputs=[Eng, zh]) - demo.load(None, inputs=None, outputs=dt, _js="() => navigator.language") - - -demo.queue(default_enabled=False, api_open=False, max_size=5).launch(debug=True, show_api=False) diff --git a/spaces/IntelligenzaArtificiale/code-generation/utils/resources.md b/spaces/IntelligenzaArtificiale/code-generation/utils/resources.md deleted file mode 100644 index c3e0149a07bf2f6340f275eecfaa2811831a1314..0000000000000000000000000000000000000000 --- a/spaces/IntelligenzaArtificiale/code-generation/utils/resources.md +++ /dev/null @@ -1,6 +0,0 @@ -- Natural Language Processing with Transformers [Tunstall et al., 2022](https://www.oreilly.com/library/view/natural-language-processing/9781098103231/). -- Evaluating large language models trained on code [Chen et al., 2021](https://arxiv.org/abs/2107.03374). -- Competition-Level Code Generation with AlphaCode [Li et al., 2022](https://arxiv.org/abs/2203.07814). -- InCoder: A Generative Model for Code Infilling and Synthesis [Fried et al., 2022](https://arxiv.org/abs/2204.05999). -- A Conversational Paradigm for Program Synthesis [Nijkamp et al. 2022](https://arxiv.org/abs/2203.13474). -- A systematic evaluation of large language models of code [Xu et al. 2022](https://arxiv.org/abs/2202.13169). diff --git a/spaces/Jackflack09/diffuse-custom/diffusers/pipelines/repaint/__init__.py b/spaces/Jackflack09/diffuse-custom/diffusers/pipelines/repaint/__init__.py deleted file mode 100644 index 16bc86d1cedf6243fb92f7ba331b5a6188133298..0000000000000000000000000000000000000000 --- a/spaces/Jackflack09/diffuse-custom/diffusers/pipelines/repaint/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .pipeline_repaint import RePaintPipeline diff --git a/spaces/JairoDanielMT/CCPlatanos/app.py b/spaces/JairoDanielMT/CCPlatanos/app.py deleted file mode 100644 index 21f52b77e4a93ad4c7a054e4b691b2520ea5f608..0000000000000000000000000000000000000000 --- a/spaces/JairoDanielMT/CCPlatanos/app.py +++ /dev/null @@ -1,67 +0,0 @@ -import torch -import torch.nn as nn -import torchvision.transforms as transforms -import cv2 -import numpy as np -from PIL import Image -import gradio as gr - -# Preprocesamiento de imágenes -transform = transforms.Compose([ - transforms.Resize((512, 512)), - transforms.ToTensor(), - transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) -]) - -class CNN(nn.Module): - def __init__(self): - super(CNN, self).__init__() - self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1) - self.relu = nn.ReLU() - self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2) - self.fc1 = nn.Linear(32 * 256 * 256, 128) - self.fc2 = nn.Linear(128, 4) # 4 clases: baja, regular, excelente, mala - - def forward(self, x): - x = self.conv1(x) - x = self.relu(x) - x = self.maxpool(x) - x = x.view(x.size(0), -1) - x = self.fc1(x) - x = self.relu(x) - x = self.fc2(x) - return x - -# Configurar dispositivo en CPU -device = torch.device('cpu') - -# Cargar el modelo previamente guardado -model = CNN().to(device) -model.load_state_dict(torch.load('calidadplatano.pth', map_location=device)) -model.eval() - -# Función para clasificar la imagen de entrada -def classify_image(input_image): - input_image = cv2.cvtColor(input_image, cv2.COLOR_BGR2RGB) - input_image = Image.fromarray(input_image) - input_image = transform(input_image).unsqueeze(0).to(device) - output = model(input_image) - probabilities = torch.softmax(output, dim=1).squeeze().detach().cpu().numpy() - class_labels = ['baja', 'regular', 'excelente', 'mala'] - predicted_class = 
class_labels[np.argmax(probabilities)] - confidence = probabilities[np.argmax(probabilities)] - return predicted_class, confidence - -# Definir la interfaz gráfica de usuario -inputs = gr.inputs.Image() -outputs = gr.outputs.Label(num_top_classes=1) -examples=[["imagenesDeEjemplos/1.webp"],["imagenesDeEjemplos/2.webp"],["imagenesDeEjemplos/3.webp"],["imagenesDeEjemplos/4.webp"],["imagenesDeEjemplos/5.webp"]] - -def process_image(input_image): - predicted_class, confidence = classify_image(input_image) - return predicted_class + " (" + str(round(confidence * 100, 2)) + "%)" - -title = "Clasificación de calidad de plátanos" -description = "Carga una imagen de plátano y obtén la clasificación de calidad." -iface = gr.Interface(fn=process_image, inputs=inputs, outputs=outputs, title=title, description=description,examples=examples) -iface.launch() diff --git a/spaces/Jeff2323/ai-comic-factory/src/app/interface/progress/progress-bar.tsx b/spaces/Jeff2323/ai-comic-factory/src/app/interface/progress/progress-bar.tsx deleted file mode 100644 index 0e926d05419cecc6d4a4964d53a8dad6e07a4102..0000000000000000000000000000000000000000 --- a/spaces/Jeff2323/ai-comic-factory/src/app/interface/progress/progress-bar.tsx +++ /dev/null @@ -1,57 +0,0 @@ -"use client" - -import { CircularProgressbar, buildStyles } from "react-circular-progressbar" -import "react-circular-progressbar/dist/styles.css" - -export function ProgressBar ({ - className, - progressPercentage, - text -}: { - className?: string - progressPercentage?: number - text?: string -}) { - return ( -
    - -
    - ) -} \ No newline at end of file diff --git a/spaces/KenjieDec/GPEN/retinaface/utils/box_utils.py b/spaces/KenjieDec/GPEN/retinaface/utils/box_utils.py deleted file mode 100644 index 06cc37b509fe792eb9ac3f0db5693dd0299162b4..0000000000000000000000000000000000000000 --- a/spaces/KenjieDec/GPEN/retinaface/utils/box_utils.py +++ /dev/null @@ -1,330 +0,0 @@ -import torch -import numpy as np - - -def point_form(boxes): - """ Convert prior_boxes to (xmin, ymin, xmax, ymax) - representation for comparison to point form ground truth data. - Args: - boxes: (tensor) center-size default boxes from priorbox layers. - Return: - boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes. - """ - return torch.cat((boxes[:, :2] - boxes[:, 2:]/2, # xmin, ymin - boxes[:, :2] + boxes[:, 2:]/2), 1) # xmax, ymax - - -def center_size(boxes): - """ Convert prior_boxes to (cx, cy, w, h) - representation for comparison to center-size form ground truth data. - Args: - boxes: (tensor) point_form boxes - Return: - boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes. - """ - return torch.cat((boxes[:, 2:] + boxes[:, :2])/2, # cx, cy - boxes[:, 2:] - boxes[:, :2], 1) # w, h - - -def intersect(box_a, box_b): - """ We resize both tensors to [A,B,2] without new malloc: - [A,2] -> [A,1,2] -> [A,B,2] - [B,2] -> [1,B,2] -> [A,B,2] - Then we compute the area of intersect between box_a and box_b. - Args: - box_a: (tensor) bounding boxes, Shape: [A,4]. - box_b: (tensor) bounding boxes, Shape: [B,4]. - Return: - (tensor) intersection area, Shape: [A,B]. - """ - A = box_a.size(0) - B = box_b.size(0) - max_xy = torch.min(box_a[:, 2:].unsqueeze(1).expand(A, B, 2), - box_b[:, 2:].unsqueeze(0).expand(A, B, 2)) - min_xy = torch.max(box_a[:, :2].unsqueeze(1).expand(A, B, 2), - box_b[:, :2].unsqueeze(0).expand(A, B, 2)) - inter = torch.clamp((max_xy - min_xy), min=0) - return inter[:, :, 0] * inter[:, :, 1] - - -def jaccard(box_a, box_b): - """Compute the jaccard overlap of two sets of boxes. The jaccard overlap - ==simply the intersection over union of two boxes. Here we operate on - ground truth boxes and default boxes. 
- E.g.: - A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B) - Args: - box_a: (tensor) Ground truth bounding boxes, Shape: [num_objects,4] - box_b: (tensor) Prior boxes from priorbox layers, Shape: [num_priors,4] - Return: - jaccard overlap: (tensor) Shape: [box_a.size(0), box_b.size(0)] - """ - inter = intersect(box_a, box_b) - area_a = ((box_a[:, 2]-box_a[:, 0]) * - (box_a[:, 3]-box_a[:, 1])).unsqueeze(1).expand_as(inter) # [A,B] - area_b = ((box_b[:, 2]-box_b[:, 0]) * - (box_b[:, 3]-box_b[:, 1])).unsqueeze(0).expand_as(inter) # [A,B] - union = area_a + area_b - inter - return inter / union # [A,B] - - -def matrix_iou(a, b): - """ - return iou of a and b, numpy version for data augenmentation - """ - lt = np.maximum(a[:, np.newaxis, :2], b[:, :2]) - rb = np.minimum(a[:, np.newaxis, 2:], b[:, 2:]) - - area_i = np.prod(rb - lt, axis=2) * (lt < rb).all(axis=2) - area_a = np.prod(a[:, 2:] - a[:, :2], axis=1) - area_b = np.prod(b[:, 2:] - b[:, :2], axis=1) - return area_i / (area_a[:, np.newaxis] + area_b - area_i) - - -def matrix_iof(a, b): - """ - return iof of a and b, numpy version for data augenmentation - """ - lt = np.maximum(a[:, np.newaxis, :2], b[:, :2]) - rb = np.minimum(a[:, np.newaxis, 2:], b[:, 2:]) - - area_i = np.prod(rb - lt, axis=2) * (lt < rb).all(axis=2) - area_a = np.prod(a[:, 2:] - a[:, :2], axis=1) - return area_i / np.maximum(area_a[:, np.newaxis], 1) - - -def match(threshold, truths, priors, variances, labels, landms, loc_t, conf_t, landm_t, idx): - """Match each prior box with the ground truth box of the highest jaccard - overlap, encode the bounding boxes, then return the matched indices - corresponding to both confidence and location preds. - Args: - threshold: (float) The overlap threshold used when mathing boxes. - truths: (tensor) Ground truth boxes, Shape: [num_obj, 4]. - priors: (tensor) Prior boxes from priorbox layers, Shape: [n_priors,4]. - variances: (tensor) Variances corresponding to each prior coord, - Shape: [num_priors, 4]. - labels: (tensor) All the class labels for the image, Shape: [num_obj]. - landms: (tensor) Ground truth landms, Shape [num_obj, 10]. - loc_t: (tensor) Tensor to be filled w/ endcoded location targets. - conf_t: (tensor) Tensor to be filled w/ matched indices for conf preds. - landm_t: (tensor) Tensor to be filled w/ endcoded landm targets. - idx: (int) current batch index - Return: - The matched indices corresponding to 1)location 2)confidence 3)landm preds. 
- """ - # jaccard index - overlaps = jaccard( - truths, - point_form(priors) - ) - # (Bipartite Matching) - # [1,num_objects] best prior for each ground truth - best_prior_overlap, best_prior_idx = overlaps.max(1, keepdim=True) - - # ignore hard gt - valid_gt_idx = best_prior_overlap[:, 0] >= 0.2 - best_prior_idx_filter = best_prior_idx[valid_gt_idx, :] - if best_prior_idx_filter.shape[0] <= 0: - loc_t[idx] = 0 - conf_t[idx] = 0 - return - - # [1,num_priors] best ground truth for each prior - best_truth_overlap, best_truth_idx = overlaps.max(0, keepdim=True) - best_truth_idx.squeeze_(0) - best_truth_overlap.squeeze_(0) - best_prior_idx.squeeze_(1) - best_prior_idx_filter.squeeze_(1) - best_prior_overlap.squeeze_(1) - best_truth_overlap.index_fill_(0, best_prior_idx_filter, 2) # ensure best prior - # TODO refactor: index best_prior_idx with long tensor - # ensure every gt matches with its prior of max overlap - for j in range(best_prior_idx.size(0)): # 判别此anchor是预测哪一个boxes - best_truth_idx[best_prior_idx[j]] = j - matches = truths[best_truth_idx] # Shape: [num_priors,4] 此处为每一个anchor对应的bbox取出来 - conf = labels[best_truth_idx] # Shape: [num_priors] 此处为每一个anchor对应的label取出来 - conf[best_truth_overlap < threshold] = 0 # label as background overlap<0.35的全部作为负样本 - loc = encode(matches, priors, variances) - - matches_landm = landms[best_truth_idx] - landm = encode_landm(matches_landm, priors, variances) - loc_t[idx] = loc # [num_priors,4] encoded offsets to learn - conf_t[idx] = conf # [num_priors] top class label for each prior - landm_t[idx] = landm - - -def encode(matched, priors, variances): - """Encode the variances from the priorbox layers into the ground truth boxes - we have matched (based on jaccard overlap) with the prior boxes. - Args: - matched: (tensor) Coords of ground truth for each prior in point-form - Shape: [num_priors, 4]. - priors: (tensor) Prior boxes in center-offset form - Shape: [num_priors,4]. - variances: (list[float]) Variances of priorboxes - Return: - encoded boxes (tensor), Shape: [num_priors, 4] - """ - - # dist b/t match center and prior's center - g_cxcy = (matched[:, :2] + matched[:, 2:])/2 - priors[:, :2] - # encode variance - g_cxcy /= (variances[0] * priors[:, 2:]) - # match wh / prior wh - g_wh = (matched[:, 2:] - matched[:, :2]) / priors[:, 2:] - g_wh = torch.log(g_wh) / variances[1] - # return target for smooth_l1_loss - return torch.cat([g_cxcy, g_wh], 1) # [num_priors,4] - -def encode_landm(matched, priors, variances): - """Encode the variances from the priorbox layers into the ground truth boxes - we have matched (based on jaccard overlap) with the prior boxes. - Args: - matched: (tensor) Coords of ground truth for each prior in point-form - Shape: [num_priors, 10]. - priors: (tensor) Prior boxes in center-offset form - Shape: [num_priors,4]. 
- variances: (list[float]) Variances of priorboxes - Return: - encoded landm (tensor), Shape: [num_priors, 10] - """ - - # dist b/t match center and prior's center - matched = torch.reshape(matched, (matched.size(0), 5, 2)) - priors_cx = priors[:, 0].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2) - priors_cy = priors[:, 1].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2) - priors_w = priors[:, 2].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2) - priors_h = priors[:, 3].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2) - priors = torch.cat([priors_cx, priors_cy, priors_w, priors_h], dim=2) - g_cxcy = matched[:, :, :2] - priors[:, :, :2] - # encode variance - g_cxcy /= (variances[0] * priors[:, :, 2:]) - # g_cxcy /= priors[:, :, 2:] - g_cxcy = g_cxcy.reshape(g_cxcy.size(0), -1) - # return target for smooth_l1_loss - return g_cxcy - - -# Adapted from https://github.com/Hakuyume/chainer-ssd -def decode(loc, priors, variances): - """Decode locations from predictions using priors to undo - the encoding we did for offset regression at train time. - Args: - loc (tensor): location predictions for loc layers, - Shape: [num_priors,4] - priors (tensor): Prior boxes in center-offset form. - Shape: [num_priors,4]. - variances: (list[float]) Variances of priorboxes - Return: - decoded bounding box predictions - """ - - boxes = torch.cat(( - priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:], - priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])), 1) - boxes[:, :2] -= boxes[:, 2:] / 2 - boxes[:, 2:] += boxes[:, :2] - return boxes - -def decode_landm(pre, priors, variances): - """Decode landm from predictions using priors to undo - the encoding we did for offset regression at train time. - Args: - pre (tensor): landm predictions for loc layers, - Shape: [num_priors,10] - priors (tensor): Prior boxes in center-offset form. - Shape: [num_priors,4]. - variances: (list[float]) Variances of priorboxes - Return: - decoded landm predictions - """ - landms = torch.cat((priors[:, :2] + pre[:, :2] * variances[0] * priors[:, 2:], - priors[:, :2] + pre[:, 2:4] * variances[0] * priors[:, 2:], - priors[:, :2] + pre[:, 4:6] * variances[0] * priors[:, 2:], - priors[:, :2] + pre[:, 6:8] * variances[0] * priors[:, 2:], - priors[:, :2] + pre[:, 8:10] * variances[0] * priors[:, 2:], - ), dim=1) - return landms - - -def log_sum_exp(x): - """Utility function for computing log_sum_exp while determining - This will be used to determine unaveraged confidence loss across - all examples in a batch. - Args: - x (Variable(tensor)): conf_preds from conf layers - """ - x_max = x.data.max() - return torch.log(torch.sum(torch.exp(x-x_max), 1, keepdim=True)) + x_max - - -# Original author: Francisco Massa: -# https://github.com/fmassa/object-detection.torch -# Ported to PyTorch by Max deGroot (02/01/2017) -def nms(boxes, scores, overlap=0.5, top_k=200): - """Apply non-maximum suppression at test time to avoid detecting too many - overlapping bounding boxes for a given object. - Args: - boxes: (tensor) The location preds for the img, Shape: [num_priors,4]. - scores: (tensor) The class predscores for the img, Shape:[num_priors]. - overlap: (float) The overlap thresh for suppressing unnecessary boxes. - top_k: (int) The Maximum number of box preds to consider. - Return: - The indices of the kept boxes with respect to num_priors. 
- """ - - keep = torch.Tensor(scores.size(0)).fill_(0).long() - if boxes.numel() == 0: - return keep - x1 = boxes[:, 0] - y1 = boxes[:, 1] - x2 = boxes[:, 2] - y2 = boxes[:, 3] - area = torch.mul(x2 - x1, y2 - y1) - v, idx = scores.sort(0) # sort in ascending order - # I = I[v >= 0.01] - idx = idx[-top_k:] # indices of the top-k largest vals - xx1 = boxes.new() - yy1 = boxes.new() - xx2 = boxes.new() - yy2 = boxes.new() - w = boxes.new() - h = boxes.new() - - # keep = torch.Tensor() - count = 0 - while idx.numel() > 0: - i = idx[-1] # index of current largest val - # keep.append(i) - keep[count] = i - count += 1 - if idx.size(0) == 1: - break - idx = idx[:-1] # remove kept element from view - # load bboxes of next highest vals - torch.index_select(x1, 0, idx, out=xx1) - torch.index_select(y1, 0, idx, out=yy1) - torch.index_select(x2, 0, idx, out=xx2) - torch.index_select(y2, 0, idx, out=yy2) - # store element-wise max with next highest score - xx1 = torch.clamp(xx1, min=x1[i]) - yy1 = torch.clamp(yy1, min=y1[i]) - xx2 = torch.clamp(xx2, max=x2[i]) - yy2 = torch.clamp(yy2, max=y2[i]) - w.resize_as_(xx2) - h.resize_as_(yy2) - w = xx2 - xx1 - h = yy2 - yy1 - # check sizes of xx1 and xx2.. after each iteration - w = torch.clamp(w, min=0.0) - h = torch.clamp(h, min=0.0) - inter = w*h - # IoU = i / (area(a) + area(b) - i) - rem_areas = torch.index_select(area, 0, idx) # load remaining areas) - union = (rem_areas - inter) + area[i] - IoU = inter/union # store result in iou - # keep only elements with an IoU <= overlap - idx = idx[IoU.le(overlap)] - return keep, count - - diff --git a/spaces/Kimata/Sanskrit-TTS/indic_nlp_library/contrib/correct_moses_tokenizer.py b/spaces/Kimata/Sanskrit-TTS/indic_nlp_library/contrib/correct_moses_tokenizer.py deleted file mode 100644 index 9c656d4d69fd16638dbfa4a4435920bea50a6fe5..0000000000000000000000000000000000000000 --- a/spaces/Kimata/Sanskrit-TTS/indic_nlp_library/contrib/correct_moses_tokenizer.py +++ /dev/null @@ -1,29 +0,0 @@ -import sys -from indicnlp import langinfo -from indicnlp import loader - -if __name__ == '__main__': - """ - This script corrects the incorrect tokenization done by Moses tokenizer. 
- The Moses tokenizer splits on nukta and halant characters - Usage: python correct_moses_tokenizer.py - """ - - loader.load() - - infname=sys.argv[1] - outfname=sys.argv[2] - lang=sys.argv[3] - - halant_char=langinfo.offset_to_char(langinfo.HALANTA_OFFSET,lang) - nukta_char=langinfo.offset_to_char(langinfo.NUKTA_OFFSET,lang) - - with open(infname,'r',encoding='utf-8') as infile, \ - open(outfname,'w',encoding='utf-8') as outfile: - for line in infile: - outfile.write( - line.replace( - ' {} '.format(halant_char), halant_char).replace( - ' {} '.format(nukta_char), nukta_char).replace( - ' {}{}'.format(nukta_char,halant_char),'{}{}'.format(nukta_char,halant_char)) - ) diff --git a/spaces/Kororinpa/Amadeus_Project/losses.py b/spaces/Kororinpa/Amadeus_Project/losses.py deleted file mode 100644 index fb22a0e834dd87edaa37bb8190eee2c3c7abe0d5..0000000000000000000000000000000000000000 --- a/spaces/Kororinpa/Amadeus_Project/losses.py +++ /dev/null @@ -1,61 +0,0 @@ -import torch -from torch.nn import functional as F - -import commons - - -def feature_loss(fmap_r, fmap_g): - loss = 0 - for dr, dg in zip(fmap_r, fmap_g): - for rl, gl in zip(dr, dg): - rl = rl.float().detach() - gl = gl.float() - loss += torch.mean(torch.abs(rl - gl)) - - return loss * 2 - - -def discriminator_loss(disc_real_outputs, disc_generated_outputs): - loss = 0 - r_losses = [] - g_losses = [] - for dr, dg in zip(disc_real_outputs, disc_generated_outputs): - dr = dr.float() - dg = dg.float() - r_loss = torch.mean((1-dr)**2) - g_loss = torch.mean(dg**2) - loss += (r_loss + g_loss) - r_losses.append(r_loss.item()) - g_losses.append(g_loss.item()) - - return loss, r_losses, g_losses - - -def generator_loss(disc_outputs): - loss = 0 - gen_losses = [] - for dg in disc_outputs: - dg = dg.float() - l = torch.mean((1-dg)**2) - gen_losses.append(l) - loss += l - - return loss, gen_losses - - -def kl_loss(z_p, logs_q, m_p, logs_p, z_mask): - """ - z_p, logs_q: [b, h, t_t] - m_p, logs_p: [b, h, t_t] - """ - z_p = z_p.float() - logs_q = logs_q.float() - m_p = m_p.float() - logs_p = logs_p.float() - z_mask = z_mask.float() - - kl = logs_p - logs_q - 0.5 - kl += 0.5 * ((z_p - m_p)**2) * torch.exp(-2. 
* logs_p) - kl = torch.sum(kl * z_mask) - l = kl / torch.sum(z_mask) - return l diff --git a/spaces/KyanChen/FunSR/scripts/resize.py b/spaces/KyanChen/FunSR/scripts/resize.py deleted file mode 100644 index be41cb906c7139533c001b8a1b91b9f2b2b3a1b4..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/FunSR/scripts/resize.py +++ /dev/null @@ -1,18 +0,0 @@ -import os -from PIL import Image -from tqdm import tqdm - -inp = r'H:\DataSet\SceneCls\UCMerced_LandUse\UCMerced_LandUse\Images' - -for size in [256, 128, 64, 32]: - if size == 256: - inp = './data1024x1024' - else: - inp = './256' - print(size) - os.mkdir(str(size)) - filenames = os.listdir(inp) - for filename in tqdm(filenames): - Image.open(os.path.join(inp, filename)) \ - .resize((size, size), Image.BICUBIC) \ - .save(os.path.join('.', str(size), filename.split('.')[0] + '.png')) diff --git a/spaces/Laihiujin/OneFormer/oneformer/modeling/pixel_decoder/ops/functions/__init__.py b/spaces/Laihiujin/OneFormer/oneformer/modeling/pixel_decoder/ops/functions/__init__.py deleted file mode 100644 index 2b06b5ac538b63bdb9a6c82e4635b95bb5491d5b..0000000000000000000000000000000000000000 --- a/spaces/Laihiujin/OneFormer/oneformer/modeling/pixel_decoder/ops/functions/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# ------------------------------------------------------------------------------------------------ -# Deformable DETR -# Copyright (c) 2020 SenseTime. All Rights Reserved. -# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------------------------------ -# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 -# ------------------------------------------------------------------------------------------------ - -# Copyright (c) Facebook, Inc. and its affiliates. 
-# Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR - -from .ms_deform_attn_func import MSDeformAttnFunction - diff --git a/spaces/LanguageBind/LanguageBind/v_cls/transforms.py b/spaces/LanguageBind/LanguageBind/v_cls/transforms.py deleted file mode 100644 index 530b2544d03f9e90a3e1b2f75154037678b52bb0..0000000000000000000000000000000000000000 --- a/spaces/LanguageBind/LanguageBind/v_cls/transforms.py +++ /dev/null @@ -1,586 +0,0 @@ -# -------------------------------------------------------- -# Based on BEiT, timm, DINO and DeiT code bases -# https://github.com/microsoft/unilm/tree/master/beit -# https://github.com/rwightman/pytorch-image-models/tree/master/timm -# https://github.com/facebookresearch/deit -# https://github.com/facebookresearch/dino -# --------------------------------------------------------' -import math -import numbers -import random -import warnings - -import numpy as np -import torch -import torchvision -import torchvision.transforms.functional as F -from PIL import Image, ImageOps - - -class ToNumpy: - - def __call__(self, pil_img): - np_img = np.array(pil_img, dtype=np.uint8) - if np_img.ndim < 3: - np_img = np.expand_dims(np_img, axis=-1) - np_img = np.rollaxis(np_img, 2) # HWC to CHW - return np_img - - -class ToTensor: - - def __init__(self, dtype=torch.float32): - self.dtype = dtype - - def __call__(self, pil_img): - np_img = np.array(pil_img, dtype=np.uint8) - if np_img.ndim < 3: - np_img = np.expand_dims(np_img, axis=-1) - np_img = np.rollaxis(np_img, 2) # HWC to CHW - return torch.from_numpy(np_img).to(dtype=self.dtype) - - -_pil_interpolation_to_str = { - Image.NEAREST: 'PIL.Image.NEAREST', - Image.BILINEAR: 'PIL.Image.BILINEAR', - Image.BICUBIC: 'PIL.Image.BICUBIC', - Image.LANCZOS: 'PIL.Image.LANCZOS', - Image.HAMMING: 'PIL.Image.HAMMING', - Image.BOX: 'PIL.Image.BOX', -} - - -def _pil_interp(method): - if method == 'bicubic': - return Image.BICUBIC - elif method == 'lanczos': - return Image.LANCZOS - elif method == 'hamming': - return Image.HAMMING - else: - # default bilinear, do we want to allow nearest? - return Image.BILINEAR - - -_RANDOM_INTERPOLATION = (Image.BILINEAR, Image.BICUBIC) - - -class RandomResizedCropAndInterpolationWithTwoPic: - """Crop the given PIL Image to random size and aspect ratio with random interpolation. - - A crop of random size (default: of 0.08 to 1.0) of the original size and a random - aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop - is finally resized to given size. - This is popularly used to train the Inception networks. - - Args: - size: expected output size of each edge - scale: range of size of the origin size cropped - ratio: range of aspect ratio of the origin aspect ratio cropped - interpolation: Default: PIL.Image.BILINEAR - """ - - def __init__(self, - size, - second_size=None, - scale=(0.08, 1.0), - ratio=(3. / 4., 4. 
/ 3.), - interpolation='bilinear', - second_interpolation='lanczos'): - if isinstance(size, tuple): - self.size = size - else: - self.size = (size, size) - if second_size is not None: - if isinstance(second_size, tuple): - self.second_size = second_size - else: - self.second_size = (second_size, second_size) - else: - self.second_size = None - if (scale[0] > scale[1]) or (ratio[0] > ratio[1]): - warnings.warn("range should be of kind (min, max)") - - if interpolation == 'random': - self.interpolation = _RANDOM_INTERPOLATION - else: - self.interpolation = _pil_interp(interpolation) - self.second_interpolation = _pil_interp(second_interpolation) - self.scale = scale - self.ratio = ratio - - @staticmethod - def get_params(img, scale, ratio): - """Get parameters for ``crop`` for a random sized crop. - - Args: - img (PIL Image): Image to be cropped. - scale (tuple): range of size of the origin size cropped - ratio (tuple): range of aspect ratio of the origin aspect ratio cropped - - Returns: - tuple: params (i, j, h, w) to be passed to ``crop`` for a random - sized crop. - """ - area = img.size[0] * img.size[1] - - for attempt in range(10): - target_area = random.uniform(*scale) * area - log_ratio = (math.log(ratio[0]), math.log(ratio[1])) - aspect_ratio = math.exp(random.uniform(*log_ratio)) - - w = int(round(math.sqrt(target_area * aspect_ratio))) - h = int(round(math.sqrt(target_area / aspect_ratio))) - - if w <= img.size[0] and h <= img.size[1]: - i = random.randint(0, img.size[1] - h) - j = random.randint(0, img.size[0] - w) - return i, j, h, w - - # Fallback to central crop - in_ratio = img.size[0] / img.size[1] - if in_ratio < min(ratio): - w = img.size[0] - h = int(round(w / min(ratio))) - elif in_ratio > max(ratio): - h = img.size[1] - w = int(round(h * max(ratio))) - else: # whole image - w = img.size[0] - h = img.size[1] - i = (img.size[1] - h) // 2 - j = (img.size[0] - w) // 2 - return i, j, h, w - - def __call__(self, img): - """ - Args: - img (PIL Image): Image to be cropped and resized. - - Returns: - PIL Image: Randomly cropped and resized image. 
- """ - i, j, h, w = self.get_params(img, self.scale, self.ratio) - if isinstance(self.interpolation, (tuple, list)): - interpolation = random.choice(self.interpolation) - else: - interpolation = self.interpolation - if self.second_size is None: - return F.resized_crop(img, i, j, h, w, self.size, interpolation) - else: - return F.resized_crop(img, i, j, h, w, self.size, - interpolation), F.resized_crop( - img, i, j, h, w, self.second_size, - self.second_interpolation) - - def __repr__(self): - if isinstance(self.interpolation, (tuple, list)): - interpolate_str = ' '.join( - [_pil_interpolation_to_str[x] for x in self.interpolation]) - else: - interpolate_str = _pil_interpolation_to_str[self.interpolation] - format_string = self.__class__.__name__ + '(size={0}'.format(self.size) - format_string += ', scale={0}'.format( - tuple(round(s, 4) for s in self.scale)) - format_string += ', ratio={0}'.format( - tuple(round(r, 4) for r in self.ratio)) - format_string += ', interpolation={0}'.format(interpolate_str) - if self.second_size is not None: - format_string += ', second_size={0}'.format(self.second_size) - format_string += ', second_interpolation={0}'.format( - _pil_interpolation_to_str[self.second_interpolation]) - format_string += ')' - return format_string - - -class GroupRandomCrop(object): - - def __init__(self, size): - if isinstance(size, numbers.Number): - self.size = (int(size), int(size)) - else: - self.size = size - - def __call__(self, img_tuple): - img_group, label = img_tuple - - w, h = img_group[0].size - th, tw = self.size - - out_images = list() - - x1 = random.randint(0, w - tw) - y1 = random.randint(0, h - th) - - for img in img_group: - assert (img.size[0] == w and img.size[1] == h) - if w == tw and h == th: - out_images.append(img) - else: - out_images.append(img.crop((x1, y1, x1 + tw, y1 + th))) - - return (out_images, label) - - -class GroupCenterCrop(object): - - def __init__(self, size): - self.worker = torchvision.transforms.CenterCrop(size) - - def __call__(self, img_tuple): - img_group, label = img_tuple - return ([self.worker(img) for img in img_group], label) - - -class GroupRandomHorizontalFlip(object): - """Randomly horizontally flips the given PIL.Image with a probability of 0.5 - """ - - def __init__(self, selective_flip=True, is_flow=False): - self.is_flow = is_flow - self.class_LeftRight = [86, 87, 93, 94, 166, 167 - ] if selective_flip else [] - - def __call__(self, img_tuple, is_flow=False): - img_group, label = img_tuple - v = random.random() - if (label not in self.class_LeftRight) and v < 0.5: - ret = [img.transpose(Image.FLIP_LEFT_RIGHT) for img in img_group] - if self.is_flow: - for i in range(0, len(ret), 2): - ret[i] = ImageOps.invert( - ret[i]) # invert flow pixel values when flipping - return (ret, label) - else: - return img_tuple - - -class GroupNormalize(object): - - def __init__(self, mean, std): - self.mean = mean - self.std = std - - def __call__(self, tensor_tuple): - tensor, label = tensor_tuple - rep_mean = self.mean * (tensor.size()[0] // len(self.mean)) - rep_std = self.std * (tensor.size()[0] // len(self.std)) - - # TODO: make efficient - for t, m, s in zip(tensor, rep_mean, rep_std): - t.sub_(m).div_(s) - - return (tensor, label) - - -class GroupGrayScale(object): - - def __init__(self, size): - self.worker = torchvision.transforms.Grayscale(size) - - def __call__(self, img_tuple): - img_group, label = img_tuple - return ([self.worker(img) for img in img_group], label) - - -class GroupScale(object): - """ Rescales the input PIL.Image 
to the given 'size'. - 'size' will be the size of the smaller edge. - For example, if height > width, then image will be - rescaled to (size * height / width, size) - size: size of the smaller edge - interpolation: Default: PIL.Image.BILINEAR - """ - - def __init__(self, size, interpolation=Image.BILINEAR): - self.worker = torchvision.transforms.Resize(size, interpolation) - - def __call__(self, img_tuple): - img_group, label = img_tuple - return ([self.worker(img) for img in img_group], label) - - -class GroupOverSample(object): - - def __init__(self, crop_size, scale_size=None): - self.crop_size = crop_size if not isinstance(crop_size, int) else ( - crop_size, crop_size) - - if scale_size is not None: - self.scale_worker = GroupScale(scale_size) - else: - self.scale_worker = None - - def __call__(self, img_tuple): - if self.scale_worker is not None: - img_tuple = self.scale_worker(img_tuple) - - img_group, label = img_tuple - - image_w, image_h = img_group[0].size - crop_w, crop_h = self.crop_size - - offsets = GroupMultiScaleCrop.fill_fix_offset(False, image_w, image_h, - crop_w, crop_h) - oversample_group = list() - for o_w, o_h in offsets: - normal_group = list() - flip_group = list() - for i, img in enumerate(img_group): - crop = img.crop((o_w, o_h, o_w + crop_w, o_h + crop_h)) - normal_group.append(crop) - flip_crop = crop.copy().transpose(Image.FLIP_LEFT_RIGHT) - - if img.mode == 'L' and i % 2 == 0: - flip_group.append(ImageOps.invert(flip_crop)) - else: - flip_group.append(flip_crop) - - oversample_group.extend(normal_group) - oversample_group.extend(flip_group) - return (oversample_group, label) - - -class GroupFullResSample(object): - - def __init__(self, crop_size, scale_size=None, flip=True): - self.crop_size = crop_size if not isinstance(crop_size, int) else ( - crop_size, crop_size) - - if scale_size is not None: - self.scale_worker = GroupScale(scale_size) - else: - self.scale_worker = None - self.flip = flip - - def __call__(self, img_tuple): - - if self.scale_worker is not None: - img_tuple = self.scale_worker(img_tuple) - - img_group, label = img_tuple - image_w, image_h = img_group[0].size - crop_w, crop_h = self.crop_size - - w_step = (image_w - crop_w) // 4 - h_step = (image_h - crop_h) // 4 - - offsets = list() - offsets.append((0 * w_step, 2 * h_step)) # left - offsets.append((4 * w_step, 2 * h_step)) # right - offsets.append((2 * w_step, 2 * h_step)) # center - - oversample_group = list() - for o_w, o_h in offsets: - normal_group = list() - flip_group = list() - for i, img in enumerate(img_group): - crop = img.crop((o_w, o_h, o_w + crop_w, o_h + crop_h)) - normal_group.append(crop) - if self.flip: - flip_crop = crop.copy().transpose(Image.FLIP_LEFT_RIGHT) - - if img.mode == 'L' and i % 2 == 0: - flip_group.append(ImageOps.invert(flip_crop)) - else: - flip_group.append(flip_crop) - - oversample_group.extend(normal_group) - oversample_group.extend(flip_group) - return (oversample_group, label) - - -class GroupMultiScaleCrop(object): - - def __init__(self, - input_size, - scales=None, - max_distort=1, - fix_crop=True, - more_fix_crop=True): - self.scales = scales if scales is not None else [1, .875, .75, .66] - self.max_distort = max_distort - self.fix_crop = fix_crop - self.more_fix_crop = more_fix_crop - self.input_size = input_size if not isinstance(input_size, int) else [ - input_size, input_size - ] - self.interpolation = Image.BILINEAR - - def __call__(self, img_tuple): - img_group, label = img_tuple - - im_size = img_group[0].size - - crop_w, crop_h, offset_w, 
offset_h = self._sample_crop_size(im_size) - crop_img_group = [ - img.crop( - (offset_w, offset_h, offset_w + crop_w, offset_h + crop_h)) - for img in img_group - ] - ret_img_group = [ - img.resize((self.input_size[0], self.input_size[1]), - self.interpolation) for img in crop_img_group - ] - return (ret_img_group, label) - - def _sample_crop_size(self, im_size): - image_w, image_h = im_size[0], im_size[1] - - # find a crop size - base_size = min(image_w, image_h) - crop_sizes = [int(base_size * x) for x in self.scales] - crop_h = [ - self.input_size[1] if abs(x - self.input_size[1]) < 3 else x - for x in crop_sizes - ] - crop_w = [ - self.input_size[0] if abs(x - self.input_size[0]) < 3 else x - for x in crop_sizes - ] - - pairs = [] - for i, h in enumerate(crop_h): - for j, w in enumerate(crop_w): - if abs(i - j) <= self.max_distort: - pairs.append((w, h)) - - crop_pair = random.choice(pairs) - if not self.fix_crop: - w_offset = random.randint(0, image_w - crop_pair[0]) - h_offset = random.randint(0, image_h - crop_pair[1]) - else: - w_offset, h_offset = self._sample_fix_offset( - image_w, image_h, crop_pair[0], crop_pair[1]) - - return crop_pair[0], crop_pair[1], w_offset, h_offset - - def _sample_fix_offset(self, image_w, image_h, crop_w, crop_h): - offsets = self.fill_fix_offset(self.more_fix_crop, image_w, image_h, - crop_w, crop_h) - return random.choice(offsets) - - @staticmethod - def fill_fix_offset(more_fix_crop, image_w, image_h, crop_w, crop_h): - w_step = (image_w - crop_w) // 4 - h_step = (image_h - crop_h) // 4 - - ret = list() - ret.append((0, 0)) # upper left - ret.append((4 * w_step, 0)) # upper right - ret.append((0, 4 * h_step)) # lower left - ret.append((4 * w_step, 4 * h_step)) # lower right - ret.append((2 * w_step, 2 * h_step)) # center - - if more_fix_crop: - ret.append((0, 2 * h_step)) # center left - ret.append((4 * w_step, 2 * h_step)) # center right - ret.append((2 * w_step, 4 * h_step)) # lower center - ret.append((2 * w_step, 0 * h_step)) # upper center - - ret.append((1 * w_step, 1 * h_step)) # upper left quarter - ret.append((3 * w_step, 1 * h_step)) # upper right quarter - ret.append((1 * w_step, 3 * h_step)) # lower left quarter - ret.append((3 * w_step, 3 * h_step)) # lower righ quarter - - return ret - - -class GroupRandomSizedCrop(object): - """Random crop the given PIL.Image to a random size of (0.08 to 1.0) of the original size - and and a random aspect ratio of 3/4 to 4/3 of the original aspect ratio - This is popularly used to train the Inception networks - size: size of the smaller edge - interpolation: Default: PIL.Image.BILINEAR - """ - - def __init__(self, size, interpolation=Image.BILINEAR): - self.size = size - self.interpolation = interpolation - - def __call__(self, img_tuple): - img_group, label = img_tuple - - for attempt in range(10): - area = img_group[0].size[0] * img_group[0].size[1] - target_area = random.uniform(0.08, 1.0) * area - aspect_ratio = random.uniform(3. / 4, 4. 
/ 3) - - w = int(round(math.sqrt(target_area * aspect_ratio))) - h = int(round(math.sqrt(target_area / aspect_ratio))) - - if random.random() < 0.5: - w, h = h, w - - if w <= img_group[0].size[0] and h <= img_group[0].size[1]: - x1 = random.randint(0, img_group[0].size[0] - w) - y1 = random.randint(0, img_group[0].size[1] - h) - found = True - break - else: - found = False - x1 = 0 - y1 = 0 - - if found: - out_group = list() - for img in img_group: - img = img.crop((x1, y1, x1 + w, y1 + h)) - assert (img.size == (w, h)) - out_group.append( - img.resize((self.size, self.size), self.interpolation)) - return out_group - else: - # Fallback - scale = GroupScale(self.size, interpolation=self.interpolation) - crop = GroupRandomCrop(self.size) - return crop(scale(img_group)) - - -class Stack(object): - - def __init__(self, roll=False): - self.roll = roll - - def __call__(self, img_tuple): - img_group, label = img_tuple - - if img_group[0].mode == 'L': - return (np.concatenate([np.expand_dims(x, 2) for x in img_group], - axis=2), label) - elif img_group[0].mode == 'RGB': - if self.roll: - return (np.concatenate( - [np.array(x)[:, :, ::-1] for x in img_group], - axis=2), label) - else: - return (np.concatenate(img_group, axis=2), label) - - -class ToTorchFormatTensor(object): - """ Converts a PIL.Image (RGB) or numpy.ndarray (H x W x C) in the range [0, 255] - to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0] """ - - def __init__(self, div=True): - self.div = div - - def __call__(self, pic_tuple): - pic, label = pic_tuple - - if isinstance(pic, np.ndarray): - # handle numpy array - img = torch.from_numpy(pic).permute(2, 0, 1).contiguous() - else: - # handle PIL Image - img = torch.as_tensor(pic.tobytes(), dtype=torch.uint8) - img = img.view(pic.size[1], pic.size[0], len(pic.mode)) - # put it from HWC to CHW format - # yikes, this transpose takes 80% of the loading time/CPU - img = img.transpose(0, 1).transpose(0, 2).contiguous() - return (img.float().div(255.) if self.div else img.float(), label) - - -class IdentityTransform(object): - - def __call__(self, data): - return data diff --git a/spaces/Lazyhope/RepoSnipy/app.py b/spaces/Lazyhope/RepoSnipy/app.py deleted file mode 100644 index ef7049efcef4d750e66a45080550a79f2662ff9b..0000000000000000000000000000000000000000 --- a/spaces/Lazyhope/RepoSnipy/app.py +++ /dev/null @@ -1,200 +0,0 @@ -import re -from pathlib import Path -from typing import List, Optional - -import pandas as pd -import streamlit as st -from docarray import BaseDoc -from docarray.index import InMemoryExactNNIndex -from docarray.typing import TorchTensor -from transformers import pipeline - -INDEX_PATH = Path(__file__).parent.joinpath("data/index.bin") - - -@st.cache_resource(show_spinner="Loading dataset...") -def load_index(): - class RepoDoc(BaseDoc): - name: str - topics: List[str] - stars: int - license: str - code_embedding: Optional[TorchTensor[768]] - doc_embedding: Optional[TorchTensor[768]] - - default_doc = RepoDoc( - name="", - topics=[], - stars=0, - license="", - code_embedding=None, - doc_embedding=None, - ) - - return InMemoryExactNNIndex[RepoDoc](index_file_path=INDEX_PATH), default_doc - - -@st.cache_resource(show_spinner="Loading RepoSim pipeline...") -def load_model(): - return pipeline( - model="Lazyhope/RepoSim", - trust_remote_code=True, - device_map="auto", - ) - - -@st.cache_data(show_spinner=False) -def run_model(_model, repo_name, github_token): - with st.spinner( - f"Downloading and extracting the {repo_name}, this may take a while..." 
- ): - extracted_infos = _model.preprocess(repo_name, github_token=github_token) - - if not extracted_infos: - return None - - with st.spinner(f"Generating embeddings for {repo_name}..."): - repo_info = _model.forward(extracted_infos, st_progress=st.progress(0.0))[0] - - return repo_info - - -def run_search(index, query, search_field, limit): - top_matches, scores = index.find( - query=query, search_field=search_field, limit=limit - ) - - search_results = top_matches.to_dataframe() - search_results["scores"] = scores - - return search_results - - -index, default_doc = load_index() -model = load_model() - -with st.sidebar: - st.text_input( - label="GitHub Token", - key="github_token", - type="password", - placeholder="Paste your GitHub token here", - help="Consider setting GitHub token to avoid hitting rate limits: https://docs.github.com/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token", - ) - - st.slider( - label="Search results limit", - min_value=1, - max_value=100, - value=10, - step=1, - key="search_results_limit", - help="Limit the number of search results", - ) - - st.multiselect( - label="Display columns", - options=["scores", "name", "topics", "stars", "license"], - default=["scores", "name", "topics", "stars", "license"], - help="Select columns to display in the search results", - key="display_columns", - ) - - -repo_regex = r"^((git@|http(s)?://)?(github\.com)(/|:))?(?P[\w.-]+)(/)(?P[\w.-]+?)(\.git)?(/)?$" - -st.title("RepoSnipy") - -st.text_input( - "Enter a GitHub repo URL or owner/repo (case-sensitive):", - value="", - max_chars=200, - placeholder="numpy/numpy", - key="repo_input", -) - -st.checkbox( - label="Add/Update this repo to the index", - value=False, - key="update_index", - help="Encode the latest version of this repo and add/update it to the index", -) - - -search = st.button("Search") -if search: - match_res = re.match(repo_regex, st.session_state.repo_input) - if match_res is not None: - repo_name = f"{match_res.group('owner')}/{match_res.group('repo')}" - - records = index.filter({"name": {"$eq": repo_name}}) - query_doc = default_doc.copy() if not records else records[0] - if st.session_state.update_index or not records: - repo_info = run_model(model, repo_name, st.session_state.github_token) - if repo_info is None: - st.error("Repo not found or invalid GitHub token!") - st.stop() - - # Update document inplace - query_doc.name = repo_info["name"] - query_doc.topics = repo_info["topics"] - query_doc.stars = repo_info["stars"] - query_doc.license = repo_info["license"] - query_doc.code_embedding = repo_info["mean_code_embedding"] - query_doc.doc_embedding = repo_info["mean_doc_embedding"] - - if st.session_state.update_index: - if not records: - if not query_doc.license: - st.warning( - "License is missing in this repo and will not be persisted!" - ) - elif ( - query_doc.code_embedding is None and query_doc.doc_embedding is None - ): - st.warning( - "This repo has no function code or docstring extracted and will not be persisted!" 
- ) - else: - index.index(query_doc) - st.success("Repo added to the index!") - else: - st.success("Repo updated in the index!") - - with st.spinner("Persisting the index..."): - index.persist(file=INDEX_PATH) - - st.session_state["query_doc"] = query_doc - else: - st.error("Invalid input!") - -if "query_doc" in st.session_state: - query_doc = st.session_state.query_doc - limit = st.session_state.search_results_limit - st.dataframe( - pd.DataFrame( - [ - { - "name": query_doc.name, - "topics": query_doc.topics, - "stars": query_doc.stars, - "license": query_doc.license, - } - ], - ) - ) - - display_columns = st.session_state.display_columns - code_sim_tab, doc_sim_tab = st.tabs(["Code Similarity", "Docstring Similarity"]) - - if query_doc.code_embedding is not None: - code_sim_res = run_search(index, query_doc, "code_embedding", limit) - code_sim_tab.dataframe(code_sim_res[display_columns]) - else: - code_sim_tab.error("No function code was extracted for this repo!") - - if query_doc.doc_embedding is not None: - doc_sim_res = run_search(index, query_doc, "doc_embedding", limit) - doc_sim_tab.dataframe(doc_sim_res[display_columns]) - else: - doc_sim_tab.error("No function docstring was extracted for this repo!") diff --git a/spaces/Lbin123/Lbingo/tailwind.config.js b/spaces/Lbin123/Lbingo/tailwind.config.js deleted file mode 100644 index 03da3c3c45be6983b9f5ffa6df5f1fd0870e9636..0000000000000000000000000000000000000000 --- a/spaces/Lbin123/Lbingo/tailwind.config.js +++ /dev/null @@ -1,48 +0,0 @@ -/** @type {import('tailwindcss').Config} */ -module.exports = { - content: [ - './src/pages/**/*.{js,ts,jsx,tsx,mdx}', - './src/components/**/*.{js,ts,jsx,tsx,mdx}', - './src/app/**/*.{js,ts,jsx,tsx,mdx}', - './src/ui/**/*.{js,ts,jsx,tsx,mdx}', - ], - "darkMode": "class", - theme: { - extend: { - colors: { - 'primary-blue': 'rgb(var(--color-primary-blue) / )', - secondary: 'rgb(var(--color-secondary) / )', - 'primary-background': 'rgb(var(--primary-background) / )', - 'primary-text': 'rgb(var(--primary-text) / )', - 'secondary-text': 'rgb(var(--secondary-text) / )', - 'light-text': 'rgb(var(--light-text) / )', - 'primary-border': 'rgb(var(--primary-border) / )', - }, - keyframes: { - slideDownAndFade: { - from: { opacity: 0, transform: 'translateY(-2px)' }, - to: { opacity: 1, transform: 'translateY(0)' }, - }, - slideLeftAndFade: { - from: { opacity: 0, transform: 'translateX(2px)' }, - to: { opacity: 1, transform: 'translateX(0)' }, - }, - slideUpAndFade: { - from: { opacity: 0, transform: 'translateY(2px)' }, - to: { opacity: 1, transform: 'translateY(0)' }, - }, - slideRightAndFade: { - from: { opacity: 0, transform: 'translateX(2px)' }, - to: { opacity: 1, transform: 'translateX(0)' }, - }, - }, - animation: { - slideDownAndFade: 'slideDownAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - slideLeftAndFade: 'slideLeftAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - slideUpAndFade: 'slideUpAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - slideRightAndFade: 'slideRightAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - }, - }, - }, - plugins: [require('@headlessui/tailwindcss'), require('tailwind-scrollbar')], -} diff --git a/spaces/Liu-LAB/GPT-academic/docs/waifu_plugin/live2d.js b/spaces/Liu-LAB/GPT-academic/docs/waifu_plugin/live2d.js deleted file mode 100644 index 2cf559be672c438dfbd35db61eea12465ed0dffb..0000000000000000000000000000000000000000 --- a/spaces/Liu-LAB/GPT-academic/docs/waifu_plugin/live2d.js +++ /dev/null @@ -1,4238 +0,0 @@ -! 
-function(t) { - function i(r) { - if (e[r]) return e[r].exports; - var o = e[r] = { - i: r, - l: !1, - exports: {} - }; - return t[r].call(o.exports, o, o.exports, i), o.l = !0, o.exports - } - var e = {}; - i.m = t, i.c = e, i.d = function(t, e, r) { - i.o(t, e) || Object.defineProperty(t, e, { - configurable: !1, - enumerable: !0, - get: r - }) - }, i.n = function(t) { - var e = t && t.__esModule ? - function() { - return t. - default - } : function() { - return t - }; - return i.d(e, "a", e), e - }, i.o = function(t, i) { - return Object.prototype.hasOwnProperty.call(t, i) - }, i.p = "", i(i.s = 4) -}([function(t, i, e) { - "use strict"; - - function r() { - this.live2DModel = null, this.modelMatrix = null, this.eyeBlink = null, this.physics = null, this.pose = null, this.debugMode = !1, this.initialized = !1, this.updating = !1, this.alpha = 1, this.accAlpha = 0, this.lipSync = !1, this.lipSyncValue = 0, this.accelX = 0, this.accelY = 0, this.accelZ = 0, this.dragX = 0, this.dragY = 0, this.startTimeMSec = null, this.mainMotionManager = new h, this.expressionManager = new h, this.motions = {}, this.expressions = {}, this.isTexLoaded = !1 - } - function o() { - AMotion.prototype.constructor.call(this), this.paramList = new Array - } - function n() { - this.id = "", this.type = -1, this.value = null - } - function s() { - this.nextBlinkTime = null, this.stateStartTime = null, this.blinkIntervalMsec = null, this.eyeState = g.STATE_FIRST, this.blinkIntervalMsec = 4e3, this.closingMotionMsec = 100, this.closedMotionMsec = 50, this.openingMotionMsec = 150, this.closeIfZero = !0, this.eyeID_L = "PARAM_EYE_L_OPEN", this.eyeID_R = "PARAM_EYE_R_OPEN" - } - function _() { - this.tr = new Float32Array(16), this.identity() - } - function a(t, i) { - _.prototype.constructor.call(this), this.width = t, this.height = i - } - function h() { - MotionQueueManager.prototype.constructor.call(this), this.currentPriority = null, this.reservePriority = null, this.super = MotionQueueManager.prototype - } - function l() { - this.physicsList = new Array, this.startTimeMSec = UtSystem.getUserTimeMSec() - } - function $() { - this.lastTime = 0, this.lastModel = null, this.partsGroups = new Array - } - function u(t) { - this.paramIndex = -1, this.partsIndex = -1, this.link = null, this.id = t - } - function p() { - this.EPSILON = .01, this.faceTargetX = 0, this.faceTargetY = 0, this.faceX = 0, this.faceY = 0, this.faceVX = 0, this.faceVY = 0, this.lastTimeSec = 0 - } - function f() { - _.prototype.constructor.call(this), this.screenLeft = null, this.screenRight = null, this.screenTop = null, this.screenBottom = null, this.maxLeft = null, this.maxRight = null, this.maxTop = null, this.maxBottom = null, this.max = Number.MAX_VALUE, this.min = 0 - } - function c() {} - var d = 0; - r.prototype.getModelMatrix = function() { - return this.modelMatrix - }, r.prototype.setAlpha = function(t) { - t > .999 && (t = 1), t < .001 && (t = 0), this.alpha = t - }, r.prototype.getAlpha = function() { - return this.alpha - }, r.prototype.isInitialized = function() { - return this.initialized - }, r.prototype.setInitialized = function(t) { - this.initialized = t - }, r.prototype.isUpdating = function() { - return this.updating - }, r.prototype.setUpdating = function(t) { - this.updating = t - }, r.prototype.getLive2DModel = function() { - return this.live2DModel - }, r.prototype.setLipSync = function(t) { - this.lipSync = t - }, r.prototype.setLipSyncValue = function(t) { - this.lipSyncValue = t - }, r.prototype.setAccel = 
function(t, i, e) { - this.accelX = t, this.accelY = i, this.accelZ = e - }, r.prototype.setDrag = function(t, i) { - this.dragX = t, this.dragY = i - }, r.prototype.getMainMotionManager = function() { - return this.mainMotionManager - }, r.prototype.getExpressionManager = function() { - return this.expressionManager - }, r.prototype.loadModelData = function(t, i) { - var e = c.getPlatformManager(); - this.debugMode && e.log("Load model : " + t); - var r = this; - e.loadLive2DModel(t, function(t) { - if (r.live2DModel = t, r.live2DModel.saveParam(), 0 != Live2D.getError()) return void console.error("Error : Failed to loadModelData()."); - r.modelMatrix = new a(r.live2DModel.getCanvasWidth(), r.live2DModel.getCanvasHeight()), r.modelMatrix.setWidth(2), r.modelMatrix.setCenterPosition(0, 0), i(r.live2DModel) - }) - }, r.prototype.loadTexture = function(t, i, e) { - d++; - var r = c.getPlatformManager(); - this.debugMode && r.log("Load Texture : " + i); - var o = this; - r.loadTexture(this.live2DModel, t, i, function() { - d--, 0 == d && (o.isTexLoaded = !0), "function" == typeof e && e() - }) - }, r.prototype.loadMotion = function(t, i, e) { - var r = c.getPlatformManager(); - this.debugMode && r.log("Load Motion : " + i); - var o = null, - n = this; - r.loadBytes(i, function(i) { - o = Live2DMotion.loadMotion(i), null != t && (n.motions[t] = o), e(o) - }) - }, r.prototype.loadExpression = function(t, i, e) { - var r = c.getPlatformManager(); - this.debugMode && r.log("Load Expression : " + i); - var n = this; - r.loadBytes(i, function(i) { - null != t && (n.expressions[t] = o.loadJson(i)), "function" == typeof e && e() - }) - }, r.prototype.loadPose = function(t, i) { - var e = c.getPlatformManager(); - this.debugMode && e.log("Load Pose : " + t); - var r = this; - try { - e.loadBytes(t, function(t) { - r.pose = $.load(t), "function" == typeof i && i() - }) - } catch (t) { - console.warn(t) - } - }, r.prototype.loadPhysics = function(t) { - var i = c.getPlatformManager(); - this.debugMode && i.log("Load Physics : " + t); - var e = this; - try { - i.loadBytes(t, function(t) { - e.physics = l.load(t) - }) - } catch (t) { - console.warn(t) - } - }, r.prototype.hitTestSimple = function(t, i, e) { - if (null === this.live2DModel) return !1; - var r = this.live2DModel.getDrawDataIndex(t); - if (r < 0) return !1; - for (var o = this.live2DModel.getTransformedPoints(r), n = this.live2DModel.getCanvasWidth(), s = 0, _ = this.live2DModel.getCanvasHeight(), a = 0, h = 0; h < o.length; h += 2) { - var l = o[h], - $ = o[h + 1]; - l < n && (n = l), l > s && (s = l), $ < _ && (_ = $), $ > a && (a = $) - } - var u = this.modelMatrix.invertTransformX(i), - p = this.modelMatrix.invertTransformY(e); - return n <= u && u <= s && _ <= p && p <= a - }, r.prototype.hitTestSimpleCustom = function(t, i, e, r) { - return null !== this.live2DModel && (e >= t[0] && e <= i[0] && r <= t[1] && r >= i[1]) - }, o.prototype = new AMotion, o.EXPRESSION_DEFAULT = "DEFAULT", o.TYPE_SET = 0, o.TYPE_ADD = 1, o.TYPE_MULT = 2, o.loadJson = function(t) { - var i = new o, - e = c.getPlatformManager(), - r = e.jsonParseFromBytes(t); - if (i.setFadeIn(parseInt(r.fade_in) > 0 ? parseInt(r.fade_in) : 1e3), i.setFadeOut(parseInt(r.fade_out) > 0 ? parseInt(r.fade_out) : 1e3), null == r.params) return i; - var s = r.params, - _ = s.length; - i.paramList = []; - for (var a = 0; a < _; a++) { - var h = s[a], - l = h.id.toString(), - $ = parseFloat(h.val), - u = o.TYPE_ADD, - p = null != h.calc ? 
h.calc.toString() : "add"; - if ((u = "add" === p ? o.TYPE_ADD : "mult" === p ? o.TYPE_MULT : "set" === p ? o.TYPE_SET : o.TYPE_ADD) == o.TYPE_ADD) { - var f = null == h.def ? 0 : parseFloat(h.def); - $ -= f - } else if (u == o.TYPE_MULT) { - var f = null == h.def ? 1 : parseFloat(h.def); - 0 == f && (f = 1), $ /= f - } - var d = new n; - d.id = l, d.type = u, d.value = $, i.paramList.push(d) - } - return i - }, o.prototype.updateParamExe = function(t, i, e, r) { - for (var n = this.paramList.length - 1; n >= 0; --n) { - var s = this.paramList[n]; - s.type == o.TYPE_ADD ? t.addToParamFloat(s.id, s.value, e) : s.type == o.TYPE_MULT ? t.multParamFloat(s.id, s.value, e) : s.type == o.TYPE_SET && t.setParamFloat(s.id, s.value, e) - } - }, s.prototype.calcNextBlink = function() { - return UtSystem.getUserTimeMSec() + Math.random() * (2 * this.blinkIntervalMsec - 1) - }, s.prototype.setInterval = function(t) { - this.blinkIntervalMsec = t - }, s.prototype.setEyeMotion = function(t, i, e) { - this.closingMotionMsec = t, this.closedMotionMsec = i, this.openingMotionMsec = e - }, s.prototype.updateParam = function(t) { - var i, e = UtSystem.getUserTimeMSec(), - r = 0; - switch (this.eyeState) { - case g.STATE_CLOSING: - r = (e - this.stateStartTime) / this.closingMotionMsec, r >= 1 && (r = 1, this.eyeState = g.STATE_CLOSED, this.stateStartTime = e), i = 1 - r; - break; - case g.STATE_CLOSED: - r = (e - this.stateStartTime) / this.closedMotionMsec, r >= 1 && (this.eyeState = g.STATE_OPENING, this.stateStartTime = e), i = 0; - break; - case g.STATE_OPENING: - r = (e - this.stateStartTime) / this.openingMotionMsec, r >= 1 && (r = 1, this.eyeState = g.STATE_INTERVAL, this.nextBlinkTime = this.calcNextBlink()), i = r; - break; - case g.STATE_INTERVAL: - this.nextBlinkTime < e && (this.eyeState = g.STATE_CLOSING, this.stateStartTime = e), i = 1; - break; - case g.STATE_FIRST: - default: - this.eyeState = g.STATE_INTERVAL, this.nextBlinkTime = this.calcNextBlink(), i = 1 - } - this.closeIfZero || (i = -i), t.setParamFloat(this.eyeID_L, i), t.setParamFloat(this.eyeID_R, i) - }; - var g = function() {}; - g.STATE_FIRST = "STATE_FIRST", g.STATE_INTERVAL = "STATE_INTERVAL", g.STATE_CLOSING = "STATE_CLOSING", g.STATE_CLOSED = "STATE_CLOSED", g.STATE_OPENING = "STATE_OPENING", _.mul = function(t, i, e) { - var r, o, n, s = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]; - for (r = 0; r < 4; r++) for (o = 0; o < 4; o++) for (n = 0; n < 4; n++) s[r + 4 * o] += t[r + 4 * n] * i[n + 4 * o]; - for (r = 0; r < 16; r++) e[r] = s[r] - }, _.prototype.identity = function() { - for (var t = 0; t < 16; t++) this.tr[t] = t % 5 == 0 ? 
1 : 0 - }, _.prototype.getArray = function() { - return this.tr - }, _.prototype.getCopyMatrix = function() { - return new Float32Array(this.tr) - }, _.prototype.setMatrix = function(t) { - if (null != this.tr && this.tr.length == this.tr.length) for (var i = 0; i < 16; i++) this.tr[i] = t[i] - }, _.prototype.getScaleX = function() { - return this.tr[0] - }, _.prototype.getScaleY = function() { - return this.tr[5] - }, _.prototype.transformX = function(t) { - return this.tr[0] * t + this.tr[12] - }, _.prototype.transformY = function(t) { - return this.tr[5] * t + this.tr[13] - }, _.prototype.invertTransformX = function(t) { - return (t - this.tr[12]) / this.tr[0] - }, _.prototype.invertTransformY = function(t) { - return (t - this.tr[13]) / this.tr[5] - }, _.prototype.multTranslate = function(t, i) { - var e = [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, t, i, 0, 1]; - _.mul(e, this.tr, this.tr) - }, _.prototype.translate = function(t, i) { - this.tr[12] = t, this.tr[13] = i - }, _.prototype.translateX = function(t) { - this.tr[12] = t - }, _.prototype.translateY = function(t) { - this.tr[13] = t - }, _.prototype.multScale = function(t, i) { - var e = [t, 0, 0, 0, 0, i, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]; - _.mul(e, this.tr, this.tr) - }, _.prototype.scale = function(t, i) { - this.tr[0] = t, this.tr[5] = i - }, a.prototype = new _, a.prototype.setPosition = function(t, i) { - this.translate(t, i) - }, a.prototype.setCenterPosition = function(t, i) { - var e = this.width * this.getScaleX(), - r = this.height * this.getScaleY(); - this.translate(t - e / 2, i - r / 2) - }, a.prototype.top = function(t) { - this.setY(t) - }, a.prototype.bottom = function(t) { - var i = this.height * this.getScaleY(); - this.translateY(t - i) - }, a.prototype.left = function(t) { - this.setX(t) - }, a.prototype.right = function(t) { - var i = this.width * this.getScaleX(); - this.translateX(t - i) - }, a.prototype.centerX = function(t) { - var i = this.width * this.getScaleX(); - this.translateX(t - i / 2) - }, a.prototype.centerY = function(t) { - var i = this.height * this.getScaleY(); - this.translateY(t - i / 2) - }, a.prototype.setX = function(t) { - this.translateX(t) - }, a.prototype.setY = function(t) { - this.translateY(t) - }, a.prototype.setHeight = function(t) { - var i = t / this.height, - e = -i; - this.scale(i, e) - }, a.prototype.setWidth = function(t) { - var i = t / this.width, - e = -i; - this.scale(i, e) - }, h.prototype = new MotionQueueManager, h.prototype.getCurrentPriority = function() { - return this.currentPriority - }, h.prototype.getReservePriority = function() { - return this.reservePriority - }, h.prototype.reserveMotion = function(t) { - return !(this.reservePriority >= t) && (!(this.currentPriority >= t) && (this.reservePriority = t, !0)) - }, h.prototype.setReservePriority = function(t) { - this.reservePriority = t - }, h.prototype.updateParam = function(t) { - var i = MotionQueueManager.prototype.updateParam.call(this, t); - return this.isFinished() && (this.currentPriority = 0), i - }, h.prototype.startMotionPrio = function(t, i) { - return i == this.reservePriority && (this.reservePriority = 0), this.currentPriority = i, this.startMotion(t, !1) - }, l.load = function(t) { - for (var i = new l, e = c.getPlatformManager(), r = e.jsonParseFromBytes(t), o = r.physics_hair, n = o.length, s = 0; s < n; s++) { - var _ = o[s], - a = new PhysicsHair, - h = _.setup, - $ = parseFloat(h.length), - u = parseFloat(h.regist), - p = parseFloat(h.mass); - a.setup($, u, p); - for (var f = _.src, d = 
f.length, g = 0; g < d; g++) { - var y = f[g], - m = y.id, - T = PhysicsHair.Src.SRC_TO_X, - P = y.ptype; - "x" === P ? T = PhysicsHair.Src.SRC_TO_X : "y" === P ? T = PhysicsHair.Src.SRC_TO_Y : "angle" === P ? T = PhysicsHair.Src.SRC_TO_G_ANGLE : UtDebug.error("live2d", "Invalid parameter:PhysicsHair.Src"); - var S = parseFloat(y.scale), - v = parseFloat(y.weight); - a.addSrcParam(T, m, S, v) - } - for (var L = _.targets, M = L.length, g = 0; g < M; g++) { - var E = L[g], - m = E.id, - T = PhysicsHair.Target.TARGET_FROM_ANGLE, - P = E.ptype; - "angle" === P ? T = PhysicsHair.Target.TARGET_FROM_ANGLE : "angle_v" === P ? T = PhysicsHair.Target.TARGET_FROM_ANGLE_V : UtDebug.error("live2d", "Invalid parameter:PhysicsHair.Target"); - var S = parseFloat(E.scale), - v = parseFloat(E.weight); - a.addTargetParam(T, m, S, v) - } - i.physicsList.push(a) - } - return i - }, l.prototype.updateParam = function(t) { - for (var i = UtSystem.getUserTimeMSec() - this.startTimeMSec, e = 0; e < this.physicsList.length; e++) this.physicsList[e].update(t, i) - }, $.load = function(t) { - for (var i = new $, e = c.getPlatformManager(), r = e.jsonParseFromBytes(t), o = r.parts_visible, n = o.length, s = 0; s < n; s++) { - for (var _ = o[s], a = _.group, h = a.length, l = new Array, p = 0; p < h; p++) { - var f = a[p], - d = new u(f.id); - if (l[p] = d, null != f.link) { - var g = f.link, - y = g.length; - d.link = new Array; - for (var m = 0; m < y; m++) { - var T = new u(g[m]); - d.link.push(T) - } - } - } - i.partsGroups.push(l) - } - return i - }, $.prototype.updateParam = function(t) { - if (null != t) { - t != this.lastModel && this.initParam(t), this.lastModel = t; - var i = UtSystem.getUserTimeMSec(), - e = 0 == this.lastTime ? 0 : (i - this.lastTime) / 1e3; - this.lastTime = i, e < 0 && (e = 0); - for (var r = 0; r < this.partsGroups.length; r++) this.normalizePartsOpacityGroup(t, this.partsGroups[r], e), this.copyOpacityOtherParts(t, this.partsGroups[r]) - } - }, $.prototype.initParam = function(t) { - if (null != t) for (var i = 0; i < this.partsGroups.length; i++) for (var e = this.partsGroups[i], r = 0; r < e.length; r++) { - e[r].initIndex(t); - var o = e[r].partsIndex, - n = e[r].paramIndex; - if (!(o < 0)) { - var s = 0 != t.getParamFloat(n); - if (t.setPartsOpacity(o, s ? 1 : 0), t.setParamFloat(n, s ? 1 : 0), null != e[r].link) for (var _ = 0; _ < e[r].link.length; _++) e[r].link[_].initIndex(t) - } - } - }, $.prototype.normalizePartsOpacityGroup = function(t, i, e) { - for (var r = -1, o = 1, n = 0; n < i.length; n++) { - var s = i[n].partsIndex, - _ = i[n].paramIndex; - if (!(s < 0) && 0 != t.getParamFloat(_)) { - if (r >= 0) break; - r = n, o = t.getPartsOpacity(s), o += e / .5, o > 1 && (o = 1) - } - } - r < 0 && (r = 0, o = 1); - for (var n = 0; n < i.length; n++) { - var s = i[n].partsIndex; - if (!(s < 0)) if (r == n) t.setPartsOpacity(s, o); - else { - var a, h = t.getPartsOpacity(s); - a = o < .5 ? 
-.5 * o / .5 + 1 : .5 * (1 - o) / .5; - var l = (1 - a) * (1 - o); - l > .15 && (a = 1 - .15 / (1 - o)), h > a && (h = a), t.setPartsOpacity(s, h) - } - } - }, $.prototype.copyOpacityOtherParts = function(t, i) { - for (var e = 0; e < i.length; e++) { - var r = i[e]; - if (null != r.link && !(r.partsIndex < 0)) for (var o = t.getPartsOpacity(r.partsIndex), n = 0; n < r.link.length; n++) { - var s = r.link[n]; - s.partsIndex < 0 || t.setPartsOpacity(s.partsIndex, o) - } - } - }, u.prototype.initIndex = function(t) { - this.paramIndex = t.getParamIndex("VISIBLE:" + this.id), this.partsIndex = t.getPartsDataIndex(PartsDataID.getID(this.id)), t.setParamFloat(this.paramIndex, 1) - }, p.FRAME_RATE = 30, p.prototype.setPoint = function(t, i) { - this.faceTargetX = t, this.faceTargetY = i - }, p.prototype.getX = function() { - return this.faceX - }, p.prototype.getY = function() { - return this.faceY - }, p.prototype.update = function() { - var t = 40 / 7.5 / p.FRAME_RATE; - if (0 == this.lastTimeSec) return void(this.lastTimeSec = UtSystem.getUserTimeMSec()); - var i = UtSystem.getUserTimeMSec(), - e = (i - this.lastTimeSec) * p.FRAME_RATE / 1e3; - this.lastTimeSec = i; - var r = .15 * p.FRAME_RATE, - o = e * t / r, - n = this.faceTargetX - this.faceX, - s = this.faceTargetY - this.faceY; - if (!(Math.abs(n) <= this.EPSILON && Math.abs(s) <= this.EPSILON)) { - var _ = Math.sqrt(n * n + s * s), - a = t * n / _, - h = t * s / _, - l = a - this.faceVX, - $ = h - this.faceVY, - u = Math.sqrt(l * l + $ * $); - (u < -o || u > o) && (l *= o / u, $ *= o / u, u = o), this.faceVX += l, this.faceVY += $; - var f = .5 * (Math.sqrt(o * o + 16 * o * _ - 8 * o * _) - o), - c = Math.sqrt(this.faceVX * this.faceVX + this.faceVY * this.faceVY); - c > f && (this.faceVX *= f / c, this.faceVY *= f / c), this.faceX += this.faceVX, this.faceY += this.faceVY - } - }, f.prototype = new _, f.prototype.getMaxScale = function() { - return this.max - }, f.prototype.getMinScale = function() { - return this.min - }, f.prototype.setMaxScale = function(t) { - this.max = t - }, f.prototype.setMinScale = function(t) { - this.min = t - }, f.prototype.isMaxScale = function() { - return this.getScaleX() == this.max - }, f.prototype.isMinScale = function() { - return this.getScaleX() == this.min - }, f.prototype.adjustTranslate = function(t, i) { - this.tr[0] * this.maxLeft + (this.tr[12] + t) > this.screenLeft && (t = this.screenLeft - this.tr[0] * this.maxLeft - this.tr[12]), this.tr[0] * this.maxRight + (this.tr[12] + t) < this.screenRight && (t = this.screenRight - this.tr[0] * this.maxRight - this.tr[12]), this.tr[5] * this.maxTop + (this.tr[13] + i) < this.screenTop && (i = this.screenTop - this.tr[5] * this.maxTop - this.tr[13]), this.tr[5] * this.maxBottom + (this.tr[13] + i) > this.screenBottom && (i = this.screenBottom - this.tr[5] * this.maxBottom - this.tr[13]); - var e = [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, t, i, 0, 1]; - _.mul(e, this.tr, this.tr) - }, f.prototype.adjustScale = function(t, i, e) { - var r = e * this.tr[0]; - r < this.min ? 
this.tr[0] > 0 && (e = this.min / this.tr[0]) : r > this.max && this.tr[0] > 0 && (e = this.max / this.tr[0]); - var o = [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, t, i, 0, 1], - n = [e, 0, 0, 0, 0, e, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1], - s = [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, -t, -i, 0, 1]; - _.mul(s, this.tr, this.tr), _.mul(n, this.tr, this.tr), _.mul(o, this.tr, this.tr) - }, f.prototype.setScreenRect = function(t, i, e, r) { - this.screenLeft = t, this.screenRight = i, this.screenTop = r, this.screenBottom = e - }, f.prototype.setMaxScreenRect = function(t, i, e, r) { - this.maxLeft = t, this.maxRight = i, this.maxTop = r, this.maxBottom = e - }, f.prototype.getScreenLeft = function() { - return this.screenLeft - }, f.prototype.getScreenRight = function() { - return this.screenRight - }, f.prototype.getScreenBottom = function() { - return this.screenBottom - }, f.prototype.getScreenTop = function() { - return this.screenTop - }, f.prototype.getMaxLeft = function() { - return this.maxLeft - }, f.prototype.getMaxRight = function() { - return this.maxRight - }, f.prototype.getMaxBottom = function() { - return this.maxBottom - }, f.prototype.getMaxTop = function() { - return this.maxTop - }, c.platformManager = null, c.getPlatformManager = function() { - return c.platformManager - }, c.setPlatformManager = function(t) { - c.platformManager = t - }, t.exports = { - L2DTargetPoint: p, - Live2DFramework: c, - L2DViewMatrix: f, - L2DPose: $, - L2DPartsParam: u, - L2DPhysics: l, - L2DMotionManager: h, - L2DModelMatrix: a, - L2DMatrix44: _, - EYE_STATE: g, - L2DEyeBlink: s, - L2DExpressionParam: n, - L2DExpressionMotion: o, - L2DBaseModel: r - } -}, function(t, i, e) { - "use strict"; - var r = { - DEBUG_LOG: !1, - DEBUG_MOUSE_LOG: !1, - DEBUG_DRAW_HIT_AREA: !1, - DEBUG_DRAW_ALPHA_MODEL: !1, - VIEW_MAX_SCALE: 2, - VIEW_MIN_SCALE: .8, - VIEW_LOGICAL_LEFT: -1, - VIEW_LOGICAL_RIGHT: 1, - VIEW_LOGICAL_MAX_LEFT: -2, - VIEW_LOGICAL_MAX_RIGHT: 2, - VIEW_LOGICAL_MAX_BOTTOM: -2, - VIEW_LOGICAL_MAX_TOP: 2, - PRIORITY_NONE: 0, - PRIORITY_IDLE: 1, - PRIORITY_SLEEPY: 2, - PRIORITY_NORMAL: 3, - PRIORITY_FORCE: 4, - MOTION_GROUP_IDLE: "idle", - MOTION_GROUP_SLEEPY: "sleepy", - MOTION_GROUP_TAP_BODY: "tap_body", - MOTION_GROUP_FLICK_HEAD: "flick_head", - MOTION_GROUP_PINCH_IN: "pinch_in", - MOTION_GROUP_PINCH_OUT: "pinch_out", - MOTION_GROUP_SHAKE: "shake", - HIT_AREA_HEAD: "head", - HIT_AREA_BODY: "body" - }; - t.exports = r -}, function(t, i, e) { - "use strict"; - - function r(t) { - n = t - } - function o() { - return n - } - Object.defineProperty(i, "__esModule", { - value: !0 - }), i.setContext = r, i.getContext = o; - var n = void 0 -}, function(t, i, e) { - "use strict"; - - function r() {} - r.matrixStack = [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1], r.depth = 0, r.currentMatrix = [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1], r.tmp = new Array(16), r.reset = function() { - this.depth = 0 - }, r.loadIdentity = function() { - for (var t = 0; t < 16; t++) this.currentMatrix[t] = t % 5 == 0 ? 
1 : 0 - }, r.push = function() { - var t = (this.depth, 16 * (this.depth + 1)); - this.matrixStack.length < t + 16 && (this.matrixStack.length = t + 16); - for (var i = 0; i < 16; i++) this.matrixStack[t + i] = this.currentMatrix[i]; - this.depth++ - }, r.pop = function() { - --this.depth < 0 && (myError("Invalid matrix stack."), this.depth = 0); - for (var t = 16 * this.depth, i = 0; i < 16; i++) this.currentMatrix[i] = this.matrixStack[t + i] - }, r.getMatrix = function() { - return this.currentMatrix - }, r.multMatrix = function(t) { - var i, e, r; - for (i = 0; i < 16; i++) this.tmp[i] = 0; - for (i = 0; i < 4; i++) for (e = 0; e < 4; e++) for (r = 0; r < 4; r++) this.tmp[i + 4 * e] += this.currentMatrix[i + 4 * r] * t[r + 4 * e]; - for (i = 0; i < 16; i++) this.currentMatrix[i] = this.tmp[i] - }, t.exports = r -}, function(t, i, e) { - t.exports = e(5) -}, function(t, i, e) { - "use strict"; - - function r(t) { - return t && t.__esModule ? t : { - default: - t - } - } - function o(t) { - C = document.getElementById(t), C.addEventListener && (window.addEventListener("click", g), window.addEventListener("mousedown", g), window.addEventListener("mousemove", g), window.addEventListener("mouseup", g), document.addEventListener("mouseout", g), window.addEventListener("touchstart", y), window.addEventListener("touchend", y), window.addEventListener("touchmove", y)) - } - function n(t) { - var i = C.width, - e = C.height; - N = new M.L2DTargetPoint; - var r = e / i, - o = w. - default.VIEW_LOGICAL_LEFT, - n = w. - default.VIEW_LOGICAL_RIGHT, - _ = -r, - h = r; - if (window.Live2D.captureFrame = !1, B = new M.L2DViewMatrix, B.setScreenRect(o, n, _, h), B.setMaxScreenRect(w. - default.VIEW_LOGICAL_MAX_LEFT, w. - default.VIEW_LOGICAL_MAX_RIGHT, w. - default.VIEW_LOGICAL_MAX_BOTTOM, w. - default.VIEW_LOGICAL_MAX_TOP), B.setMaxScale(w. - default.VIEW_MAX_SCALE), B.setMinScale(w. - default.VIEW_MIN_SCALE), U = new M.L2DMatrix44, U.multScale(1, i / e), G = new M.L2DMatrix44, G.multTranslate(-i / 2, -e / 2), G.multScale(2 / i, -2 / i), F = v(), (0, D.setContext)(F), !F) return console.error("Failed to create WebGL context."), void(window.WebGLRenderingContext && console.error("Your browser don't support WebGL, check https://get.webgl.org/ for futher information.")); - window.Live2D.setGL(F), F.clearColor(0, 0, 0, 0), a(t), s() - } - function s() { - b || (b = !0, function t() { - _(); - var i = window.requestAnimationFrame || window.mozRequestAnimationFrame || window.webkitRequestAnimationFrame || window.msRequestAnimationFrame; - if (window.Live2D.captureFrame) { - window.Live2D.captureFrame = !1; - var e = document.createElement("a"); - document.body.appendChild(e), e.setAttribute("type", "hidden"), e.href = C.toDataURL(), e.download = window.Live2D.captureName || "live2d.png", e.click() - } - i(t, C) - }()) - } - function _() { - O. - default.reset(), O. - default.loadIdentity(), N.update(), R.setDrag(N.getX(), N.getY()), F.clear(F.COLOR_BUFFER_BIT), O. - default.multMatrix(U.getArray()), O. - default.multMatrix(B.getArray()), O. - default.push(); - for (var t = 0; t < R.numModels(); t++) { - var i = R.getModel(t); - if (null == i) return; - i.initialized && !i.updating && (i.update(), i.draw(F)) - } - O. 
- default.pop() - } - function a(t) { - R.reloadFlg = !0, R.count++, R.changeModel(F, t) - } - function h(t, i) { - return t.x * i.x + t.y * i.y - } - function l(t, i) { - var e = Math.sqrt(t * t + i * i); - return { - x: t / e, - y: i / e - } - } - function $(t, i, e) { - function r(t, i) { - return 180 * Math.acos(h({ - x: 0, - y: 1 - }, l(t, i))) / Math.PI - } - if (i.x < e.left + e.width && i.y < e.top + e.height && i.x > e.left && i.y > e.top) return i; - var o = t.x - i.x, - n = t.y - i.y, - s = r(o, n); - i.x < t.x && (s = 360 - s); - var _ = 360 - r(e.left - t.x, -1 * (e.top - t.y)), - a = 360 - r(e.left - t.x, -1 * (e.top + e.height - t.y)), - $ = r(e.left + e.width - t.x, -1 * (e.top - t.y)), - u = r(e.left + e.width - t.x, -1 * (e.top + e.height - t.y)), - p = n / o, - f = {}; - if (s < $) { - var c = e.top - t.y, - d = c / p; - f = { - y: t.y + c, - x: t.x + d - } - } else if (s < u) { - var g = e.left + e.width - t.x, - y = g * p; - f = { - y: t.y + y, - x: t.x + g - } - } else if (s < a) { - var m = e.top + e.height - t.y, - T = m / p; - f = { - y: t.y + m, - x: t.x + T - } - } else if (s < _) { - var P = t.x - e.left, - S = P * p; - f = { - y: t.y - S, - x: t.x - P - } - } else { - var v = e.top - t.y, - L = v / p; - f = { - y: t.y + v, - x: t.x + L - } - } - return f - } - function u(t) { - Y = !0; - var i = C.getBoundingClientRect(), - e = P(t.clientX - i.left), - r = S(t.clientY - i.top), - o = $({ - x: i.left + i.width / 2, - y: i.top + i.height * X - }, { - x: t.clientX, - y: t.clientY - }, i), - n = m(o.x - i.left), - s = T(o.y - i.top); - w. - default.DEBUG_MOUSE_LOG && console.log("onMouseMove device( x:" + t.clientX + " y:" + t.clientY + " ) view( x:" + n + " y:" + s + ")"), k = e, V = r, N.setPoint(n, s) - } - function p(t) { - Y = !0; - var i = C.getBoundingClientRect(), - e = P(t.clientX - i.left), - r = S(t.clientY - i.top), - o = $({ - x: i.left + i.width / 2, - y: i.top + i.height * X - }, { - x: t.clientX, - y: t.clientY - }, i), - n = m(o.x - i.left), - s = T(o.y - i.top); - w. - default.DEBUG_MOUSE_LOG && console.log("onMouseDown device( x:" + t.clientX + " y:" + t.clientY + " ) view( x:" + n + " y:" + s + ")"), k = e, V = r, R.tapEvent(n, s) - } - function f(t) { - var i = C.getBoundingClientRect(), - e = P(t.clientX - i.left), - r = S(t.clientY - i.top), - o = $({ - x: i.left + i.width / 2, - y: i.top + i.height * X - }, { - x: t.clientX, - y: t.clientY - }, i), - n = m(o.x - i.left), - s = T(o.y - i.top); - w. - default.DEBUG_MOUSE_LOG && console.log("onMouseMove device( x:" + t.clientX + " y:" + t.clientY + " ) view( x:" + n + " y:" + s + ")"), Y && (k = e, V = r, N.setPoint(n, s)) - } - function c() { - Y && (Y = !1), N.setPoint(0, 0) - } - function d() { - w. - default.DEBUG_LOG && console.log("Set Session Storage."), sessionStorage.setItem("Sleepy", "1") - } - function g(t) { - if ("mousewheel" == t.type); - else if ("mousedown" == t.type) p(t); - else if ("mousemove" == t.type) { - var i = sessionStorage.getItem("Sleepy"); - "1" === i && sessionStorage.setItem("Sleepy", "0"), u(t) - } else if ("mouseup" == t.type) { - if ("button" in t && 0 != t.button) return - } else if ("mouseout" == t.type) { - w. - default.DEBUG_LOG && console.log("Mouse out Window."), c(); - var e = sessionStorage.getItem("SleepyTimer"); - window.clearTimeout(e), e = window.setTimeout(d, 5e4), sessionStorage.setItem("SleepyTimer", e) - } - } - function y(t) { - var i = t.touches[0]; - "touchstart" == t.type ? 1 == t.touches.length && u(i) : "touchmove" == t.type ? 
f(i) : "touchend" == t.type && c() - } - function m(t) { - var i = G.transformX(t); - return B.invertTransformX(i) - } - function T(t) { - var i = G.transformY(t); - return B.invertTransformY(i) - } - function P(t) { - return G.transformX(t) - } - function S(t) { - return G.transformY(t) - } - function v() { - for (var t = ["webgl", "experimental-webgl", "webkit-3d", "moz-webgl"], i = 0; i < t.length; i++) try { - var e = C.getContext(t[i], { - premultipliedAlpha: !0 - }); - if (e) return e - } catch (t) {} - return null - } - function L(t, i, e) { - X = void 0 === e ? .5 : e, o(t), n(i) - } - e(6); - var M = e(0), - E = e(8), - A = r(E), - I = e(1), - w = r(I), - x = e(3), - O = r(x), - D = e(2), - R = (window.navigator.platform.toLowerCase(), new A. - default), - b = !1, - F = null, - C = null, - N = null, - B = null, - U = null, - G = null, - Y = !1, - k = 0, - V = 0, - X = .5; - window.loadlive2d = L -}, function(t, i, e) { - "use strict"; - (function(t) { - ! - function() { - function i() { - At || (this._$MT = null, this._$5S = null, this._$NP = 0, i._$42++, this._$5S = new Y(this)) - } - function e(t) { - if (!At) { - this.clipContextList = new Array, this.glcontext = t.gl, this.dp_webgl = t, this.curFrameNo = 0, this.firstError_clipInNotUpdate = !0, this.colorBuffer = 0, this.isInitGLFBFunc = !1, this.tmpBoundsOnModel = new S, at.glContext.length > at.frameBuffers.length && (this.curFrameNo = this.getMaskRenderTexture()), this.tmpModelToViewMatrix = new R, this.tmpMatrix2 = new R, this.tmpMatrixForMask = new R, this.tmpMatrixForDraw = new R, this.CHANNEL_COLORS = new Array; - var i = new A; - i = new A, i.r = 0, i.g = 0, i.b = 0, i.a = 1, this.CHANNEL_COLORS.push(i), i = new A, i.r = 1, i.g = 0, i.b = 0, i.a = 0, this.CHANNEL_COLORS.push(i), i = new A, i.r = 0, i.g = 1, i.b = 0, i.a = 0, this.CHANNEL_COLORS.push(i), i = new A, i.r = 0, i.g = 0, i.b = 1, i.a = 0, this.CHANNEL_COLORS.push(i); - for (var e = 0; e < this.CHANNEL_COLORS.length; e++) this.dp_webgl.setChannelFlagAsColor(e, this.CHANNEL_COLORS[e]) - } - } - function r(t, i, e) { - this.clipIDList = new Array, this.clipIDList = e, this.clippingMaskDrawIndexList = new Array; - for (var r = 0; r < e.length; r++) this.clippingMaskDrawIndexList.push(i.getDrawDataIndex(e[r])); - this.clippedDrawContextList = new Array, this.isUsing = !0, this.layoutChannelNo = 0, this.layoutBounds = new S, this.allClippedDrawRect = new S, this.matrixForMask = new Float32Array(16), this.matrixForDraw = new Float32Array(16), this.owner = t - } - function o(t, i) { - this._$gP = t, this.drawDataIndex = i - } - function n() { - At || (this.color = null) - } - function s() { - At || (this._$dP = null, this._$eo = null, this._$V0 = null, this._$dP = 1e3, this._$eo = 1e3, this._$V0 = 1, this._$a0()) - } - function _() {} - function a() { - this._$r = null, this._$0S = null - } - function h() { - At || (this.x = null, this.y = null, this.width = null, this.height = null) - } - function l(t) { - At || et.prototype.constructor.call(this, t) - } - function $() {} - function u(t) { - At || et.prototype.constructor.call(this, t) - } - function p() { - At || (this._$vo = null, this._$F2 = null, this._$ao = 400, this._$1S = 400, p._$42++) - } - function f() { - At || (this.p1 = new c, this.p2 = new c, this._$Fo = 0, this._$Db = 0, this._$L2 = 0, this._$M2 = 0, this._$ks = 0, this._$9b = 0, this._$iP = 0, this._$iT = 0, this._$lL = new Array, this._$qP = new Array, this.setup(.3, .5, .1)) - } - function c() { - this._$p = 1, this.x = 0, this.y = 0, this.vx = 0, 
this.vy = 0, this.ax = 0, this.ay = 0, this.fx = 0, this.fy = 0, this._$s0 = 0, this._$70 = 0, this._$7L = 0, this._$HL = 0 - } - function d(t, i, e) { - this._$wL = null, this.scale = null, this._$V0 = null, this._$wL = t, this.scale = i, this._$V0 = e - } - function g(t, i, e, r) { - d.prototype.constructor.call(this, i, e, r), this._$tL = null, this._$tL = t - } - function y(t, i, e) { - this._$wL = null, this.scale = null, this._$V0 = null, this._$wL = t, this.scale = i, this._$V0 = e - } - function T(t, i, e, r) { - y.prototype.constructor.call(this, i, e, r), this._$YP = null, this._$YP = t - } - function P() { - At || (this._$fL = 0, this._$gL = 0, this._$B0 = 1, this._$z0 = 1, this._$qT = 0, this.reflectX = !1, this.reflectY = !1) - } - function S() { - At || (this.x = null, this.y = null, this.width = null, this.height = null) - } - function v() {} - function L() { - At || (this.x = null, this.y = null) - } - function M() { - At || (this._$gP = null, this._$dr = null, this._$GS = null, this._$qb = null, this._$Lb = null, this._$mS = null, this.clipID = null, this.clipIDList = new Array) - } - function E() { - At || (this._$Eb = E._$ps, this._$lT = 1, this._$C0 = 1, this._$tT = 1, this._$WL = 1, this.culling = !1, this.matrix4x4 = new Float32Array(16), this.premultipliedAlpha = !1, this.anisotropy = 0, this.clippingProcess = E.CLIPPING_PROCESS_NONE, this.clipBufPre_clipContextMask = null, this.clipBufPre_clipContextDraw = null, this.CHANNEL_COLORS = new Array) - } - function A() { - At || (this.a = 1, this.r = 1, this.g = 1, this.b = 1, this.scale = 1, this._$ho = 1, this.blendMode = at.L2D_COLOR_BLEND_MODE_MULT) - } - function I() { - At || (this._$kP = null, this._$dr = null, this._$Ai = !0, this._$mS = null) - } - function w() {} - function x() { - At || (this._$VP = 0, this._$wL = null, this._$GP = null, this._$8o = x._$ds, this._$2r = -1, this._$O2 = 0, this._$ri = 0) - } - function O() {} - function D() { - At || (this._$Ob = null) - } - function R() { - this.m = new Float32Array(16), this.identity() - } - function b(t) { - At || et.prototype.constructor.call(this, t) - } - function F() { - At || (this._$7 = 1, this._$f = 0, this._$H = 0, this._$g = 1, this._$k = 0, this._$w = 0, this._$hi = STATE_IDENTITY, this._$Z = _$pS) - } - function C() { - At || (s.prototype.constructor.call(this), this.motions = new Array, this._$7r = null, this._$7r = C._$Co++, this._$D0 = 30, this._$yT = 0, this._$E = !0, this.loopFadeIn = !0, this._$AS = -1, _$a0()) - } - function N() { - this._$P = new Float32Array(100), this.size = 0 - } - function B() { - this._$4P = null, this._$I0 = null, this._$RP = null - } - function U() {} - function G() {} - function Y(t) { - At || (this._$QT = !0, this._$co = -1, this._$qo = 0, this._$pb = new Array(Y._$is), this._$_2 = new Float32Array(Y._$is), this._$vr = new Float32Array(Y._$is), this._$Rr = new Float32Array(Y._$is), this._$Or = new Float32Array(Y._$is), this._$fs = new Float32Array(Y._$is), this._$Js = new Array(Y._$is), this._$3S = new Array, this._$aS = new Array, this._$Bo = null, this._$F2 = new Array, this._$db = new Array, this._$8b = new Array, this._$Hr = new Array, this._$Ws = null, this._$Vs = null, this._$Er = null, this._$Es = new Int16Array(U._$Qb), this._$ZP = new Float32Array(2 * U._$1r), this._$Ri = t, this._$b0 = Y._$HP++, this.clipManager = null, this.dp_webgl = null) - } - function k() {} - function V() { - At || (this._$12 = null, this._$bb = null, this._$_L = null, this._$jo = null, this._$iL = null, this._$0L = null, this._$Br = 
null, this._$Dr = null, this._$Cb = null, this._$mr = null, this._$_L = wt.STATE_FIRST, this._$Br = 4e3, this._$Dr = 100, this._$Cb = 50, this._$mr = 150, this._$jo = !0, this._$iL = "PARAM_EYE_L_OPEN", this._$0L = "PARAM_EYE_R_OPEN") - } - function X() { - At || (E.prototype.constructor.call(this), this._$sb = new Int32Array(X._$As), this._$U2 = new Array, this.transform = null, this.gl = null, null == X._$NT && (X._$NT = X._$9r(256), X._$vS = X._$9r(256), X._$no = X._$vb(256))) - } - function z() { - At || (I.prototype.constructor.call(this), this._$GS = null, this._$Y0 = null) - } - function H(t) { - _t.prototype.constructor.call(this, t), this._$8r = I._$ur, this._$Yr = null, this._$Wr = null - } - function W() { - At || (M.prototype.constructor.call(this), this._$gP = null, this._$dr = null, this._$GS = null, this._$qb = null, this._$Lb = null, this._$mS = null) - } - function j() { - At || (this._$NL = null, this._$3S = null, this._$aS = null, j._$42++) - } - function q() { - At || (i.prototype.constructor.call(this), this._$zo = new X) - } - function J() { - At || (s.prototype.constructor.call(this), this.motions = new Array, this._$o2 = null, this._$7r = J._$Co++, this._$D0 = 30, this._$yT = 0, this._$E = !1, this.loopFadeIn = !0, this._$rr = -1, this._$eP = 0) - } - function Q(t, i) { - return String.fromCharCode(t.getUint8(i)) - } - function N() { - this._$P = new Float32Array(100), this.size = 0 - } - function B() { - this._$4P = null, this._$I0 = null, this._$RP = null - } - function Z() { - At || (I.prototype.constructor.call(this), this._$o = 0, this._$A = 0, this._$GS = null, this._$Eo = null) - } - function K(t) { - _t.prototype.constructor.call(this, t), this._$8r = I._$ur, this._$Cr = null, this._$hr = null - } - function tt() { - At || (this.visible = !0, this._$g0 = !1, this._$NL = null, this._$3S = null, this._$aS = null, tt._$42++) - } - function it(t) { - this._$VS = null, this._$e0 = null, this._$e0 = t - } - function et(t) { - At || (this.id = t) - } - function rt() {} - function ot() { - At || (this._$4S = null) - } - function nt(t, i) { - this.canvas = t, this.context = i, this.viewport = new Array(0, 0, t.width, t.height), this._$6r = 1, this._$xP = 0, this._$3r = 1, this._$uP = 0, this._$Qo = -1, this.cacheImages = {} - } - function st() { - At || (this._$TT = null, this._$LT = null, this._$FS = null, this._$wL = null) - } - function _t(t) { - At || (this._$e0 = null, this._$IP = null, this._$JS = !1, this._$AT = !0, this._$e0 = t, this.totalScale = 1, this._$7s = 1, this.totalOpacity = 1) - } - function at() {} - function ht() {} - function lt(t) { - At || (this._$ib = t) - } - function $t() { - At || (W.prototype.constructor.call(this), this._$LP = -1, this._$d0 = 0, this._$Yo = 0, this._$JP = null, this._$5P = null, this._$BP = null, this._$Eo = null, this._$Qi = null, this._$6s = $t._$ms, this.culling = !0, this.gl_cacheImage = null, this.instanceNo = $t._$42++) - } - function ut(t) { - Mt.prototype.constructor.call(this, t), this._$8r = W._$ur, this._$Cr = null, this._$hr = null - } - function pt() { - At || (this.x = null, this.y = null) - } - function ft(t) { - At || (i.prototype.constructor.call(this), this.drawParamWebGL = new mt(t), this.drawParamWebGL.setGL(at.getGL(t))) - } - function ct() { - At || (this.motions = null, this._$eb = !1, this.motions = new Array) - } - function dt() { - this._$w0 = null, this._$AT = !0, this._$9L = !1, this._$z2 = -1, this._$bs = -1, this._$Do = -1, this._$sr = null, this._$sr = dt._$Gs++ - } - function gt() { - 
this.m = new Array(1, 0, 0, 0, 1, 0, 0, 0, 1) - } - function yt(t) { - At || et.prototype.constructor.call(this, t) - } - function mt(t) { - At || (E.prototype.constructor.call(this), this.textures = new Array, this.transform = null, this.gl = null, this.glno = t, this.firstDraw = !0, this.anisotropyExt = null, this.maxAnisotropy = 0, this._$As = 32, this._$Gr = !1, this._$NT = null, this._$vS = null, this._$no = null, this.vertShader = null, this.fragShader = null, this.vertShaderOff = null, this.fragShaderOff = null) - } - function Tt(t, i, e) { - return null == i && (i = t.createBuffer()), t.bindBuffer(t.ARRAY_BUFFER, i), t.bufferData(t.ARRAY_BUFFER, e, t.DYNAMIC_DRAW), i - } - function Pt(t, i, e) { - return null == i && (i = t.createBuffer()), t.bindBuffer(t.ELEMENT_ARRAY_BUFFER, i), t.bufferData(t.ELEMENT_ARRAY_BUFFER, e, t.DYNAMIC_DRAW), i - } - function St(t) { - At || (this._$P = new Int8Array(8), this._$R0 = new DataView(this._$P.buffer), this._$3i = new Int8Array(1e3), this._$hL = 0, this._$v0 = 0, this._$S2 = 0, this._$Ko = new Array, this._$T = t, this._$F = 0) - } - function vt() {} - function Lt() {} - function Mt(t) { - At || (this._$e0 = null, this._$IP = null, this._$Us = null, this._$7s = null, this._$IS = [!1], this._$VS = null, this._$AT = !0, this.baseOpacity = 1, this.clipBufPre_clipContext = null, this._$e0 = t) - } - function Et() {} - var At = !0; - i._$0s = 1, i._$4s = 2, i._$42 = 0, i._$62 = function(t, e) { - try { - if (e instanceof ArrayBuffer && (e = new DataView(e)), !(e instanceof DataView)) throw new lt("_$SS#loadModel(b) / b _$x be DataView or ArrayBuffer"); - var r, o = new St(e), - n = o._$ST(), - s = o._$ST(), - a = o._$ST(); - if (109 != n || 111 != s || 99 != a) throw new lt("_$gi _$C _$li , _$Q0 _$P0."); - if (r = o._$ST(), o._$gr(r), r > G._$T7) { - t._$NP |= i._$4s; - throw new lt("_$gi _$C _$li , _$n0 _$_ version _$li ( SDK : " + G._$T7 + " < _$f0 : " + r + " )@_$SS#loadModel()\n") - } - var h = o._$nP(); - if (r >= G._$s7) { - var l = o._$9T(), - $ = o._$9T(); - if (-30584 != l || -30584 != $) throw t._$NP |= i._$0s, new lt("_$gi _$C _$li , _$0 _$6 _$Ui.") - } - t._$KS(h); - var u = t.getModelContext(); - u.setDrawParam(t.getDrawParam()), u.init() - } catch (t) { - _._$Rb(t) - } - }, i.prototype._$KS = function(t) { - this._$MT = t - }, i.prototype.getModelImpl = function() { - return null == this._$MT && (this._$MT = new p, this._$MT._$zP()), this._$MT - }, i.prototype.getCanvasWidth = function() { - return null == this._$MT ? 0 : this._$MT.getCanvasWidth() - }, i.prototype.getCanvasHeight = function() { - return null == this._$MT ? 
0 : this._$MT.getCanvasHeight() - }, i.prototype.getParamFloat = function(t) { - return "number" != typeof t && (t = this._$5S.getParamIndex(u.getID(t))), this._$5S.getParamFloat(t) - }, i.prototype.setParamFloat = function(t, i, e) { - "number" != typeof t && (t = this._$5S.getParamIndex(u.getID(t))), arguments.length < 3 && (e = 1), this._$5S.setParamFloat(t, this._$5S.getParamFloat(t) * (1 - e) + i * e) - }, i.prototype.addToParamFloat = function(t, i, e) { - "number" != typeof t && (t = this._$5S.getParamIndex(u.getID(t))), arguments.length < 3 && (e = 1), this._$5S.setParamFloat(t, this._$5S.getParamFloat(t) + i * e) - }, i.prototype.multParamFloat = function(t, i, e) { - "number" != typeof t && (t = this._$5S.getParamIndex(u.getID(t))), arguments.length < 3 && (e = 1), this._$5S.setParamFloat(t, this._$5S.getParamFloat(t) * (1 + (i - 1) * e)) - }, i.prototype.getParamIndex = function(t) { - return this._$5S.getParamIndex(u.getID(t)) - }, i.prototype.loadParam = function() { - this._$5S.loadParam() - }, i.prototype.saveParam = function() { - this._$5S.saveParam() - }, i.prototype.init = function() { - this._$5S.init() - }, i.prototype.update = function() { - this._$5S.update() - }, i.prototype._$Rs = function() { - return _._$li("_$60 _$PT _$Rs()"), -1 - }, i.prototype._$Ds = function(t) { - _._$li("_$60 _$PT _$SS#_$Ds() \n") - }, i.prototype._$K2 = function() {}, i.prototype.draw = function() {}, i.prototype.getModelContext = function() { - return this._$5S - }, i.prototype._$s2 = function() { - return this._$NP - }, i.prototype._$P7 = function(t, i, e, r) { - var o = -1, - n = 0, - s = this; - if (0 != e) if (1 == t.length) { - var _ = t[0], - a = 0 != s.getParamFloat(_), - h = i[0], - l = s.getPartsOpacity(h), - $ = e / r; - a ? (l += $) > 1 && (l = 1) : (l -= $) < 0 && (l = 0), s.setPartsOpacity(h, l) - } else { - for (var u = 0; u < t.length; u++) { - var _ = t[u], - p = 0 != s.getParamFloat(_); - if (p) { - if (o >= 0) break; - o = u; - var h = i[u]; - n = s.getPartsOpacity(h), n += e / r, n > 1 && (n = 1) - } - } - o < 0 && (console.log("No _$wi _$q0/ _$U default[%s]", t[0]), o = 0, n = 1, s.loadParam(), s.setParamFloat(t[o], n), s.saveParam()); - for (var u = 0; u < t.length; u++) { - var h = i[u]; - if (o == u) s.setPartsOpacity(h, n); - else { - var f, c = s.getPartsOpacity(h); - f = n < .5 ? -.5 * n / .5 + 1 : .5 * (1 - n) / .5; - var d = (1 - f) * (1 - n); - d > .15 && (f = 1 - .15 / (1 - n)), c > f && (c = f), s.setPartsOpacity(h, c) - } - } - } else for (var u = 0; u < t.length; u++) { - var _ = t[u], - h = i[u], - p = 0 != s.getParamFloat(_); - s.setPartsOpacity(h, p ? 1 : 0) - } - }, i.prototype.setPartsOpacity = function(t, i) { - "number" != typeof t && (t = this._$5S.getPartsDataIndex(l.getID(t))), this._$5S.setPartsOpacity(t, i) - }, i.prototype.getPartsDataIndex = function(t) { - return t instanceof l || (t = l.getID(t)), this._$5S.getPartsDataIndex(t) - }, i.prototype.getPartsOpacity = function(t) { - return "number" != typeof t && (t = this._$5S.getPartsDataIndex(l.getID(t))), t < 0 ? 0 : this._$5S.getPartsOpacity(t) - }, i.prototype.getDrawParam = function() {}, i.prototype.getDrawDataIndex = function(t) { - return this._$5S.getDrawDataIndex(b.getID(t)) - }, i.prototype.getDrawData = function(t) { - return this._$5S.getDrawData(t) - }, i.prototype.getTransformedPoints = function(t) { - var i = this._$5S._$C2(t); - return i instanceof ut ? 
i.getTransformedPoints() : null - }, i.prototype.getIndexArray = function(t) { - if (t < 0 || t >= this._$5S._$aS.length) return null; - var i = this._$5S._$aS[t]; - return null != i && i.getType() == W._$wb && i instanceof $t ? i.getIndexArray() : null - }, e.CHANNEL_COUNT = 4, e.RENDER_TEXTURE_USE_MIPMAP = !1, e.NOT_USED_FRAME = -100, e.prototype._$L7 = function() { - if (this.tmpModelToViewMatrix && (this.tmpModelToViewMatrix = null), this.tmpMatrix2 && (this.tmpMatrix2 = null), this.tmpMatrixForMask && (this.tmpMatrixForMask = null), this.tmpMatrixForDraw && (this.tmpMatrixForDraw = null), this.tmpBoundsOnModel && (this.tmpBoundsOnModel = null), this.CHANNEL_COLORS) { - for (var t = this.CHANNEL_COLORS.length - 1; t >= 0; --t) this.CHANNEL_COLORS.splice(t, 1); - this.CHANNEL_COLORS = [] - } - this.releaseShader() - }, e.prototype.releaseShader = function() { - for (var t = at.frameBuffers.length, i = 0; i < t; i++) this.gl.deleteFramebuffer(at.frameBuffers[i].framebuffer); - at.frameBuffers = [], at.glContext = [] - }, e.prototype.init = function(t, i, e) { - for (var o = 0; o < i.length; o++) { - var n = i[o].getClipIDList(); - if (null != n) { - var s = this.findSameClip(n); - null == s && (s = new r(this, t, n), this.clipContextList.push(s)); - var _ = i[o].getDrawDataID(), - a = t.getDrawDataIndex(_); - s.addClippedDrawData(_, a); - e[o].clipBufPre_clipContext = s - } - } - }, e.prototype.getMaskRenderTexture = function() { - var t = null; - return t = this.dp_webgl.createFramebuffer(), at.frameBuffers[this.dp_webgl.glno] = t, this.dp_webgl.glno - }, e.prototype.setupClip = function(t, i) { - for (var e = 0, r = 0; r < this.clipContextList.length; r++) { - var o = this.clipContextList[r]; - this.calcClippedDrawTotalBounds(t, o), o.isUsing && e++ - } - if (e > 0) { - var n = i.gl.getParameter(i.gl.FRAMEBUFFER_BINDING), - s = new Array(4); - s[0] = 0, s[1] = 0, s[2] = i.gl.canvas.width, s[3] = i.gl.canvas.height, i.gl.viewport(0, 0, at.clippingMaskBufferSize, at.clippingMaskBufferSize), this.setupLayoutBounds(e), i.gl.bindFramebuffer(i.gl.FRAMEBUFFER, at.frameBuffers[this.curFrameNo].framebuffer), i.gl.clearColor(0, 0, 0, 0), i.gl.clear(i.gl.COLOR_BUFFER_BIT); - for (var r = 0; r < this.clipContextList.length; r++) { - var o = this.clipContextList[r], - _ = o.allClippedDrawRect, - a = (o.layoutChannelNo, o.layoutBounds); - this.tmpBoundsOnModel._$jL(_), this.tmpBoundsOnModel.expand(.05 * _.width, .05 * _.height); - var h = a.width / this.tmpBoundsOnModel.width, - l = a.height / this.tmpBoundsOnModel.height; - this.tmpMatrix2.identity(), this.tmpMatrix2.translate(-1, -1, 0), this.tmpMatrix2.scale(2, 2, 1), this.tmpMatrix2.translate(a.x, a.y, 0), this.tmpMatrix2.scale(h, l, 1), this.tmpMatrix2.translate(-this.tmpBoundsOnModel.x, -this.tmpBoundsOnModel.y, 0), this.tmpMatrixForMask.setMatrix(this.tmpMatrix2.m), this.tmpMatrix2.identity(), this.tmpMatrix2.translate(a.x, a.y, 0), this.tmpMatrix2.scale(h, l, 1), this.tmpMatrix2.translate(-this.tmpBoundsOnModel.x, -this.tmpBoundsOnModel.y, 0), this.tmpMatrixForDraw.setMatrix(this.tmpMatrix2.m); - for (var $ = this.tmpMatrixForMask.getArray(), u = 0; u < 16; u++) o.matrixForMask[u] = $[u]; - for (var p = this.tmpMatrixForDraw.getArray(), u = 0; u < 16; u++) o.matrixForDraw[u] = p[u]; - for (var f = o.clippingMaskDrawIndexList.length, c = 0; c < f; c++) { - var d = o.clippingMaskDrawIndexList[c], - g = t.getDrawData(d), - y = t._$C2(d); - i.setClipBufPre_clipContextForMask(o), g.draw(i, t, y) - } - } - i.gl.bindFramebuffer(i.gl.FRAMEBUFFER, 
n), i.setClipBufPre_clipContextForMask(null), i.gl.viewport(s[0], s[1], s[2], s[3]) - } - }, e.prototype.getColorBuffer = function() { - return this.colorBuffer - }, e.prototype.findSameClip = function(t) { - for (var i = 0; i < this.clipContextList.length; i++) { - var e = this.clipContextList[i], - r = e.clipIDList.length; - if (r == t.length) { - for (var o = 0, n = 0; n < r; n++) for (var s = e.clipIDList[n], _ = 0; _ < r; _++) if (t[_] == s) { - o++; - break - } - if (o == r) return e - } - } - return null - }, e.prototype.calcClippedDrawTotalBounds = function(t, i) { - for (var e = t._$Ri.getModelImpl().getCanvasWidth(), r = t._$Ri.getModelImpl().getCanvasHeight(), o = e > r ? e : r, n = o, s = o, _ = 0, a = 0, h = i.clippedDrawContextList.length, l = 0; l < h; l++) { - var $ = i.clippedDrawContextList[l], - u = $.drawDataIndex, - p = t._$C2(u); - if (p._$yo()) { - for (var f = p.getTransformedPoints(), c = f.length, d = [], g = [], y = 0, m = U._$i2; m < c; m += U._$No) d[y] = f[m], g[y] = f[m + 1], y++; - var T = Math.min.apply(null, d), - P = Math.min.apply(null, g), - S = Math.max.apply(null, d), - v = Math.max.apply(null, g); - T < n && (n = T), P < s && (s = P), S > _ && (_ = S), v > a && (a = v) - } - } - if (n == o) i.allClippedDrawRect.x = 0, i.allClippedDrawRect.y = 0, i.allClippedDrawRect.width = 0, i.allClippedDrawRect.height = 0, i.isUsing = !1; - else { - var L = _ - n, - M = a - s; - i.allClippedDrawRect.x = n, i.allClippedDrawRect.y = s, i.allClippedDrawRect.width = L, i.allClippedDrawRect.height = M, i.isUsing = !0 - } - }, e.prototype.setupLayoutBounds = function(t) { - var i = t / e.CHANNEL_COUNT, - r = t % e.CHANNEL_COUNT; - i = ~~i, r = ~~r; - for (var o = 0, n = 0; n < e.CHANNEL_COUNT; n++) { - var s = i + (n < r ? 1 : 0); - if (0 == s); - else if (1 == s) { - var a = this.clipContextList[o++]; - a.layoutChannelNo = n, a.layoutBounds.x = 0, a.layoutBounds.y = 0, a.layoutBounds.width = 1, a.layoutBounds.height = 1 - } else if (2 == s) for (var h = 0; h < s; h++) { - var l = h % 2, - $ = 0; - l = ~~l; - var a = this.clipContextList[o++]; - a.layoutChannelNo = n, a.layoutBounds.x = .5 * l, a.layoutBounds.y = 0, a.layoutBounds.width = .5, a.layoutBounds.height = 1 - } else if (s <= 4) for (var h = 0; h < s; h++) { - var l = h % 2, - $ = h / 2; - l = ~~l, $ = ~~$; - var a = this.clipContextList[o++]; - a.layoutChannelNo = n, a.layoutBounds.x = .5 * l, a.layoutBounds.y = .5 * $, a.layoutBounds.width = .5, a.layoutBounds.height = .5 - } else if (s <= 9) for (var h = 0; h < s; h++) { - var l = h % 3, - $ = h / 3; - l = ~~l, $ = ~~$; - var a = this.clipContextList[o++]; - a.layoutChannelNo = n, a.layoutBounds.x = l / 3, a.layoutBounds.y = $ / 3, a.layoutBounds.width = 1 / 3, a.layoutBounds.height = 1 / 3 - } else _._$li("_$6 _$0P mask count : %d", s) - } - }, r.prototype.addClippedDrawData = function(t, i) { - var e = new o(t, i); - this.clippedDrawContextList.push(e) - }, s._$JT = function(t, i, e) { - var r = t / i, - o = e / i, - n = o, - s = 1 - (1 - o) * (1 - o), - _ = 1 - (1 - n) * (1 - n), - a = 1 / 3 * (1 - o) * s + (n * (2 / 3) + 1 / 3 * (1 - n)) * (1 - s), - h = (n + 2 / 3 * (1 - n)) * _ + (o * (1 / 3) + 2 / 3 * (1 - o)) * (1 - _), - l = 1 - 3 * h + 3 * a - 0, - $ = 3 * h - 6 * a + 0, - u = 3 * a - 0; - if (r <= 0) return 0; - if (r >= 1) return 1; - var p = r, - f = p * p; - return l * (p * f) + $ * f + u * p + 0 - }, s.prototype._$a0 = function() {}, s.prototype.setFadeIn = function(t) { - this._$dP = t - }, s.prototype.setFadeOut = function(t) { - this._$eo 
= t - }, s.prototype._$pT = function(t) { - this._$V0 = t - }, s.prototype.getFadeOut = function() { - return this._$eo - }, s.prototype._$4T = function() { - return this._$eo - }, s.prototype._$mT = function() { - return this._$V0 - }, s.prototype.getDurationMSec = function() { - return -1 - }, s.prototype.getLoopDurationMSec = function() { - return -1 - }, s.prototype.updateParam = function(t, i) { - if (i._$AT && !i._$9L) { - var e = w.getUserTimeMSec(); - if (i._$z2 < 0) { - i._$z2 = e, i._$bs = e; - var r = this.getDurationMSec(); - i._$Do < 0 && (i._$Do = r <= 0 ? -1 : i._$z2 + r) - } - var o = this._$V0; - o = o * (0 == this._$dP ? 1 : ht._$r2((e - i._$bs) / this._$dP)) * (0 == this._$eo || i._$Do < 0 ? 1 : ht._$r2((i._$Do - e) / this._$eo)), 0 <= o && o <= 1 || console.log("### assert!! ### "), this.updateParamExe(t, e, o, i), i._$Do > 0 && i._$Do < e && (i._$9L = !0) - } - }, s.prototype.updateParamExe = function(t, i, e, r) {}, _._$8s = 0, _._$fT = new Object, _.start = function(t) { - var i = _._$fT[t]; - null == i && (i = new a, i._$r = t, _._$fT[t] = i), i._$0S = w.getSystemTimeMSec() - }, _.dump = function(t) { - var i = _._$fT[t]; - if (null != i) { - var e = w.getSystemTimeMSec(), - r = e - i._$0S; - return console.log(t + " : " + r + "ms"), r - } - return -1 - }, _.end = function(t) { - var i = _._$fT[t]; - if (null != i) { - return w.getSystemTimeMSec() - i._$0S - } - return -1 - }, _._$li = function(t, i) { - console.log("_$li : " + t + "\n", i) - }, _._$Ji = function(t, i) { - console.log(t, i) - }, _._$dL = function(t, i) { - console.log(t, i), console.log("\n") - }, _._$KL = function(t, i) { - for (var e = 0; e < i; e++) e % 16 == 0 && e > 0 ? console.log("\n") : e % 8 == 0 && e > 0 && console.log(" "), console.log("%02X ", 255 & t[e]); - console.log("\n") - }, _._$nr = function(t, i, e) { - console.log("%s\n", t); - for (var r = i.length, o = 0; o < r; ++o) console.log("%5d", i[o]), console.log("%s\n", e), console.log(","); - console.log("\n") - }, _._$Rb = function(t) { - console.log("dump exception : " + t), console.log("stack :: " + t.stack) - }, h.prototype._$8P = function() { - return .5 * (this.x + this.x + this.width) - }, h.prototype._$6P = function() { - return .5 * (this.y + this.y + this.height) - }, h.prototype._$EL = function() { - return this.x + this.width - }, h.prototype._$5T = function() { - return this.y + this.height - }, h.prototype._$jL = function(t, i, e, r) { - this.x = t, this.y = i, this.width = e, this.height = r - }, h.prototype._$jL = function(t) { - this.x = t.x, this.y = t.y, this.width = t.width, this.height = t.height - }, l.prototype = new et, l._$tP = new Object, l._$27 = function() { - l._$tP.clear() - }, l.getID = function(t) { - var i = l._$tP[t]; - return null == i && (i = new l(t), l._$tP[t] = i), i - }, l.prototype._$3s = function() { - return new l - }, u.prototype = new et, u._$tP = new Object, u._$27 = function() { - u._$tP.clear() - }, u.getID = function(t) { - var i = u._$tP[t]; - return null == i && (i = new u(t), u._$tP[t] = i), i - }, u.prototype._$3s = function() { - return new u - }, p._$42 = 0, p.prototype._$zP = function() { - null == this._$vo && (this._$vo = new ot), null == this._$F2 && (this._$F2 = new Array) - }, p.prototype.getCanvasWidth = function() { - return this._$ao - }, p.prototype.getCanvasHeight = function() { - return this._$1S - }, p.prototype._$F0 = function(t) { - this._$vo = t._$nP(), this._$F2 = t._$nP(), this._$ao = t._$6L(), this._$1S = t._$6L() - }, p.prototype._$6S = function(t) { - 
this._$F2.push(t) - }, p.prototype._$Xr = function() { - return this._$F2 - }, p.prototype._$E2 = function() { - return this._$vo - }, f.prototype.setup = function(t, i, e) { - this._$ks = this._$Yb(), this.p2._$xT(), 3 == arguments.length && (this._$Fo = t, this._$L2 = i, this.p1._$p = e, this.p2._$p = e, this.p2.y = t, this.setup()) - }, f.prototype.getPhysicsPoint1 = function() { - return this.p1 - }, f.prototype.getPhysicsPoint2 = function() { - return this.p2 - }, f.prototype._$qr = function() { - return this._$Db - }, f.prototype._$pr = function(t) { - this._$Db = t - }, f.prototype._$5r = function() { - return this._$M2 - }, f.prototype._$Cs = function() { - return this._$9b - }, f.prototype._$Yb = function() { - return -180 * Math.atan2(this.p1.x - this.p2.x, -(this.p1.y - this.p2.y)) / Math.PI - }, f.prototype.addSrcParam = function(t, i, e, r) { - var o = new g(t, i, e, r); - this._$lL.push(o) - }, f.prototype.addTargetParam = function(t, i, e, r) { - var o = new T(t, i, e, r); - this._$qP.push(o) - }, f.prototype.update = function(t, i) { - if (0 == this._$iP) return this._$iP = this._$iT = i, void(this._$Fo = Math.sqrt((this.p1.x - this.p2.x) * (this.p1.x - this.p2.x) + (this.p1.y - this.p2.y) * (this.p1.y - this.p2.y))); - var e = (i - this._$iT) / 1e3; - if (0 != e) { - for (var r = this._$lL.length - 1; r >= 0; --r) { - this._$lL[r]._$oP(t, this) - } - this._$oo(t, e), this._$M2 = this._$Yb(), this._$9b = (this._$M2 - this._$ks) / e, this._$ks = this._$M2 - } - for (var r = this._$qP.length - 1; r >= 0; --r) { - this._$qP[r]._$YS(t, this) - } - this._$iT = i - }, f.prototype._$oo = function(t, i) { - i < .033 && (i = .033); - var e = 1 / i; - this.p1.vx = (this.p1.x - this.p1._$s0) * e, this.p1.vy = (this.p1.y - this.p1._$70) * e, this.p1.ax = (this.p1.vx - this.p1._$7L) * e, this.p1.ay = (this.p1.vy - this.p1._$HL) * e, this.p1.fx = this.p1.ax * this.p1._$p, this.p1.fy = this.p1.ay * this.p1._$p, this.p1._$xT(); - var r, o, n = -Math.atan2(this.p1.y - this.p2.y, this.p1.x - this.p2.x), - s = Math.cos(n), - _ = Math.sin(n), - a = 9.8 * this.p2._$p, - h = this._$Db * Lt._$bS, - l = a * Math.cos(n - h); - r = l * _, o = l * s; - var $ = -this.p1.fx * _ * _, - u = -this.p1.fy * _ * s, - p = -this.p2.vx * this._$L2, - f = -this.p2.vy * this._$L2; - this.p2.fx = r + $ + p, this.p2.fy = o + u + f, this.p2.ax = this.p2.fx / this.p2._$p, this.p2.ay = this.p2.fy / this.p2._$p, this.p2.vx += this.p2.ax * i, this.p2.vy += this.p2.ay * i, this.p2.x += this.p2.vx * i, this.p2.y += this.p2.vy * i; - var c = Math.sqrt((this.p1.x - this.p2.x) * (this.p1.x - this.p2.x) + (this.p1.y - this.p2.y) * (this.p1.y - this.p2.y)); - this.p2.x = this.p1.x + this._$Fo * (this.p2.x - this.p1.x) / c, this.p2.y = this.p1.y + this._$Fo * (this.p2.y - this.p1.y) / c, this.p2.vx = (this.p2.x - this.p2._$s0) * e, this.p2.vy = (this.p2.y - this.p2._$70) * e, this.p2._$xT() - }, c.prototype._$xT = function() { - this._$s0 = this.x, this._$70 = this.y, this._$7L = this.vx, this._$HL = this.vy - }, d.prototype._$oP = function(t, i) {}, g.prototype = new d, g.prototype._$oP = function(t, i) { - var e = this.scale * t.getParamFloat(this._$wL), - r = i.getPhysicsPoint1(); - switch (this._$tL) { - default: - case f.Src.SRC_TO_X: - r.x = r.x + (e - r.x) * this._$V0; - break; - case f.Src.SRC_TO_Y: - r.y = r.y + (e - r.y) * this._$V0; - break; - case f.Src.SRC_TO_G_ANGLE: - var o = i._$qr(); - o += (e - o) * this._$V0, i._$pr(o) - } - }, y.prototype._$YS = function(t, i) {}, T.prototype = new y, T.prototype._$YS = 
function(t, i) { - switch (this._$YP) { - default: - case f.Target.TARGET_FROM_ANGLE: - t.setParamFloat(this._$wL, this.scale * i._$5r(), this._$V0); - break; - case f.Target.TARGET_FROM_ANGLE_V: - t.setParamFloat(this._$wL, this.scale * i._$Cs(), this._$V0) - } - }, f.Src = function() {}, f.Src.SRC_TO_X = "SRC_TO_X", f.Src.SRC_TO_Y = "SRC_TO_Y", f.Src.SRC_TO_G_ANGLE = "SRC_TO_G_ANGLE", f.Target = function() {}, f.Target.TARGET_FROM_ANGLE = "TARGET_FROM_ANGLE", f.Target.TARGET_FROM_ANGLE_V = "TARGET_FROM_ANGLE_V", P.prototype.init = function(t) { - this._$fL = t._$fL, this._$gL = t._$gL, this._$B0 = t._$B0, this._$z0 = t._$z0, this._$qT = t._$qT, this.reflectX = t.reflectX, this.reflectY = t.reflectY - }, P.prototype._$F0 = function(t) { - this._$fL = t._$_T(), this._$gL = t._$_T(), this._$B0 = t._$_T(), this._$z0 = t._$_T(), this._$qT = t._$_T(), t.getFormatVersion() >= G.LIVE2D_FORMAT_VERSION_V2_10_SDK2 && (this.reflectX = t._$po(), this.reflectY = t._$po()) - }, P.prototype._$e = function() {}; - var It = function() {}; - It._$ni = function(t, i, e, r, o, n, s, _, a) { - var h = s * n - _ * o; - if (0 == h) return null; - var l, $ = ((t - e) * n - (i - r) * o) / h; - return l = 0 != o ? (t - e - $ * s) / o : (i - r - $ * _) / n, isNaN(l) && (l = (t - e - $ * s) / o, isNaN(l) && (l = (i - r - $ * _) / n), isNaN(l) && (console.log("a is NaN @UtVector#_$ni() "), console.log("v1x : " + o), console.log("v1x != 0 ? " + (0 != o)))), null == a ? new Array(l, $) : (a[0] = l, a[1] = $, a) - }, S.prototype._$8P = function() { - return this.x + .5 * this.width - }, S.prototype._$6P = function() { - return this.y + .5 * this.height - }, S.prototype._$EL = function() { - return this.x + this.width - }, S.prototype._$5T = function() { - return this.y + this.height - }, S.prototype._$jL = function(t, i, e, r) { - this.x = t, this.y = i, this.width = e, this.height = r - }, S.prototype._$jL = function(t) { - this.x = t.x, this.y = t.y, this.width = t.width, this.height = t.height - }, S.prototype.contains = function(t, i) { - return this.x <= this.x && this.y <= this.y && this.x <= this.x + this.width && this.y <= this.y + this.height - }, S.prototype.expand = function(t, i) { - this.x -= t, this.y -= i, this.width += 2 * t, this.height += 2 * i - }, v._$Z2 = function(t, i, e, r) { - var o = i._$Q2(t, e), - n = t._$vs(), - s = t._$Tr(); - if (i._$zr(n, s, o), o <= 0) return r[n[0]]; - if (1 == o) { - var _ = r[n[0]], - a = r[n[1]], - h = s[0]; - return _ + (a - _) * h | 0 - } - if (2 == o) { - var _ = r[n[0]], - a = r[n[1]], - l = r[n[2]], - $ = r[n[3]], - h = s[0], - u = s[1], - p = _ + (a - _) * h | 0, - f = l + ($ - l) * h | 0; - return p + (f - p) * u | 0 - } - if (3 == o) { - var c = r[n[0]], - d = r[n[1]], - g = r[n[2]], - y = r[n[3]], - m = r[n[4]], - T = r[n[5]], - P = r[n[6]], - S = r[n[7]], - h = s[0], - u = s[1], - v = s[2], - _ = c + (d - c) * h | 0, - a = g + (y - g) * h | 0, - l = m + (T - m) * h | 0, - $ = P + (S - P) * h | 0, - p = _ + (a - _) * u | 0, - f = l + ($ - l) * u | 0; - return p + (f - p) * v | 0 - } - if (4 == o) { - var L = r[n[0]], - M = r[n[1]], - E = r[n[2]], - A = r[n[3]], - I = r[n[4]], - w = r[n[5]], - x = r[n[6]], - O = r[n[7]], - D = r[n[8]], - R = r[n[9]], - b = r[n[10]], - F = r[n[11]], - C = r[n[12]], - N = r[n[13]], - B = r[n[14]], - U = r[n[15]], - h = s[0], - u = s[1], - v = s[2], - G = s[3], - c = L + (M - L) * h | 0, - d = E + (A - E) * h | 0, - g = I + (w - I) * h | 0, - y = x + (O - x) * h | 0, - m = D + (R - D) * h | 0, - T = b + (F - b) * h | 0, - P = C + 
(N - C) * h | 0, - S = B + (U - B) * h | 0, - _ = c + (d - c) * u | 0, - a = g + (y - g) * u | 0, - l = m + (T - m) * u | 0, - $ = P + (S - P) * u | 0, - p = _ + (a - _) * v | 0, - f = l + ($ - l) * v | 0; - return p + (f - p) * G | 0 - } - for (var Y = 1 << o, k = new Float32Array(Y), V = 0; V < Y; V++) { - for (var X = V, z = 1, H = 0; H < o; H++) z *= X % 2 == 0 ? 1 - s[H] : s[H], X /= 2; - k[V] = z - } - for (var W = new Float32Array(Y), j = 0; j < Y; j++) W[j] = r[n[j]]; - for (var q = 0, j = 0; j < Y; j++) q += k[j] * W[j]; - return q + .5 | 0 - }, v._$br = function(t, i, e, r) { - var o = i._$Q2(t, e), - n = t._$vs(), - s = t._$Tr(); - if (i._$zr(n, s, o), o <= 0) return r[n[0]]; - if (1 == o) { - var _ = r[n[0]], - a = r[n[1]], - h = s[0]; - return _ + (a - _) * h - } - if (2 == o) { - var _ = r[n[0]], - a = r[n[1]], - l = r[n[2]], - $ = r[n[3]], - h = s[0], - u = s[1]; - return (1 - u) * (_ + (a - _) * h) + u * (l + ($ - l) * h) - } - if (3 == o) { - var p = r[n[0]], - f = r[n[1]], - c = r[n[2]], - d = r[n[3]], - g = r[n[4]], - y = r[n[5]], - m = r[n[6]], - T = r[n[7]], - h = s[0], - u = s[1], - P = s[2]; - return (1 - P) * ((1 - u) * (p + (f - p) * h) + u * (c + (d - c) * h)) + P * ((1 - u) * (g + (y - g) * h) + u * (m + (T - m) * h)) - } - if (4 == o) { - var S = r[n[0]], - v = r[n[1]], - L = r[n[2]], - M = r[n[3]], - E = r[n[4]], - A = r[n[5]], - I = r[n[6]], - w = r[n[7]], - x = r[n[8]], - O = r[n[9]], - D = r[n[10]], - R = r[n[11]], - b = r[n[12]], - F = r[n[13]], - C = r[n[14]], - N = r[n[15]], - h = s[0], - u = s[1], - P = s[2], - B = s[3]; - return (1 - B) * ((1 - P) * ((1 - u) * (S + (v - S) * h) + u * (L + (M - L) * h)) + P * ((1 - u) * (E + (A - E) * h) + u * (I + (w - I) * h))) + B * ((1 - P) * ((1 - u) * (x + (O - x) * h) + u * (D + (R - D) * h)) + P * ((1 - u) * (b + (F - b) * h) + u * (C + (N - C) * h))) - } - for (var U = 1 << o, G = new Float32Array(U), Y = 0; Y < U; Y++) { - for (var k = Y, V = 1, X = 0; X < o; X++) V *= k % 2 == 0 ? 
1 - s[X] : s[X], k /= 2; - G[Y] = V - } - for (var z = new Float32Array(U), H = 0; H < U; H++) z[H] = r[n[H]]; - for (var W = 0, H = 0; H < U; H++) W += G[H] * z[H]; - return W - }, v._$Vr = function(t, i, e, r, o, n, s, _) { - var a = i._$Q2(t, e), - h = t._$vs(), - l = t._$Tr(); - i._$zr(h, l, a); - var $ = 2 * r, - u = s; - if (a <= 0) { - var p = h[0], - f = o[p]; - if (2 == _ && 0 == s) w._$jT(f, 0, n, 0, $); - else for (var c = 0; c < $;) n[u] = f[c++], n[u + 1] = f[c++], u += _ - } else if (1 == a) for (var f = o[h[0]], d = o[h[1]], g = l[0], y = 1 - g, c = 0; c < $;) n[u] = f[c] * y + d[c] * g, ++c, n[u + 1] = f[c] * y + d[c] * g, ++c, u += _; - else if (2 == a) for (var f = o[h[0]], d = o[h[1]], m = o[h[2]], T = o[h[3]], g = l[0], P = l[1], y = 1 - g, S = 1 - P, v = S * y, L = S * g, M = P * y, E = P * g, c = 0; c < $;) n[u] = v * f[c] + L * d[c] + M * m[c] + E * T[c], ++c, n[u + 1] = v * f[c] + L * d[c] + M * m[c] + E * T[c], ++c, u += _; - else if (3 == a) for (var A = o[h[0]], I = o[h[1]], x = o[h[2]], O = o[h[3]], D = o[h[4]], R = o[h[5]], b = o[h[6]], F = o[h[7]], g = l[0], P = l[1], C = l[2], y = 1 - g, S = 1 - P, N = 1 - C, B = N * S * y, U = N * S * g, G = N * P * y, Y = N * P * g, k = C * S * y, V = C * S * g, X = C * P * y, z = C * P * g, c = 0; c < $;) n[u] = B * A[c] + U * I[c] + G * x[c] + Y * O[c] + k * D[c] + V * R[c] + X * b[c] + z * F[c], ++c, n[u + 1] = B * A[c] + U * I[c] + G * x[c] + Y * O[c] + k * D[c] + V * R[c] + X * b[c] + z * F[c], ++c, u += _; - else if (4 == a) for (var H = o[h[0]], W = o[h[1]], j = o[h[2]], q = o[h[3]], J = o[h[4]], Q = o[h[5]], Z = o[h[6]], K = o[h[7]], tt = o[h[8]], it = o[h[9]], et = o[h[10]], rt = o[h[11]], ot = o[h[12]], nt = o[h[13]], st = o[h[14]], _t = o[h[15]], g = l[0], P = l[1], C = l[2], at = l[3], y = 1 - g, S = 1 - P, N = 1 - C, ht = 1 - at, lt = ht * N * S * y, $t = ht * N * S * g, ut = ht * N * P * y, pt = ht * N * P * g, ft = ht * C * S * y, ct = ht * C * S * g, dt = ht * C * P * y, gt = ht * C * P * g, yt = at * N * S * y, mt = at * N * S * g, Tt = at * N * P * y, Pt = at * N * P * g, St = at * C * S * y, vt = at * C * S * g, Lt = at * C * P * y, Mt = at * C * P * g, c = 0; c < $;) n[u] = lt * H[c] + $t * W[c] + ut * j[c] + pt * q[c] + ft * J[c] + ct * Q[c] + dt * Z[c] + gt * K[c] + yt * tt[c] + mt * it[c] + Tt * et[c] + Pt * rt[c] + St * ot[c] + vt * nt[c] + Lt * st[c] + Mt * _t[c], ++c, n[u + 1] = lt * H[c] + $t * W[c] + ut * j[c] + pt * q[c] + ft * J[c] + ct * Q[c] + dt * Z[c] + gt * K[c] + yt * tt[c] + mt * it[c] + Tt * et[c] + Pt * rt[c] + St * ot[c] + vt * nt[c] + Lt * st[c] + Mt * _t[c], ++c, u += _; - else { - for (var Et = 1 << a, At = new Float32Array(Et), It = 0; It < Et; It++) { - for (var wt = It, xt = 1, Ot = 0; Ot < a; Ot++) xt *= wt % 2 == 0 ? 1 - l[Ot] : l[Ot], wt /= 2; - At[It] = xt - } - for (var Dt = new Float32Array(Et), Rt = 0; Rt < Et; Rt++) Dt[Rt] = o[h[Rt]]; - for (var c = 0; c < $;) { - for (var bt = 0, Ft = 0, Ct = c + 1, Rt = 0; Rt < Et; Rt++) bt += At[Rt] * Dt[Rt][c], Ft += At[Rt] * Dt[Rt][Ct]; - c += 2, n[u] = bt, n[u + 1] = Ft, u += _ - } - } - }, L.prototype._$HT = function(t, i) { - this.x = t, this.y = i - }, L.prototype._$HT = function(t) { - this.x = t.x, this.y = t.y - }, M._$ur = -2, M._$ES = 500, M._$wb = 2, M._$8S = 3, M._$52 = M._$ES, M._$R2 = M._$ES, M._$or = function() { - return M._$52 - }, M._$Pr = function() { - return M._$R2 - }, M.prototype.convertClipIDForV2_11 = function(t) { - var i = []; - return null == t ? null : 0 == t.length ? null : /,/.test(t) ? 
i = t.id.split(",") : (i.push(t.id), i) - }, M.prototype._$F0 = function(t) { - this._$gP = t._$nP(), this._$dr = t._$nP(), this._$GS = t._$nP(), this._$qb = t._$6L(), this._$Lb = t._$cS(), this._$mS = t._$Tb(), t.getFormatVersion() >= G._$T7 ? (this.clipID = t._$nP(), this.clipIDList = this.convertClipIDForV2_11(this.clipID)) : this.clipIDList = [], this._$MS(this._$Lb) - }, M.prototype.getClipIDList = function() { - return this.clipIDList - }, M.prototype.init = function(t) {}, M.prototype._$Nr = function(t, i) { - if (i._$IS[0] = !1, i._$Us = v._$Z2(t, this._$GS, i._$IS, this._$Lb), at._$Zs); - else if (i._$IS[0]) return; - i._$7s = v._$br(t, this._$GS, i._$IS, this._$mS) - }, M.prototype._$2b = function(t, i) {}, M.prototype.getDrawDataID = function() { - return this._$gP - }, M.prototype._$j2 = function(t) { - this._$gP = t - }, M.prototype.getOpacity = function(t, i) { - return i._$7s - }, M.prototype._$zS = function(t, i) { - return i._$Us - }, M.prototype._$MS = function(t) { - for (var i = t.length - 1; i >= 0; --i) { - var e = t[i]; - e < M._$52 ? M._$52 = e : e > M._$R2 && (M._$R2 = e) - } - }, M.prototype.getTargetBaseDataID = function() { - return this._$dr - }, M.prototype._$gs = function(t) { - this._$dr = t - }, M.prototype._$32 = function() { - return null != this._$dr && this._$dr != yt._$2o() - }, M.prototype.preDraw = function(t, i, e) {}, M.prototype.draw = function(t, i, e) {}, M.prototype.getType = function() {}, M.prototype._$B2 = function(t, i, e) {}, E._$ps = 32, E.CLIPPING_PROCESS_NONE = 0, E.CLIPPING_PROCESS_OVERWRITE_ALPHA = 1, E.CLIPPING_PROCESS_MULTIPLY_ALPHA = 2, E.CLIPPING_PROCESS_DRAW = 3, E.CLIPPING_PROCESS_CLEAR_ALPHA = 4, E.prototype.setChannelFlagAsColor = function(t, i) { - this.CHANNEL_COLORS[t] = i - }, E.prototype.getChannelFlagAsColor = function(t) { - return this.CHANNEL_COLORS[t] - }, E.prototype._$ZT = function() {}, E.prototype._$Uo = function(t, i, e, r, o, n, s) {}, E.prototype._$Rs = function() { - return -1 - }, E.prototype._$Ds = function(t) {}, E.prototype.setBaseColor = function(t, i, e, r) { - t < 0 ? t = 0 : t > 1 && (t = 1), i < 0 ? i = 0 : i > 1 && (i = 1), e < 0 ? e = 0 : e > 1 && (e = 1), r < 0 ? 
r = 0 : r > 1 && (r = 1), this._$lT = t, this._$C0 = i, this._$tT = e, this._$WL = r - }, E.prototype._$WP = function(t) { - this.culling = t - }, E.prototype.setMatrix = function(t) { - for (var i = 0; i < 16; i++) this.matrix4x4[i] = t[i] - }, E.prototype._$IT = function() { - return this.matrix4x4 - }, E.prototype.setPremultipliedAlpha = function(t) { - this.premultipliedAlpha = t - }, E.prototype.isPremultipliedAlpha = function() { - return this.premultipliedAlpha - }, E.prototype.setAnisotropy = function(t) { - this.anisotropy = t - }, E.prototype.getAnisotropy = function() { - return this.anisotropy - }, E.prototype.getClippingProcess = function() { - return this.clippingProcess - }, E.prototype.setClippingProcess = function(t) { - this.clippingProcess = t - }, E.prototype.setClipBufPre_clipContextForMask = function(t) { - this.clipBufPre_clipContextMask = t - }, E.prototype.getClipBufPre_clipContextMask = function() { - return this.clipBufPre_clipContextMask - }, E.prototype.setClipBufPre_clipContextForDraw = function(t) { - this.clipBufPre_clipContextDraw = t - }, E.prototype.getClipBufPre_clipContextDraw = function() { - return this.clipBufPre_clipContextDraw - }, I._$ur = -2, I._$c2 = 1, I._$_b = 2, I.prototype._$F0 = function(t) { - this._$kP = t._$nP(), this._$dr = t._$nP() - }, I.prototype.readV2_opacity = function(t) { - t.getFormatVersion() >= G.LIVE2D_FORMAT_VERSION_V2_10_SDK2 && (this._$mS = t._$Tb()) - }, I.prototype.init = function(t) {}, I.prototype._$Nr = function(t, i) {}, I.prototype.interpolateOpacity = function(t, i, e, r) { - null == this._$mS ? e.setInterpolatedOpacity(1) : e.setInterpolatedOpacity(v._$br(t, i, r, this._$mS)) - }, I.prototype._$2b = function(t, i) {}, I.prototype._$nb = function(t, i, e, r, o, n, s) {}, I.prototype.getType = function() {}, I.prototype._$gs = function(t) { - this._$dr = t - }, I.prototype._$a2 = function(t) { - this._$kP = t - }, I.prototype.getTargetBaseDataID = function() { - return this._$dr - }, I.prototype.getBaseDataID = function() { - return this._$kP - }, I.prototype._$32 = function() { - return null != this._$dr && this._$dr != yt._$2o() - }, w._$W2 = 0, w._$CS = w._$W2, w._$Mo = function() { - return !0 - }, w._$XP = function(t) { - try { - for (var i = getTimeMSec(); getTimeMSec() - i < t;); - } catch (t) { - t._$Rb() - } - }, w.getUserTimeMSec = function() { - return w._$CS == w._$W2 ? 
w.getSystemTimeMSec() : w._$CS - }, w.setUserTimeMSec = function(t) { - w._$CS = t - }, w.updateUserTimeMSec = function() { - return w._$CS = w.getSystemTimeMSec() - }, w.getTimeMSec = function() { - return (new Date).getTime() - }, w.getSystemTimeMSec = function() { - return (new Date).getTime() - }, w._$Q = function(t) {}, w._$jT = function(t, i, e, r, o) { - for (var n = 0; n < o; n++) e[r + n] = t[i + n] - }, x._$ds = -2, x.prototype._$F0 = function(t) { - this._$wL = t._$nP(), this._$VP = t._$6L(), this._$GP = t._$nP() - }, x.prototype.getParamIndex = function(t) { - return this._$2r != t && (this._$8o = x._$ds), this._$8o - }, x.prototype._$Pb = function(t, i) { - this._$8o = t, this._$2r = i - }, x.prototype.getParamID = function() { - return this._$wL - }, x.prototype._$yP = function(t) { - this._$wL = t - }, x.prototype._$N2 = function() { - return this._$VP - }, x.prototype._$d2 = function() { - return this._$GP - }, x.prototype._$t2 = function(t, i) { - this._$VP = t, this._$GP = i - }, x.prototype._$Lr = function() { - return this._$O2 - }, x.prototype._$wr = function(t) { - this._$O2 = t - }, x.prototype._$SL = function() { - return this._$ri - }, x.prototype._$AL = function(t) { - this._$ri = t - }, O.startsWith = function(t, i, e) { - var r = i + e.length; - if (r >= t.length) return !1; - for (var o = i; o < r; o++) if (O.getChar(t, o) != e.charAt(o - i)) return !1; - return !0 - }, O.getChar = function(t, i) { - return String.fromCharCode(t.getUint8(i)) - }, O.createString = function(t, i, e) { - for (var r = new ArrayBuffer(2 * e), o = new Uint16Array(r), n = 0; n < e; n++) o[n] = t.getUint8(i + n); - return String.fromCharCode.apply(null, o) - }, O._$LS = function(t, i, e, r) { - t instanceof ArrayBuffer && (t = new DataView(t)); - var o = e, - n = !1, - s = !1, - _ = 0, - a = O.getChar(t, o); - "-" == a && (n = !0, o++); - for (var h = !1; o < i; o++) { - switch (a = O.getChar(t, o)) { - case "0": - _ *= 10; - break; - case "1": - _ = 10 * _ + 1; - break; - case "2": - _ = 10 * _ + 2; - break; - case "3": - _ = 10 * _ + 3; - break; - case "4": - _ = 10 * _ + 4; - break; - case "5": - _ = 10 * _ + 5; - break; - case "6": - _ = 10 * _ + 6; - break; - case "7": - _ = 10 * _ + 7; - break; - case "8": - _ = 10 * _ + 8; - break; - case "9": - _ = 10 * _ + 9; - break; - case ".": - s = !0, o++, h = !0; - break; - default: - h = !0 - } - if (h) break - } - if (s) for (var l = .1, $ = !1; o < i; o++) { - switch (a = O.getChar(t, o)) { - case "0": - break; - case "1": - _ += 1 * l; - break; - case "2": - _ += 2 * l; - break; - case "3": - _ += 3 * l; - break; - case "4": - _ += 4 * l; - break; - case "5": - _ += 5 * l; - break; - case "6": - _ += 6 * l; - break; - case "7": - _ += 7 * l; - break; - case "8": - _ += 8 * l; - break; - case "9": - _ += 9 * l; - break; - default: - $ = !0 - } - if (l *= .1, $) break - } - return n && (_ = -_), r[0] = o, _ - }, D.prototype._$zP = function() { - this._$Ob = new Array - }, D.prototype._$F0 = function(t) { - this._$Ob = t._$nP() - }, D.prototype._$Ur = function(t) { - if (t._$WS()) return !0; - for (var i = t._$v2(), e = this._$Ob.length - 1; e >= 0; --e) { - var r = this._$Ob[e].getParamIndex(i); - if (r == x._$ds && (r = t.getParamIndex(this._$Ob[e].getParamID())), t._$Xb(r)) return !0 - } - return !1 - }, D.prototype._$Q2 = function(t, i) { - for (var e, r, o = this._$Ob.length, n = t._$v2(), s = 0, _ = 0; _ < o; _++) { - var a = this._$Ob[_]; - if (e = a.getParamIndex(n), e == x._$ds && (e = t.getParamIndex(a.getParamID()), a._$Pb(e, 
n)), e < 0) throw new Exception("err 23242 : " + a.getParamID()); - var h = e < 0 ? 0 : t.getParamFloat(e); - r = a._$N2(); - var l, $, u = a._$d2(), - p = -1, - f = 0; - if (r < 1); - else if (1 == r) l = u[0], l - U._$J < h && h < l + U._$J ? (p = 0, f = 0) : (p = 0, i[0] = !0); - else if (l = u[0], h < l - U._$J) p = 0, i[0] = !0; - else if (h < l + U._$J) p = 0; - else { - for (var c = !1, d = 1; d < r; ++d) { - if ($ = u[d], h < $ + U._$J) { - $ - U._$J < h ? p = d : (p = d - 1, f = (h - l) / ($ - l), s++), c = !0; - break - } - l = $ - } - c || (p = r - 1, f = 0, i[0] = !0) - } - a._$wr(p), a._$AL(f) - } - return s - }, D.prototype._$zr = function(t, i, e) { - var r = 1 << e; - r + 1 > U._$Qb && console.log("err 23245\n"); - for (var o = this._$Ob.length, n = 1, s = 1, _ = 0, a = 0; a < r; ++a) t[a] = 0; - for (var h = 0; h < o; ++h) { - var l = this._$Ob[h]; - if (0 == l._$SL()) { - var $ = l._$Lr() * n; - if ($ < 0 && at._$3T) throw new Exception("err 23246"); - for (var a = 0; a < r; ++a) t[a] += $ - } else { - for (var $ = n * l._$Lr(), u = n * (l._$Lr() + 1), a = 0; a < r; ++a) t[a] += (a / s | 0) % 2 == 0 ? $ : u; - i[_++] = l._$SL(), s *= 2 - } - n *= l._$N2() - } - t[r] = 65535, i[_] = -1 - }, D.prototype._$h2 = function(t, i, e) { - for (var r = new Float32Array(i), o = 0; o < i; ++o) r[o] = e[o]; - var n = new x; - n._$yP(t), n._$t2(i, r), this._$Ob.push(n) - }, D.prototype._$J2 = function(t) { - for (var i = t, e = this._$Ob.length, r = 0; r < e; ++r) { - var o = this._$Ob[r], - n = o._$N2(), - s = i % o._$N2(), - _ = o._$d2()[s]; - console.log("%s[%d]=%7.2f / ", o.getParamID(), s, _), i /= n - } - console.log("\n") - }, D.prototype.getParamCount = function() { - return this._$Ob.length - }, D.prototype._$zs = function() { - return this._$Ob - }, R.prototype.identity = function() { - for (var t = 0; t < 16; t++) this.m[t] = t % 5 == 0 ? 1 : 0 - }, R.prototype.getArray = function() { - return this.m - }, R.prototype.getCopyMatrix = function() { - return new Float32Array(this.m) - }, R.prototype.setMatrix = function(t) { - if (null != t && 16 == t.length) for (var i = 0; i < 16; i++) this.m[i] = t[i] - }, R.prototype.mult = function(t, i, e) { - return null == i ? null : (this == i ? this.mult_safe(this.m, t.m, i.m, e) : this.mult_fast(this.m, t.m, i.m, e), i) - }, R.prototype.mult_safe = function(t, i, e, r) { - if (t == e) { - var o = new Array(16); - this.mult_fast(t, i, o, r); - for (var n = 15; n >= 0; --n) e[n] = o[n] - } else this.mult_fast(t, i, e, r) - }, R.prototype.mult_fast = function(t, i, e, r) { - r ? 
(e[0] = t[0] * i[0] + t[4] * i[1] + t[8] * i[2], e[4] = t[0] * i[4] + t[4] * i[5] + t[8] * i[6], e[8] = t[0] * i[8] + t[4] * i[9] + t[8] * i[10], e[12] = t[0] * i[12] + t[4] * i[13] + t[8] * i[14] + t[12], e[1] = t[1] * i[0] + t[5] * i[1] + t[9] * i[2], e[5] = t[1] * i[4] + t[5] * i[5] + t[9] * i[6], e[9] = t[1] * i[8] + t[5] * i[9] + t[9] * i[10], e[13] = t[1] * i[12] + t[5] * i[13] + t[9] * i[14] + t[13], e[2] = t[2] * i[0] + t[6] * i[1] + t[10] * i[2], e[6] = t[2] * i[4] + t[6] * i[5] + t[10] * i[6], e[10] = t[2] * i[8] + t[6] * i[9] + t[10] * i[10], e[14] = t[2] * i[12] + t[6] * i[13] + t[10] * i[14] + t[14], e[3] = e[7] = e[11] = 0, e[15] = 1) : (e[0] = t[0] * i[0] + t[4] * i[1] + t[8] * i[2] + t[12] * i[3], e[4] = t[0] * i[4] + t[4] * i[5] + t[8] * i[6] + t[12] * i[7], e[8] = t[0] * i[8] + t[4] * i[9] + t[8] * i[10] + t[12] * i[11], e[12] = t[0] * i[12] + t[4] * i[13] + t[8] * i[14] + t[12] * i[15], e[1] = t[1] * i[0] + t[5] * i[1] + t[9] * i[2] + t[13] * i[3], e[5] = t[1] * i[4] + t[5] * i[5] + t[9] * i[6] + t[13] * i[7], e[9] = t[1] * i[8] + t[5] * i[9] + t[9] * i[10] + t[13] * i[11], e[13] = t[1] * i[12] + t[5] * i[13] + t[9] * i[14] + t[13] * i[15], e[2] = t[2] * i[0] + t[6] * i[1] + t[10] * i[2] + t[14] * i[3], e[6] = t[2] * i[4] + t[6] * i[5] + t[10] * i[6] + t[14] * i[7], e[10] = t[2] * i[8] + t[6] * i[9] + t[10] * i[10] + t[14] * i[11], e[14] = t[2] * i[12] + t[6] * i[13] + t[10] * i[14] + t[14] * i[15], e[3] = t[3] * i[0] + t[7] * i[1] + t[11] * i[2] + t[15] * i[3], e[7] = t[3] * i[4] + t[7] * i[5] + t[11] * i[6] + t[15] * i[7], e[11] = t[3] * i[8] + t[7] * i[9] + t[11] * i[10] + t[15] * i[11], e[15] = t[3] * i[12] + t[7] * i[13] + t[11] * i[14] + t[15] * i[15]) - }, R.prototype.translate = function(t, i, e) { - this.m[12] = this.m[0] * t + this.m[4] * i + this.m[8] * e + this.m[12], this.m[13] = this.m[1] * t + this.m[5] * i + this.m[9] * e + this.m[13], this.m[14] = this.m[2] * t + this.m[6] * i + this.m[10] * e + this.m[14], this.m[15] = this.m[3] * t + this.m[7] * i + this.m[11] * e + this.m[15] - }, R.prototype.scale = function(t, i, e) { - this.m[0] *= t, this.m[4] *= i, this.m[8] *= e, this.m[1] *= t, this.m[5] *= i, this.m[9] *= e, this.m[2] *= t, this.m[6] *= i, this.m[10] *= e, this.m[3] *= t, this.m[7] *= i, this.m[11] *= e - }, R.prototype.rotateX = function(t) { - var i = Lt.fcos(t), - e = Lt._$9(t), - r = this.m[4]; - this.m[4] = r * i + this.m[8] * e, this.m[8] = r * -e + this.m[8] * i, r = this.m[5], this.m[5] = r * i + this.m[9] * e, this.m[9] = r * -e + this.m[9] * i, r = this.m[6], this.m[6] = r * i + this.m[10] * e, this.m[10] = r * -e + this.m[10] * i, r = this.m[7], this.m[7] = r * i + this.m[11] * e, this.m[11] = r * -e + this.m[11] * i - }, R.prototype.rotateY = function(t) { - var i = Lt.fcos(t), - e = Lt._$9(t), - r = this.m[0]; - this.m[0] = r * i + this.m[8] * -e, this.m[8] = r * e + this.m[8] * i, r = this.m[1], this.m[1] = r * i + this.m[9] * -e, this.m[9] = r * e + this.m[9] * i, r = m[2], this.m[2] = r * i + this.m[10] * -e, this.m[10] = r * e + this.m[10] * i, r = m[3], this.m[3] = r * i + this.m[11] * -e, this.m[11] = r * e + this.m[11] * i - }, R.prototype.rotateZ = function(t) { - var i = Lt.fcos(t), - e = Lt._$9(t), - r = this.m[0]; - this.m[0] = r * i + this.m[4] * e, this.m[4] = r * -e + this.m[4] * i, r = this.m[1], this.m[1] = r * i + this.m[5] * e, this.m[5] = r * -e + this.m[5] * i, r = this.m[2], this.m[2] = r * i + this.m[6] * e, this.m[6] = r * -e + this.m[6] * i, r = this.m[3], this.m[3] = r * i + this.m[7] * e, this.m[7] = r * 
-e + this.m[7] * i - }, b.prototype = new et, b._$tP = new Object, b._$27 = function() { - b._$tP.clear() - }, b.getID = function(t) { - var i = b._$tP[t]; - return null == i && (i = new b(t), b._$tP[t] = i), i - }, b.prototype._$3s = function() { - return new b - }, F._$kS = -1, F._$pS = 0, F._$hb = 1, F.STATE_IDENTITY = 0, F._$gb = 1, F._$fo = 2, F._$go = 4, F.prototype.transform = function(t, i, e) { - var r, o, n, s, _, a, h = 0, - l = 0; - switch (this._$hi) { - default: - return; - case F._$go | F._$fo | F._$gb: - for (r = this._$7, o = this._$H, n = this._$k, s = this._$f, _ = this._$g, a = this._$w; --e >= 0;) { - var $ = t[h++], - u = t[h++]; - i[l++] = r * $ + o * u + n, i[l++] = s * $ + _ * u + a - } - return; - case F._$go | F._$fo: - for (r = this._$7, o = this._$H, s = this._$f, _ = this._$g; --e >= 0;) { - var $ = t[h++], - u = t[h++]; - i[l++] = r * $ + o * u, i[l++] = s * $ + _ * u - } - return; - case F._$go | F._$gb: - for (o = this._$H, n = this._$k, s = this._$f, a = this._$w; --e >= 0;) { - var $ = t[h++]; - i[l++] = o * t[h++] + n, i[l++] = s * $ + a - } - return; - case F._$go: - for (o = this._$H, s = this._$f; --e >= 0;) { - var $ = t[h++]; - i[l++] = o * t[h++], i[l++] = s * $ - } - return; - case F._$fo | F._$gb: - for (r = this._$7, n = this._$k, _ = this._$g, a = this._$w; --e >= 0;) i[l++] = r * t[h++] + n, i[l++] = _ * t[h++] + a; - return; - case F._$fo: - for (r = this._$7, _ = this._$g; --e >= 0;) i[l++] = r * t[h++], i[l++] = _ * t[h++]; - return; - case F._$gb: - for (n = this._$k, a = this._$w; --e >= 0;) i[l++] = t[h++] + n, i[l++] = t[h++] + a; - return; - case F.STATE_IDENTITY: - return void(t == i && h == l || w._$jT(t, h, i, l, 2 * e)) - } - }, F.prototype.update = function() { - 0 == this._$H && 0 == this._$f ? 1 == this._$7 && 1 == this._$g ? 0 == this._$k && 0 == this._$w ? (this._$hi = F.STATE_IDENTITY, this._$Z = F._$pS) : (this._$hi = F._$gb, this._$Z = F._$hb) : 0 == this._$k && 0 == this._$w ? (this._$hi = F._$fo, this._$Z = F._$kS) : (this._$hi = F._$fo | F._$gb, this._$Z = F._$kS) : 0 == this._$7 && 0 == this._$g ? 0 == this._$k && 0 == this._$w ? (this._$hi = F._$go, this._$Z = F._$kS) : (this._$hi = F._$go | F._$gb, this._$Z = F._$kS) : 0 == this._$k && 0 == this._$w ? (this._$hi = F._$go | F._$fo, this._$Z = F._$kS) : (this._$hi = F._$go | F._$fo | F._$gb, this._$Z = F._$kS) - }, F.prototype._$RT = function(t) { - this._$IT(t); - var i = t[0], - e = t[2], - r = t[1], - o = t[3], - n = Math.sqrt(i * i + r * r), - s = i * o - e * r; - 0 == n ? 
at._$so && console.log("affine._$RT() / rt==0") : (t[0] = n, t[1] = s / n, t[2] = (r * o + i * e) / s, t[3] = Math.atan2(r, i)) - }, F.prototype._$ho = function(t, i, e, r) { - var o = new Float32Array(6), - n = new Float32Array(6); - t._$RT(o), i._$RT(n); - var s = new Float32Array(6); - s[0] = o[0] + (n[0] - o[0]) * e, s[1] = o[1] + (n[1] - o[1]) * e, s[2] = o[2] + (n[2] - o[2]) * e, s[3] = o[3] + (n[3] - o[3]) * e, s[4] = o[4] + (n[4] - o[4]) * e, s[5] = o[5] + (n[5] - o[5]) * e, r._$CT(s) - }, F.prototype._$CT = function(t) { - var i = Math.cos(t[3]), - e = Math.sin(t[3]); - this._$7 = t[0] * i, this._$f = t[0] * e, this._$H = t[1] * (t[2] * i - e), this._$g = t[1] * (t[2] * e + i), this._$k = t[4], this._$w = t[5], this.update() - }, F.prototype._$IT = function(t) { - t[0] = this._$7, t[1] = this._$f, t[2] = this._$H, t[3] = this._$g, t[4] = this._$k, t[5] = this._$w - }, C.prototype = new s, C._$cs = "VISIBLE:", C._$ar = "LAYOUT:", C._$Co = 0, C._$D2 = [], C._$1T = 1, C.loadMotion = function(t) { - var i = new C, - e = [0], - r = t.length; - i._$yT = 0; - for (var o = 0; o < r; ++o) { - var n = 255 & t[o]; - if ("\n" != n && "\r" != n) if ("#" != n) if ("$" != n) { - if ("a" <= n && n <= "z" || "A" <= n && n <= "Z" || "_" == n) { - for (var s = o, _ = -1; o < r && ("\r" != (n = 255 & t[o]) && "\n" != n); ++o) if ("=" == n) { - _ = o; - break - } - if (_ >= 0) { - var a = new B; - O.startsWith(t, s, C._$cs) ? (a._$RP = B._$hs, a._$4P = new String(t, s, _ - s)) : O.startsWith(t, s, C._$ar) ? (a._$4P = new String(t, s + 7, _ - s - 7), O.startsWith(t, s + 7, "ANCHOR_X") ? a._$RP = B._$xs : O.startsWith(t, s + 7, "ANCHOR_Y") ? a._$RP = B._$us : O.startsWith(t, s + 7, "SCALE_X") ? a._$RP = B._$qs : O.startsWith(t, s + 7, "SCALE_Y") ? a._$RP = B._$Ys : O.startsWith(t, s + 7, "X") ? a._$RP = B._$ws : O.startsWith(t, s + 7, "Y") && (a._$RP = B._$Ns)) : (a._$RP = B._$Fr, a._$4P = new String(t, s, _ - s)), i.motions.push(a); - var h = 0; - for (C._$D2.clear(), o = _ + 1; o < r && ("\r" != (n = 255 & t[o]) && "\n" != n); ++o) if ("," != n && " " != n && "\t" != n) { - var l = O._$LS(t, r, o, e); - if (e[0] > 0) { - C._$D2.push(l), h++; - var $ = e[0]; - if ($ < o) { - console.log("_$n0 _$hi . @Live2DMotion loadMotion()\n"); - break - } - o = $ - } - } - a._$I0 = C._$D2._$BL(), h > i._$yT && (i._$yT = h) - } - } - } else { - for (var s = o, _ = -1; o < r && ("\r" != (n = 255 & t[o]) && "\n" != n); ++o) if ("=" == n) { - _ = o; - break - } - var u = !1; - if (_ >= 0) for (_ == s + 4 && "f" == t[s + 1] && "p" == t[s + 2] && "s" == t[s + 3] && (u = !0), o = _ + 1; o < r && ("\r" != (n = 255 & t[o]) && "\n" != n); ++o) if ("," != n && " " != n && "\t" != n) { - var l = O._$LS(t, r, o, e); - e[0] > 0 && u && 5 < l && l < 121 && (i._$D0 = l), o = e[0] - } - for (; o < r && ("\n" != t[o] && "\r" != t[o]); ++o); - } else for (; o < r && ("\n" != t[o] && "\r" != t[o]); ++o); - } - return i._$AS = 1e3 * i._$yT / i._$D0 | 0, i - }, C.prototype.getDurationMSec = function() { - return this._$AS - }, C.prototype.dump = function() { - for (var t = 0; t < this.motions.length; t++) { - var i = this.motions[t]; - console.log("_$wL[%s] [%d]. 
", i._$4P, i._$I0.length); - for (var e = 0; e < i._$I0.length && e < 10; e++) console.log("%5.2f ,", i._$I0[e]); - console.log("\n") - } - }, C.prototype.updateParamExe = function(t, i, e, r) { - for (var o = i - r._$z2, n = o * this._$D0 / 1e3, s = 0 | n, _ = n - s, a = 0; a < this.motions.length; a++) { - var h = this.motions[a], - l = h._$I0.length, - $ = h._$4P; - if (h._$RP == B._$hs) { - var u = h._$I0[s >= l ? l - 1 : s]; - t.setParamFloat($, u) - } else if (B._$ws <= h._$RP && h._$RP <= B._$Ys); - else { - var p = t.getParamFloat($), - f = h._$I0[s >= l ? l - 1 : s], - c = h._$I0[s + 1 >= l ? l - 1 : s + 1], - d = f + (c - f) * _, - g = p + (d - p) * e; - t.setParamFloat($, g) - } - } - s >= this._$yT && (this._$E ? (r._$z2 = i, this.loopFadeIn && (r._$bs = i)) : r._$9L = !0) - }, C.prototype._$r0 = function() { - return this._$E - }, C.prototype._$aL = function(t) { - this._$E = t - }, C.prototype.isLoopFadeIn = function() { - return this.loopFadeIn - }, C.prototype.setLoopFadeIn = function(t) { - this.loopFadeIn = t - }, N.prototype.clear = function() { - this.size = 0 - }, N.prototype.add = function(t) { - if (this._$P.length <= this.size) { - var i = new Float32Array(2 * this.size); - w._$jT(this._$P, 0, i, 0, this.size), this._$P = i - } - this._$P[this.size++] = t - }, N.prototype._$BL = function() { - var t = new Float32Array(this.size); - return w._$jT(this._$P, 0, t, 0, this.size), t - }, B._$Fr = 0, B._$hs = 1, B._$ws = 100, B._$Ns = 101, B._$xs = 102, B._$us = 103, B._$qs = 104, B._$Ys = 105, U._$Ms = 1, U._$Qs = 2, U._$i2 = 0, U._$No = 2, U._$do = U._$Ms, U._$Ls = !0, U._$1r = 5, U._$Qb = 65, U._$J = 1e-4, U._$FT = .001, U._$Ss = 3, G._$o7 = 6, G._$S7 = 7, G._$s7 = 8, G._$77 = 9, G.LIVE2D_FORMAT_VERSION_V2_10_SDK2 = 10, G.LIVE2D_FORMAT_VERSION_V2_11_SDK2_1 = 11, G._$T7 = G.LIVE2D_FORMAT_VERSION_V2_11_SDK2_1, G._$Is = -2004318072, G._$h0 = 0, G._$4L = 23, G._$7P = 33, G._$uT = function(t) { - console.log("_$bo :: _$6 _$mo _$E0 : %d\n", t) - }, G._$9o = function(t) { - if (t < 40) return G._$uT(t), null; - if (t < 50) return G._$uT(t), null; - if (t < 60) return G._$uT(t), null; - if (t < 100) switch (t) { - case 65: - return new Z; - case 66: - return new D; - case 67: - return new x; - case 68: - return new z; - case 69: - return new P; - case 70: - return new $t; - default: - return G._$uT(t), null - } else if (t < 150) switch (t) { - case 131: - return new st; - case 133: - return new tt; - case 136: - return new p; - case 137: - return new ot; - case 142: - return new j - } - return G._$uT(t), null - }, Y._$HP = 0, Y._$_0 = !0; - Y._$V2 = -1, Y._$W0 = -1, Y._$jr = !1, Y._$ZS = !0, Y._$tr = -1e6, Y._$lr = 1e6, Y._$is = 32, Y._$e = !1, Y.prototype.getDrawDataIndex = function(t) { - for (var i = this._$aS.length - 1; i >= 0; --i) if (null != this._$aS[i] && this._$aS[i].getDrawDataID() == t) return i; - return -1 - }, Y.prototype.getDrawData = function(t) { - if (t instanceof b) { - if (null == this._$Bo) { - this._$Bo = new Object; - for (var i = this._$aS.length, e = 0; e < i; e++) { - var r = this._$aS[e], - o = r.getDrawDataID(); - null != o && (this._$Bo[o] = r) - } - } - return this._$Bo[id] - } - return t < this._$aS.length ? 
this._$aS[t] : null - }, Y.prototype.release = function() { - this._$3S.clear(), this._$aS.clear(), this._$F2.clear(), null != this._$Bo && this._$Bo.clear(), this._$db.clear(), this._$8b.clear(), this._$Hr.clear() - }, Y.prototype.init = function() { - this._$co++, this._$F2.length > 0 && this.release(); - for (var t = this._$Ri.getModelImpl(), i = t._$Xr(), r = i.length, o = new Array, n = new Array, s = 0; s < r; ++s) { - var _ = i[s]; - this._$F2.push(_), this._$Hr.push(_.init(this)); - for (var a = _.getBaseData(), h = a.length, l = 0; l < h; ++l) o.push(a[l]); - for (var l = 0; l < h; ++l) { - var $ = a[l].init(this); - $._$l2(s), n.push($) - } - for (var u = _.getDrawData(), p = u.length, l = 0; l < p; ++l) { - var f = u[l], - c = f.init(this); - c._$IP = s, this._$aS.push(f), this._$8b.push(c) - } - } - for (var d = o.length, g = yt._$2o();;) { - for (var y = !1, s = 0; s < d; ++s) { - var m = o[s]; - if (null != m) { - var T = m.getTargetBaseDataID(); - (null == T || T == g || this.getBaseDataIndex(T) >= 0) && (this._$3S.push(m), this._$db.push(n[s]), o[s] = null, y = !0) - } - } - if (!y) break - } - var P = t._$E2(); - if (null != P) { - var S = P._$1s(); - if (null != S) for (var v = S.length, s = 0; s < v; ++s) { - var L = S[s]; - null != L && this._$02(L.getParamID(), L.getDefaultValue(), L.getMinValue(), L.getMaxValue()) - } - } - this.clipManager = new e(this.dp_webgl), this.clipManager.init(this, this._$aS, this._$8b), this._$QT = !0 - }, Y.prototype.update = function() { - Y._$e && _.start("_$zL"); - for (var t = this._$_2.length, i = 0; i < t; i++) this._$_2[i] != this._$vr[i] && (this._$Js[i] = Y._$ZS, this._$vr[i] = this._$_2[i]); - var e = this._$3S.length, - r = this._$aS.length, - o = W._$or(), - n = W._$Pr(), - s = n - o + 1; - (null == this._$Ws || this._$Ws.length < s) && (this._$Ws = new Int16Array(s), this._$Vs = new Int16Array(s)); - for (var i = 0; i < s; i++) this._$Ws[i] = Y._$V2, this._$Vs[i] = Y._$V2; - (null == this._$Er || this._$Er.length < r) && (this._$Er = new Int16Array(r)); - for (var i = 0; i < r; i++) this._$Er[i] = Y._$W0; - Y._$e && _.dump("_$zL"), Y._$e && _.start("_$UL"); - for (var a = null, h = 0; h < e; ++h) { - var l = this._$3S[h], - $ = this._$db[h]; - try { - l._$Nr(this, $), l._$2b(this, $) - } catch (t) { - null == a && (a = t) - } - } - null != a && Y._$_0 && _._$Rb(a), Y._$e && _.dump("_$UL"), Y._$e && _.start("_$DL"); - for (var u = null, p = 0; p < r; ++p) { - var f = this._$aS[p], - c = this._$8b[p]; - try { - if (f._$Nr(this, c), c._$u2()) continue; - f._$2b(this, c); - var d, g = Math.floor(f._$zS(this, c) - o); - try { - d = this._$Vs[g] - } catch (t) { - console.log("_$li :: %s / %s \t\t\t\t@@_$fS\n", t.toString(), f.getDrawDataID().toString()), g = Math.floor(f._$zS(this, c) - o); - continue - } - d == Y._$V2 ? 
this._$Ws[g] = p : this._$Er[d] = p, this._$Vs[g] = p - } catch (t) { - null == u && (u = t, at._$sT(at._$H7)) - } - } - null != u && Y._$_0 && _._$Rb(u), Y._$e && _.dump("_$DL"), Y._$e && _.start("_$eL"); - for (var i = this._$Js.length - 1; i >= 0; i--) this._$Js[i] = Y._$jr; - return this._$QT = !1, Y._$e && _.dump("_$eL"), !1 - }, Y.prototype.preDraw = function(t) { - null != this.clipManager && (t._$ZT(), this.clipManager.setupClip(this, t)) - }, Y.prototype.draw = function(t) { - if (null == this._$Ws) return void _._$li("call _$Ri.update() before _$Ri.draw() "); - var i = this._$Ws.length; - t._$ZT(); - for (var e = 0; e < i; ++e) { - var r = this._$Ws[e]; - if (r != Y._$V2) for (;;) { - var o = this._$aS[r], - n = this._$8b[r]; - if (n._$yo()) { - var s = n._$IP, - a = this._$Hr[s]; - n._$VS = a.getPartsOpacity(), o.draw(t, this, n) - } - var h = this._$Er[r]; - if (h <= r || h == Y._$W0) break; - r = h - } - } - }, Y.prototype.getParamIndex = function(t) { - for (var i = this._$pb.length - 1; i >= 0; --i) if (this._$pb[i] == t) return i; - return this._$02(t, 0, Y._$tr, Y._$lr) - }, Y.prototype._$BS = function(t) { - return this.getBaseDataIndex(t) - }, Y.prototype.getBaseDataIndex = function(t) { - for (var i = this._$3S.length - 1; i >= 0; --i) if (null != this._$3S[i] && this._$3S[i].getBaseDataID() == t) return i; - return -1 - }, Y.prototype._$UT = function(t, i) { - var e = new Float32Array(i); - return w._$jT(t, 0, e, 0, t.length), e - }, Y.prototype._$02 = function(t, i, e, r) { - if (this._$qo >= this._$pb.length) { - var o = this._$pb.length, - n = new Array(2 * o); - w._$jT(this._$pb, 0, n, 0, o), this._$pb = n, this._$_2 = this._$UT(this._$_2, 2 * o), this._$vr = this._$UT(this._$vr, 2 * o), this._$Rr = this._$UT(this._$Rr, 2 * o), this._$Or = this._$UT(this._$Or, 2 * o); - var s = new Array; - w._$jT(this._$Js, 0, s, 0, o), this._$Js = s - } - return this._$pb[this._$qo] = t, this._$_2[this._$qo] = i, this._$vr[this._$qo] = i, this._$Rr[this._$qo] = e, this._$Or[this._$qo] = r, this._$Js[this._$qo] = Y._$ZS, this._$qo++ - }, Y.prototype._$Zo = function(t, i) { - this._$3S[t] = i - }, Y.prototype.setParamFloat = function(t, i) { - i < this._$Rr[t] && (i = this._$Rr[t]), i > this._$Or[t] && (i = this._$Or[t]), this._$_2[t] = i - }, Y.prototype.loadParam = function() { - var t = this._$_2.length; - t > this._$fs.length && (t = this._$fs.length), w._$jT(this._$fs, 0, this._$_2, 0, t) - }, Y.prototype.saveParam = function() { - var t = this._$_2.length; - t > this._$fs.length && (this._$fs = new Float32Array(t)), w._$jT(this._$_2, 0, this._$fs, 0, t) - }, Y.prototype._$v2 = function() { - return this._$co - }, Y.prototype._$WS = function() { - return this._$QT - }, Y.prototype._$Xb = function(t) { - return this._$Js[t] == Y._$ZS - }, Y.prototype._$vs = function() { - return this._$Es - }, Y.prototype._$Tr = function() { - return this._$ZP - }, Y.prototype.getBaseData = function(t) { - return this._$3S[t] - }, Y.prototype.getParamFloat = function(t) { - return this._$_2[t] - }, Y.prototype.getParamMax = function(t) { - return this._$Or[t] - }, Y.prototype.getParamMin = function(t) { - return this._$Rr[t] - }, Y.prototype.setPartsOpacity = function(t, i) { - this._$Hr[t].setPartsOpacity(i) - }, Y.prototype.getPartsOpacity = function(t) { - return this._$Hr[t].getPartsOpacity() - }, Y.prototype.getPartsDataIndex = function(t) { - for (var i = this._$F2.length - 1; i >= 0; --i) if (null != this._$F2[i] && this._$F2[i]._$p2() == t) return i; - return -1 - }, Y.prototype._$q2 = 
function(t) { - return this._$db[t] - }, Y.prototype._$C2 = function(t) { - return this._$8b[t] - }, Y.prototype._$Bb = function(t) { - return this._$Hr[t] - }, Y.prototype._$5s = function(t, i) { - for (var e = this._$Ws.length, r = t, o = 0; o < e; ++o) { - var n = this._$Ws[o]; - if (n != Y._$V2) for (;;) { - var s = this._$8b[n]; - s._$yo() && (s._$GT()._$B2(this, s, r), r += i); - var _ = this._$Er[n]; - if (_ <= n || _ == Y._$W0) break; - n = _ - } - } - }, Y.prototype.setDrawParam = function(t) { - this.dp_webgl = t - }, Y.prototype.getDrawParam = function() { - return this.dp_webgl - }, k._$0T = function(t) { - return k._$0T(new _$5(t)) - }, k._$0T = function(t) { - if (!t.exists()) throw new _$ls(t._$3b()); - for (var i, e = t.length(), r = new Int8Array(e), o = new _$Xs(new _$kb(t), 8192), n = 0; - (i = o.read(r, n, e - n)) > 0;) n += i; - return r - }, k._$C = function(t) { - var i = null, - e = null; - try { - i = t instanceof Array ? t : new _$Xs(t, 8192), e = new _$js; - for (var r, o = new Int8Array(1e3); - (r = i.read(o)) > 0;) e.write(o, 0, r); - return e._$TS() - } finally { - null != t && t.close(), null != e && (e.flush(), e.close()) - } - }, V.prototype._$T2 = function() { - return w.getUserTimeMSec() + Math._$10() * (2 * this._$Br - 1) - }, V.prototype._$uo = function(t) { - this._$Br = t - }, V.prototype._$QS = function(t, i, e) { - this._$Dr = t, this._$Cb = i, this._$mr = e - }, V.prototype._$7T = function(t) { - var i, e = w.getUserTimeMSec(), - r = 0; - switch (this._$_L) { - case STATE_CLOSING: - r = (e - this._$bb) / this._$Dr, r >= 1 && (r = 1, this._$_L = wt.STATE_CLOSED, this._$bb = e), i = 1 - r; - break; - case STATE_CLOSED: - r = (e - this._$bb) / this._$Cb, r >= 1 && (this._$_L = wt.STATE_OPENING, this._$bb = e), i = 0; - break; - case STATE_OPENING: - r = (e - this._$bb) / this._$mr, r >= 1 && (r = 1, this._$_L = wt.STATE_INTERVAL, this._$12 = this._$T2()), i = r; - break; - case STATE_INTERVAL: - this._$12 < e && (this._$_L = wt.STATE_CLOSING, this._$bb = e), i = 1; - break; - case STATE_FIRST: - default: - this._$_L = wt.STATE_INTERVAL, this._$12 = this._$T2(), i = 1 - } - this._$jo || (i = -i), t.setParamFloat(this._$iL, i), t.setParamFloat(this._$0L, i) - }; - var wt = function() {}; - wt.STATE_FIRST = "STATE_FIRST", wt.STATE_INTERVAL = "STATE_INTERVAL", wt.STATE_CLOSING = "STATE_CLOSING", wt.STATE_CLOSED = "STATE_CLOSED", wt.STATE_OPENING = "STATE_OPENING", X.prototype = new E, X._$As = 32, X._$Gr = !1, X._$NT = null, X._$vS = null, X._$no = null, X._$9r = function(t) { - return new Float32Array(t) - }, X._$vb = function(t) { - return new Int16Array(t) - }, X._$cr = function(t, i) { - return null == t || t._$yL() < i.length ? (t = X._$9r(2 * i.length), t.put(i), t._$oT(0)) : (t.clear(), t.put(i), t._$oT(0)), t - }, X._$mb = function(t, i) { - return null == t || t._$yL() < i.length ? (t = X._$vb(2 * i.length), t.put(i), t._$oT(0)) : (t.clear(), t.put(i), t._$oT(0)), t - }, X._$Hs = function() { - return X._$Gr - }, X._$as = function(t) { - X._$Gr = t - }, X.prototype.setGL = function(t) { - this.gl = t - }, X.prototype.setTransform = function(t) { - this.transform = t - }, X.prototype._$ZT = function() {}, X.prototype._$Uo = function(t, i, e, r, o, n, s, _) { - if (!(n < .01)) { - var a = this._$U2[t], - h = n > .9 ? 
at.EXPAND_W : 0; - this.gl.drawElements(a, e, r, o, n, h, this.transform, _) - } - }, X.prototype._$Rs = function() { - throw new Error("_$Rs") - }, X.prototype._$Ds = function(t) { - throw new Error("_$Ds") - }, X.prototype._$K2 = function() { - for (var t = 0; t < this._$sb.length; t++) { - 0 != this._$sb[t] && (this.gl._$Sr(1, this._$sb, t), this._$sb[t] = 0) - } - }, X.prototype.setTexture = function(t, i) { - this._$sb.length < t + 1 && this._$nS(t), this._$sb[t] = i - }, X.prototype.setTexture = function(t, i) { - this._$sb.length < t + 1 && this._$nS(t), this._$U2[t] = i - }, X.prototype._$nS = function(t) { - var i = Math.max(2 * this._$sb.length, t + 1 + 10), - e = new Int32Array(i); - w._$jT(this._$sb, 0, e, 0, this._$sb.length), this._$sb = e; - var r = new Array; - w._$jT(this._$U2, 0, r, 0, this._$U2.length), this._$U2 = r - }, z.prototype = new I, z._$Xo = new Float32Array(2), z._$io = new Float32Array(2), z._$0o = new Float32Array(2), z._$Lo = new Float32Array(2), z._$To = new Float32Array(2), z._$Po = new Float32Array(2), z._$gT = new Array, z.prototype._$zP = function() { - this._$GS = new D, this._$GS._$zP(), this._$Y0 = new Array - }, z.prototype.getType = function() { - return I._$c2 - }, z.prototype._$F0 = function(t) { - I.prototype._$F0.call(this, t), this._$GS = t._$nP(), this._$Y0 = t._$nP(), I.prototype.readV2_opacity.call(this, t) - }, z.prototype.init = function(t) { - var i = new H(this); - return i._$Yr = new P, this._$32() && (i._$Wr = new P), i - }, z.prototype._$Nr = function(t, i) { - this != i._$GT() && console.log("### assert!! ### "); - var e = i; - if (this._$GS._$Ur(t)) { - var r = z._$gT; - r[0] = !1; - var o = this._$GS._$Q2(t, r); - i._$Ib(r[0]), this.interpolateOpacity(t, this._$GS, i, r); - var n = t._$vs(), - s = t._$Tr(); - if (this._$GS._$zr(n, s, o), o <= 0) { - var _ = this._$Y0[n[0]]; - e._$Yr.init(_) - } else if (1 == o) { - var _ = this._$Y0[n[0]], - a = this._$Y0[n[1]], - h = s[0]; - e._$Yr._$fL = _._$fL + (a._$fL - _._$fL) * h, e._$Yr._$gL = _._$gL + (a._$gL - _._$gL) * h, e._$Yr._$B0 = _._$B0 + (a._$B0 - _._$B0) * h, e._$Yr._$z0 = _._$z0 + (a._$z0 - _._$z0) * h, e._$Yr._$qT = _._$qT + (a._$qT - _._$qT) * h - } else if (2 == o) { - var _ = this._$Y0[n[0]], - a = this._$Y0[n[1]], - l = this._$Y0[n[2]], - $ = this._$Y0[n[3]], - h = s[0], - u = s[1], - p = _._$fL + (a._$fL - _._$fL) * h, - f = l._$fL + ($._$fL - l._$fL) * h; - e._$Yr._$fL = p + (f - p) * u, p = _._$gL + (a._$gL - _._$gL) * h, f = l._$gL + ($._$gL - l._$gL) * h, e._$Yr._$gL = p + (f - p) * u, p = _._$B0 + (a._$B0 - _._$B0) * h, f = l._$B0 + ($._$B0 - l._$B0) * h, e._$Yr._$B0 = p + (f - p) * u, p = _._$z0 + (a._$z0 - _._$z0) * h, f = l._$z0 + ($._$z0 - l._$z0) * h, e._$Yr._$z0 = p + (f - p) * u, p = _._$qT + (a._$qT - _._$qT) * h, f = l._$qT + ($._$qT - l._$qT) * h, e._$Yr._$qT = p + (f - p) * u - } else if (3 == o) { - var c = this._$Y0[n[0]], - d = this._$Y0[n[1]], - g = this._$Y0[n[2]], - y = this._$Y0[n[3]], - m = this._$Y0[n[4]], - T = this._$Y0[n[5]], - P = this._$Y0[n[6]], - S = this._$Y0[n[7]], - h = s[0], - u = s[1], - v = s[2], - p = c._$fL + (d._$fL - c._$fL) * h, - f = g._$fL + (y._$fL - g._$fL) * h, - L = m._$fL + (T._$fL - m._$fL) * h, - M = P._$fL + (S._$fL - P._$fL) * h; - e._$Yr._$fL = (1 - v) * (p + (f - p) * u) + v * (L + (M - L) * u), p = c._$gL + (d._$gL - c._$gL) * h, f = g._$gL + (y._$gL - g._$gL) * h, L = m._$gL + (T._$gL - m._$gL) * h, M = P._$gL + (S._$gL - P._$gL) * h, e._$Yr._$gL = (1 - v) * (p + (f - p) * u) + v * (L + (M - L) * u), p = c._$B0 + 
(d._$B0 - c._$B0) * h, f = g._$B0 + (y._$B0 - g._$B0) * h, L = m._$B0 + (T._$B0 - m._$B0) * h, M = P._$B0 + (S._$B0 - P._$B0) * h, e._$Yr._$B0 = (1 - v) * (p + (f - p) * u) + v * (L + (M - L) * u), p = c._$z0 + (d._$z0 - c._$z0) * h, f = g._$z0 + (y._$z0 - g._$z0) * h, L = m._$z0 + (T._$z0 - m._$z0) * h, M = P._$z0 + (S._$z0 - P._$z0) * h, e._$Yr._$z0 = (1 - v) * (p + (f - p) * u) + v * (L + (M - L) * u), p = c._$qT + (d._$qT - c._$qT) * h, f = g._$qT + (y._$qT - g._$qT) * h, L = m._$qT + (T._$qT - m._$qT) * h, M = P._$qT + (S._$qT - P._$qT) * h, e._$Yr._$qT = (1 - v) * (p + (f - p) * u) + v * (L + (M - L) * u) - } else if (4 == o) { - var E = this._$Y0[n[0]], - A = this._$Y0[n[1]], - I = this._$Y0[n[2]], - w = this._$Y0[n[3]], - x = this._$Y0[n[4]], - O = this._$Y0[n[5]], - D = this._$Y0[n[6]], - R = this._$Y0[n[7]], - b = this._$Y0[n[8]], - F = this._$Y0[n[9]], - C = this._$Y0[n[10]], - N = this._$Y0[n[11]], - B = this._$Y0[n[12]], - U = this._$Y0[n[13]], - G = this._$Y0[n[14]], - Y = this._$Y0[n[15]], - h = s[0], - u = s[1], - v = s[2], - k = s[3], - p = E._$fL + (A._$fL - E._$fL) * h, - f = I._$fL + (w._$fL - I._$fL) * h, - L = x._$fL + (O._$fL - x._$fL) * h, - M = D._$fL + (R._$fL - D._$fL) * h, - V = b._$fL + (F._$fL - b._$fL) * h, - X = C._$fL + (N._$fL - C._$fL) * h, - H = B._$fL + (U._$fL - B._$fL) * h, - W = G._$fL + (Y._$fL - G._$fL) * h; - e._$Yr._$fL = (1 - k) * ((1 - v) * (p + (f - p) * u) + v * (L + (M - L) * u)) + k * ((1 - v) * (V + (X - V) * u) + v * (H + (W - H) * u)), p = E._$gL + (A._$gL - E._$gL) * h, f = I._$gL + (w._$gL - I._$gL) * h, L = x._$gL + (O._$gL - x._$gL) * h, M = D._$gL + (R._$gL - D._$gL) * h, V = b._$gL + (F._$gL - b._$gL) * h, X = C._$gL + (N._$gL - C._$gL) * h, H = B._$gL + (U._$gL - B._$gL) * h, W = G._$gL + (Y._$gL - G._$gL) * h, e._$Yr._$gL = (1 - k) * ((1 - v) * (p + (f - p) * u) + v * (L + (M - L) * u)) + k * ((1 - v) * (V + (X - V) * u) + v * (H + (W - H) * u)), p = E._$B0 + (A._$B0 - E._$B0) * h, f = I._$B0 + (w._$B0 - I._$B0) * h, L = x._$B0 + (O._$B0 - x._$B0) * h, M = D._$B0 + (R._$B0 - D._$B0) * h, V = b._$B0 + (F._$B0 - b._$B0) * h, X = C._$B0 + (N._$B0 - C._$B0) * h, H = B._$B0 + (U._$B0 - B._$B0) * h, W = G._$B0 + (Y._$B0 - G._$B0) * h, e._$Yr._$B0 = (1 - k) * ((1 - v) * (p + (f - p) * u) + v * (L + (M - L) * u)) + k * ((1 - v) * (V + (X - V) * u) + v * (H + (W - H) * u)), p = E._$z0 + (A._$z0 - E._$z0) * h, f = I._$z0 + (w._$z0 - I._$z0) * h, L = x._$z0 + (O._$z0 - x._$z0) * h, M = D._$z0 + (R._$z0 - D._$z0) * h, V = b._$z0 + (F._$z0 - b._$z0) * h, X = C._$z0 + (N._$z0 - C._$z0) * h, H = B._$z0 + (U._$z0 - B._$z0) * h, W = G._$z0 + (Y._$z0 - G._$z0) * h, e._$Yr._$z0 = (1 - k) * ((1 - v) * (p + (f - p) * u) + v * (L + (M - L) * u)) + k * ((1 - v) * (V + (X - V) * u) + v * (H + (W - H) * u)), p = E._$qT + (A._$qT - E._$qT) * h, f = I._$qT + (w._$qT - I._$qT) * h, L = x._$qT + (O._$qT - x._$qT) * h, M = D._$qT + (R._$qT - D._$qT) * h, V = b._$qT + (F._$qT - b._$qT) * h, X = C._$qT + (N._$qT - C._$qT) * h, H = B._$qT + (U._$qT - B._$qT) * h, W = G._$qT + (Y._$qT - G._$qT) * h, e._$Yr._$qT = (1 - k) * ((1 - v) * (p + (f - p) * u) + v * (L + (M - L) * u)) + k * ((1 - v) * (V + (X - V) * u) + v * (H + (W - H) * u)) - } else { - for (var j = 0 | Math.pow(2, o), q = new Float32Array(j), J = 0; J < j; J++) { - for (var Q = J, Z = 1, K = 0; K < o; K++) Z *= Q % 2 == 0 ? 
1 - s[K] : s[K], Q /= 2; - q[J] = Z - } - for (var tt = new Array, it = 0; it < j; it++) tt[it] = this._$Y0[n[it]]; - for (var et = 0, rt = 0, ot = 0, nt = 0, st = 0, it = 0; it < j; it++) et += q[it] * tt[it]._$fL, rt += q[it] * tt[it]._$gL, ot += q[it] * tt[it]._$B0, nt += q[it] * tt[it]._$z0, st += q[it] * tt[it]._$qT; - e._$Yr._$fL = et, e._$Yr._$gL = rt, e._$Yr._$B0 = ot, e._$Yr._$z0 = nt, e._$Yr._$qT = st - } - var _ = this._$Y0[n[0]]; - e._$Yr.reflectX = _.reflectX, e._$Yr.reflectY = _.reflectY - } - }, z.prototype._$2b = function(t, i) { - this != i._$GT() && console.log("### assert!! ### "); - var e = i; - if (e._$hS(!0), this._$32()) { - var r = this.getTargetBaseDataID(); - if (e._$8r == I._$ur && (e._$8r = t.getBaseDataIndex(r)), e._$8r < 0) at._$so && _._$li("_$L _$0P _$G :: %s", r), e._$hS(!1); - else { - var o = t.getBaseData(e._$8r); - if (null != o) { - var n = t._$q2(e._$8r), - s = z._$Xo; - s[0] = e._$Yr._$fL, s[1] = e._$Yr._$gL; - var a = z._$io; - a[0] = 0, a[1] = -.1; - n._$GT().getType() == I._$c2 ? a[1] = -10 : a[1] = -.1; - var h = z._$0o; - this._$Jr(t, o, n, s, a, h); - var l = Lt._$92(a, h); - o._$nb(t, n, s, s, 1, 0, 2), e._$Wr._$fL = s[0], e._$Wr._$gL = s[1], e._$Wr._$B0 = e._$Yr._$B0, e._$Wr._$z0 = e._$Yr._$z0, e._$Wr._$qT = e._$Yr._$qT - l * Lt._$NS; - var $ = n.getTotalScale(); - e.setTotalScale_notForClient($ * e._$Wr._$B0); - var u = n.getTotalOpacity(); - e.setTotalOpacity(u * e.getInterpolatedOpacity()), e._$Wr.reflectX = e._$Yr.reflectX, e._$Wr.reflectY = e._$Yr.reflectY, e._$hS(n._$yo()) - } else e._$hS(!1) - } - } else e.setTotalScale_notForClient(e._$Yr._$B0), e.setTotalOpacity(e.getInterpolatedOpacity()) - }, z.prototype._$nb = function(t, i, e, r, o, n, s) { - this != i._$GT() && console.log("### assert!! ### "); - for (var _, a, h = i, l = null != h._$Wr ? h._$Wr : h._$Yr, $ = Math.sin(Lt._$bS * l._$qT), u = Math.cos(Lt._$bS * l._$qT), p = h.getTotalScale(), f = l.reflectX ? -1 : 1, c = l.reflectY ? -1 : 1, d = u * p * f, g = -$ * p * c, y = $ * p * f, m = u * p * c, T = l._$fL, P = l._$gL, S = o * s, v = n; v < S; v += s) _ = e[v], a = e[v + 1], r[v] = d * _ + g * a + T, r[v + 1] = y * _ + m * a + P - }, z.prototype._$Jr = function(t, i, e, r, o, n) { - i != e._$GT() && console.log("### assert!! ### "); - var s = z._$Lo; - z._$Lo[0] = r[0], z._$Lo[1] = r[1], i._$nb(t, e, s, s, 1, 0, 2); - for (var _ = z._$To, a = z._$Po, h = 1, l = 0; l < 10; l++) { - if (a[0] = r[0] + h * o[0], a[1] = r[1] + h * o[1], i._$nb(t, e, a, _, 1, 0, 2), _[0] -= s[0], _[1] -= s[1], 0 != _[0] || 0 != _[1]) return n[0] = _[0], void(n[1] = _[1]); - if (a[0] = r[0] - h * o[0], a[1] = r[1] - h * o[1], i._$nb(t, e, a, _, 1, 0, 2), _[0] -= s[0], _[1] -= s[1], 0 != _[0] || 0 != _[1]) return _[0] = -_[0], _[0] = -_[0], n[0] = _[0], void(n[1] = _[1]); - h *= .1 - } - at._$so && console.log("_$L0 to transform _$SP\n") - }, H.prototype = new _t, W.prototype = new M, W._$ur = -2, W._$ES = 500, W._$wb = 2, W._$8S = 3, W._$os = 4, W._$52 = W._$ES, W._$R2 = W._$ES, W._$Sb = function(t) { - for (var i = t.length - 1; i >= 0; --i) { - var e = t[i]; - e < W._$52 ? W._$52 = e : e > W._$R2 && (W._$R2 = e) - } - }, W._$or = function() { - return W._$52 - }, W._$Pr = function() { - return W._$R2 - }, W.prototype._$F0 = function(t) { - this._$gP = t._$nP(), this._$dr = t._$nP(), this._$GS = t._$nP(), this._$qb = t._$6L(), this._$Lb = t._$cS(), this._$mS = t._$Tb(), t.getFormatVersion() >= G._$T7 ? 
(this.clipID = t._$nP(), this.clipIDList = this.convertClipIDForV2_11(this.clipID)) : this.clipIDList = null, W._$Sb(this._$Lb) - }, W.prototype.getClipIDList = function() { - return this.clipIDList - }, W.prototype._$Nr = function(t, i) { - if (i._$IS[0] = !1, i._$Us = v._$Z2(t, this._$GS, i._$IS, this._$Lb), at._$Zs); - else if (i._$IS[0]) return; - i._$7s = v._$br(t, this._$GS, i._$IS, this._$mS) - }, W.prototype._$2b = function(t) {}, W.prototype.getDrawDataID = function() { - return this._$gP - }, W.prototype._$j2 = function(t) { - this._$gP = t - }, W.prototype.getOpacity = function(t, i) { - return i._$7s - }, W.prototype._$zS = function(t, i) { - return i._$Us - }, W.prototype.getTargetBaseDataID = function() { - return this._$dr - }, W.prototype._$gs = function(t) { - this._$dr = t - }, W.prototype._$32 = function() { - return null != this._$dr && this._$dr != yt._$2o() - }, W.prototype.getType = function() {}, j._$42 = 0, j.prototype._$1b = function() { - return this._$3S - }, j.prototype.getDrawDataList = function() { - return this._$aS - }, j.prototype._$F0 = function(t) { - this._$NL = t._$nP(), this._$aS = t._$nP(), this._$3S = t._$nP() - }, j.prototype._$kr = function(t) { - t._$Zo(this._$3S), t._$xo(this._$aS), this._$3S = null, this._$aS = null - }, q.prototype = new i, q.loadModel = function(t) { - var e = new q; - return i._$62(e, t), e - }, q.loadModel = function(t) { - var e = new q; - return i._$62(e, t), e - }, q._$to = function() { - return new q - }, q._$er = function(t) { - var i = new _$5("../_$_r/_$t0/_$Ri/_$_P._$d"); - if (0 == i.exists()) throw new _$ls("_$t0 _$_ _$6 _$Ui :: " + i._$PL()); - for (var e = ["../_$_r/_$t0/_$Ri/_$_P.512/_$CP._$1", "../_$_r/_$t0/_$Ri/_$_P.512/_$vP._$1", "../_$_r/_$t0/_$Ri/_$_P.512/_$EP._$1", "../_$_r/_$t0/_$Ri/_$_P.512/_$pP._$1"], r = q.loadModel(i._$3b()), o = 0; o < e.length; o++) { - var n = new _$5(e[o]); - if (0 == n.exists()) throw new _$ls("_$t0 _$_ _$6 _$Ui :: " + n._$PL()); - r.setTexture(o, _$nL._$_o(t, n._$3b())) - } - return r - }, q.prototype.setGL = function(t) { - this._$zo.setGL(t) - }, q.prototype.setTransform = function(t) { - this._$zo.setTransform(t) - }, q.prototype.draw = function() { - this._$5S.draw(this._$zo) - }, q.prototype._$K2 = function() { - this._$zo._$K2() - }, q.prototype.setTexture = function(t, i) { - null == this._$zo && _._$li("_$Yi for QT _$ki / _$XS() is _$6 _$ui!!"), this._$zo.setTexture(t, i) - }, q.prototype.setTexture = function(t, i) { - null == this._$zo && _._$li("_$Yi for QT _$ki / _$XS() is _$6 _$ui!!"), this._$zo.setTexture(t, i) - }, q.prototype._$Rs = function() { - return this._$zo._$Rs() - }, q.prototype._$Ds = function(t) { - this._$zo._$Ds(t) - }, q.prototype.getDrawParam = function() { - return this._$zo - }, J.prototype = new s, J._$cs = "VISIBLE:", J._$ar = "LAYOUT:", J.MTN_PREFIX_FADEIN = "FADEIN:", J.MTN_PREFIX_FADEOUT = "FADEOUT:", J._$Co = 0, J._$1T = 1, J.loadMotion = function(t) { - var i = k._$C(t); - return J.loadMotion(i) - }, J.loadMotion = function(t) { - t instanceof ArrayBuffer && (t = new DataView(t)); - var i = new J, - e = [0], - r = t.byteLength; - i._$yT = 0; - for (var o = 0; o < r; ++o) { - var n = Q(t, o), - s = n.charCodeAt(0); - if ("\n" != n && "\r" != n) if ("#" != n) if ("$" != n) { - if (97 <= s && s <= 122 || 65 <= s && s <= 90 || "_" == n) { - for (var _ = o, a = -1; o < r && ("\r" != (n = Q(t, o)) && "\n" != n); ++o) if ("=" == n) { - a = o; - break - } - if (a >= 0) { - var h = new B; - O.startsWith(t, _, J._$cs) ? 
(h._$RP = B._$hs, h._$4P = O.createString(t, _, a - _)) : O.startsWith(t, _, J._$ar) ? (h._$4P = O.createString(t, _ + 7, a - _ - 7), O.startsWith(t, _ + 7, "ANCHOR_X") ? h._$RP = B._$xs : O.startsWith(t, _ + 7, "ANCHOR_Y") ? h._$RP = B._$us : O.startsWith(t, _ + 7, "SCALE_X") ? h._$RP = B._$qs : O.startsWith(t, _ + 7, "SCALE_Y") ? h._$RP = B._$Ys : O.startsWith(t, _ + 7, "X") ? h._$RP = B._$ws : O.startsWith(t, _ + 7, "Y") && (h._$RP = B._$Ns)) : (h._$RP = B._$Fr, h._$4P = O.createString(t, _, a - _)), i.motions.push(h); - var l = 0, - $ = []; - for (o = a + 1; o < r && ("\r" != (n = Q(t, o)) && "\n" != n); ++o) if ("," != n && " " != n && "\t" != n) { - var u = O._$LS(t, r, o, e); - if (e[0] > 0) { - $.push(u), l++; - var p = e[0]; - if (p < o) { - console.log("_$n0 _$hi . @Live2DMotion loadMotion()\n"); - break - } - o = p - 1 - } - } - h._$I0 = new Float32Array($), l > i._$yT && (i._$yT = l) - } - } - } else { - for (var _ = o, a = -1; o < r && ("\r" != (n = Q(t, o)) && "\n" != n); ++o) if ("=" == n) { - a = o; - break - } - var f = !1; - if (a >= 0) for (a == _ + 4 && "f" == Q(t, _ + 1) && "p" == Q(t, _ + 2) && "s" == Q(t, _ + 3) && (f = !0), o = a + 1; o < r && ("\r" != (n = Q(t, o)) && "\n" != n); ++o) if ("," != n && " " != n && "\t" != n) { - var u = O._$LS(t, r, o, e); - e[0] > 0 && f && 5 < u && u < 121 && (i._$D0 = u), o = e[0] - } - for (; o < r && ("\n" != Q(t, o) && "\r" != Q(t, o)); ++o); - } else for (; o < r && ("\n" != Q(t, o) && "\r" != Q(t, o)); ++o); - } - return i._$rr = 1e3 * i._$yT / i._$D0 | 0, i - }, J.prototype.getDurationMSec = function() { - return this._$E ? -1 : this._$rr - }, J.prototype.getLoopDurationMSec = function() { - return this._$rr - }, J.prototype.dump = function() { - for (var t = 0; t < this.motions.length; t++) { - var i = this.motions[t]; - console.log("_$wL[%s] [%d]. ", i._$4P, i._$I0.length); - for (var e = 0; e < i._$I0.length && e < 10; e++) console.log("%5.2f ,", i._$I0[e]); - console.log("\n") - } - }, J.prototype.updateParamExe = function(t, i, e, r) { - for (var o = i - r._$z2, n = o * this._$D0 / 1e3, s = 0 | n, _ = n - s, a = 0; a < this.motions.length; a++) { - var h = this.motions[a], - l = h._$I0.length, - $ = h._$4P; - if (h._$RP == B._$hs) { - var u = h._$I0[s >= l ? l - 1 : s]; - t.setParamFloat($, u) - } else if (B._$ws <= h._$RP && h._$RP <= B._$Ys); - else { - var p, f = t.getParamIndex($), - c = t.getModelContext(), - d = c.getParamMax(f), - g = c.getParamMin(f), - y = .4 * (d - g), - m = c.getParamFloat(f), - T = h._$I0[s >= l ? l - 1 : s], - P = h._$I0[s + 1 >= l ? l - 1 : s + 1]; - p = T < P && P - T > y || T > P && T - P > y ? T : T + (P - T) * _; - var S = m + (p - m) * e; - t.setParamFloat($, S) - } - } - s >= this._$yT && (this._$E ? 
(r._$z2 = i, this.loopFadeIn && (r._$bs = i)) : r._$9L = !0), this._$eP = e - }, J.prototype._$r0 = function() { - return this._$E - }, J.prototype._$aL = function(t) { - this._$E = t - }, J.prototype._$S0 = function() { - return this._$D0 - }, J.prototype._$U0 = function(t) { - this._$D0 = t - }, J.prototype.isLoopFadeIn = function() { - return this.loopFadeIn - }, J.prototype.setLoopFadeIn = function(t) { - this.loopFadeIn = t - }, N.prototype.clear = function() { - this.size = 0 - }, N.prototype.add = function(t) { - if (this._$P.length <= this.size) { - var i = new Float32Array(2 * this.size); - w._$jT(this._$P, 0, i, 0, this.size), this._$P = i - } - this._$P[this.size++] = t - }, N.prototype._$BL = function() { - var t = new Float32Array(this.size); - return w._$jT(this._$P, 0, t, 0, this.size), t - }, B._$Fr = 0, B._$hs = 1, B._$ws = 100, B._$Ns = 101, B._$xs = 102, B._$us = 103, B._$qs = 104, B._$Ys = 105, Z.prototype = new I, Z._$gT = new Array, Z.prototype._$zP = function() { - this._$GS = new D, this._$GS._$zP() - }, Z.prototype._$F0 = function(t) { - I.prototype._$F0.call(this, t), this._$A = t._$6L(), this._$o = t._$6L(), this._$GS = t._$nP(), this._$Eo = t._$nP(), I.prototype.readV2_opacity.call(this, t) - }, Z.prototype.init = function(t) { - var i = new K(this), - e = (this._$o + 1) * (this._$A + 1); - return null != i._$Cr && (i._$Cr = null), i._$Cr = new Float32Array(2 * e), null != i._$hr && (i._$hr = null), this._$32() ? i._$hr = new Float32Array(2 * e) : i._$hr = null, i - }, Z.prototype._$Nr = function(t, i) { - var e = i; - if (this._$GS._$Ur(t)) { - var r = this._$VT(), - o = Z._$gT; - o[0] = !1, v._$Vr(t, this._$GS, o, r, this._$Eo, e._$Cr, 0, 2), i._$Ib(o[0]), this.interpolateOpacity(t, this._$GS, i, o) - } - }, Z.prototype._$2b = function(t, i) { - var e = i; - if (e._$hS(!0), this._$32()) { - var r = this.getTargetBaseDataID(); - if (e._$8r == I._$ur && (e._$8r = t.getBaseDataIndex(r)), e._$8r < 0) at._$so && _._$li("_$L _$0P _$G :: %s", r), e._$hS(!1); - else { - var o = t.getBaseData(e._$8r), - n = t._$q2(e._$8r); - if (null != o && n._$yo()) { - var s = n.getTotalScale(); - e.setTotalScale_notForClient(s); - var a = n.getTotalOpacity(); - e.setTotalOpacity(a * e.getInterpolatedOpacity()), o._$nb(t, n, e._$Cr, e._$hr, this._$VT(), 0, 2), e._$hS(!0) - } else e._$hS(!1) - } - } else e.setTotalOpacity(e.getInterpolatedOpacity()) - }, Z.prototype._$nb = function(t, i, e, r, o, n, s) { - var _ = i, - a = null != _._$hr ? 
_._$hr : _._$Cr; - Z.transformPoints_sdk2(e, r, o, n, s, a, this._$o, this._$A) - }, Z.transformPoints_sdk2 = function(i, e, r, o, n, s, _, a) { - for (var h, l, $, u = r * n, p = 0, f = 0, c = 0, d = 0, g = 0, y = 0, m = !1, T = o; T < u; T += n) { - var P, S, v, L; - if (v = i[T], L = i[T + 1], P = v * _, S = L * a, P < 0 || S < 0 || _ <= P || a <= S) { - var M = _ + 1; - if (!m) { - m = !0, p = .25 * (s[2 * (0 + 0 * M)] + s[2 * (_ + 0 * M)] + s[2 * (0 + a * M)] + s[2 * (_ + a * M)]), f = .25 * (s[2 * (0 + 0 * M) + 1] + s[2 * (_ + 0 * M) + 1] + s[2 * (0 + a * M) + 1] + s[2 * (_ + a * M) + 1]); - var E = s[2 * (_ + a * M)] - s[2 * (0 + 0 * M)], - A = s[2 * (_ + a * M) + 1] - s[2 * (0 + 0 * M) + 1], - I = s[2 * (_ + 0 * M)] - s[2 * (0 + a * M)], - w = s[2 * (_ + 0 * M) + 1] - s[2 * (0 + a * M) + 1]; - c = .5 * (E + I), d = .5 * (A + w), g = .5 * (E - I), y = .5 * (A - w), p -= .5 * (c + g), f -= .5 * (d + y) - } - if (-2 < v && v < 3 && -2 < L && L < 3) if (v <= 0) if (L <= 0) { - var x = s[2 * (0 + 0 * M)], - O = s[2 * (0 + 0 * M) + 1], - D = p - 2 * c, - R = f - 2 * d, - b = p - 2 * g, - F = f - 2 * y, - C = p - 2 * c - 2 * g, - N = f - 2 * d - 2 * y, - B = .5 * (v - -2), - U = .5 * (L - -2); - B + U <= 1 ? (e[T] = C + (b - C) * B + (D - C) * U, e[T + 1] = N + (F - N) * B + (R - N) * U) : (e[T] = x + (D - x) * (1 - B) + (b - x) * (1 - U), e[T + 1] = O + (R - O) * (1 - B) + (F - O) * (1 - U)) - } else if (L >= 1) { - var b = s[2 * (0 + a * M)], - F = s[2 * (0 + a * M) + 1], - C = p - 2 * c + 1 * g, - N = f - 2 * d + 1 * y, - x = p + 3 * g, - O = f + 3 * y, - D = p - 2 * c + 3 * g, - R = f - 2 * d + 3 * y, - B = .5 * (v - -2), - U = .5 * (L - 1); - B + U <= 1 ? (e[T] = C + (b - C) * B + (D - C) * U, e[T + 1] = N + (F - N) * B + (R - N) * U) : (e[T] = x + (D - x) * (1 - B) + (b - x) * (1 - U), e[T + 1] = O + (R - O) * (1 - B) + (F - O) * (1 - U)) - } else { - var G = 0 | S; - G == a && (G = a - 1); - var B = .5 * (v - -2), - U = S - G, - Y = G / a, - k = (G + 1) / a, - b = s[2 * (0 + G * M)], - F = s[2 * (0 + G * M) + 1], - x = s[2 * (0 + (G + 1) * M)], - O = s[2 * (0 + (G + 1) * M) + 1], - C = p - 2 * c + Y * g, - N = f - 2 * d + Y * y, - D = p - 2 * c + k * g, - R = f - 2 * d + k * y; - B + U <= 1 ? (e[T] = C + (b - C) * B + (D - C) * U, e[T + 1] = N + (F - N) * B + (R - N) * U) : (e[T] = x + (D - x) * (1 - B) + (b - x) * (1 - U), e[T + 1] = O + (R - O) * (1 - B) + (F - O) * (1 - U)) - } else if (1 <= v) if (L <= 0) { - var D = s[2 * (_ + 0 * M)], - R = s[2 * (_ + 0 * M) + 1], - x = p + 3 * c, - O = f + 3 * d, - C = p + 1 * c - 2 * g, - N = f + 1 * d - 2 * y, - b = p + 3 * c - 2 * g, - F = f + 3 * d - 2 * y, - B = .5 * (v - 1), - U = .5 * (L - -2); - B + U <= 1 ? (e[T] = C + (b - C) * B + (D - C) * U, e[T + 1] = N + (F - N) * B + (R - N) * U) : (e[T] = x + (D - x) * (1 - B) + (b - x) * (1 - U), e[T + 1] = O + (R - O) * (1 - B) + (F - O) * (1 - U)) - } else if (L >= 1) { - var C = s[2 * (_ + a * M)], - N = s[2 * (_ + a * M) + 1], - b = p + 3 * c + 1 * g, - F = f + 3 * d + 1 * y, - D = p + 1 * c + 3 * g, - R = f + 1 * d + 3 * y, - x = p + 3 * c + 3 * g, - O = f + 3 * d + 3 * y, - B = .5 * (v - 1), - U = .5 * (L - 1); - B + U <= 1 ? 
(e[T] = C + (b - C) * B + (D - C) * U, e[T + 1] = N + (F - N) * B + (R - N) * U) : (e[T] = x + (D - x) * (1 - B) + (b - x) * (1 - U), e[T + 1] = O + (R - O) * (1 - B) + (F - O) * (1 - U)) - } else { - var G = 0 | S; - G == a && (G = a - 1); - var B = .5 * (v - 1), - U = S - G, - Y = G / a, - k = (G + 1) / a, - C = s[2 * (_ + G * M)], - N = s[2 * (_ + G * M) + 1], - D = s[2 * (_ + (G + 1) * M)], - R = s[2 * (_ + (G + 1) * M) + 1], - b = p + 3 * c + Y * g, - F = f + 3 * d + Y * y, - x = p + 3 * c + k * g, - O = f + 3 * d + k * y; - B + U <= 1 ? (e[T] = C + (b - C) * B + (D - C) * U, e[T + 1] = N + (F - N) * B + (R - N) * U) : (e[T] = x + (D - x) * (1 - B) + (b - x) * (1 - U), e[T + 1] = O + (R - O) * (1 - B) + (F - O) * (1 - U)) - } else if (L <= 0) { - var V = 0 | P; - V == _ && (V = _ - 1); - var B = P - V, - U = .5 * (L - -2), - X = V / _, - z = (V + 1) / _, - D = s[2 * (V + 0 * M)], - R = s[2 * (V + 0 * M) + 1], - x = s[2 * (V + 1 + 0 * M)], - O = s[2 * (V + 1 + 0 * M) + 1], - C = p + X * c - 2 * g, - N = f + X * d - 2 * y, - b = p + z * c - 2 * g, - F = f + z * d - 2 * y; - B + U <= 1 ? (e[T] = C + (b - C) * B + (D - C) * U, e[T + 1] = N + (F - N) * B + (R - N) * U) : (e[T] = x + (D - x) * (1 - B) + (b - x) * (1 - U), e[T + 1] = O + (R - O) * (1 - B) + (F - O) * (1 - U)) - } else if (L >= 1) { - var V = 0 | P; - V == _ && (V = _ - 1); - var B = P - V, - U = .5 * (L - 1), - X = V / _, - z = (V + 1) / _, - C = s[2 * (V + a * M)], - N = s[2 * (V + a * M) + 1], - b = s[2 * (V + 1 + a * M)], - F = s[2 * (V + 1 + a * M) + 1], - D = p + X * c + 3 * g, - R = f + X * d + 3 * y, - x = p + z * c + 3 * g, - O = f + z * d + 3 * y; - B + U <= 1 ? (e[T] = C + (b - C) * B + (D - C) * U, e[T + 1] = N + (F - N) * B + (R - N) * U) : (e[T] = x + (D - x) * (1 - B) + (b - x) * (1 - U), e[T + 1] = O + (R - O) * (1 - B) + (F - O) * (1 - U)) - } else t.err.printf("_$li calc : %.4f , %.4f\t\t\t\t\t@@BDBoxGrid\n", v, L); - else e[T] = p + v * c + L * g, e[T + 1] = f + v * d + L * y - } else l = P - (0 | P), $ = S - (0 | S), h = 2 * ((0 | P) + (0 | S) * (_ + 1)), l + $ < 1 ? (e[T] = s[h] * (1 - l - $) + s[h + 2] * l + s[h + 2 * (_ + 1)] * $, e[T + 1] = s[h + 1] * (1 - l - $) + s[h + 3] * l + s[h + 2 * (_ + 1) + 1] * $) : (e[T] = s[h + 2 * (_ + 1) + 2] * (l - 1 + $) + s[h + 2 * (_ + 1)] * (1 - l) + s[h + 2] * (1 - $), e[T + 1] = s[h + 2 * (_ + 1) + 3] * (l - 1 + $) + s[h + 2 * (_ + 1) + 1] * (1 - l) + s[h + 3] * (1 - $)) - } - }, Z.prototype.transformPoints_sdk1 = function(t, i, e, r, o, n, s) { - for (var _, a, h, l, $, u, p, f = i, c = this._$o, d = this._$A, g = o * s, y = null != f._$hr ? f._$hr : f._$Cr, m = n; m < g; m += s) at._$ts ? (_ = e[m], a = e[m + 1], _ < 0 ? _ = 0 : _ > 1 && (_ = 1), a < 0 ? a = 0 : a > 1 && (a = 1), _ *= c, a *= d, h = 0 | _, l = 0 | a, h > c - 1 && (h = c - 1), l > d - 1 && (l = d - 1), u = _ - h, p = a - l, $ = 2 * (h + l * (c + 1))) : (_ = e[m] * c, a = e[m + 1] * d, u = _ - (0 | _), p = a - (0 | a), $ = 2 * ((0 | _) + (0 | a) * (c + 1))), u + p < 1 ? 
(r[m] = y[$] * (1 - u - p) + y[$ + 2] * u + y[$ + 2 * (c + 1)] * p, r[m + 1] = y[$ + 1] * (1 - u - p) + y[$ + 3] * u + y[$ + 2 * (c + 1) + 1] * p) : (r[m] = y[$ + 2 * (c + 1) + 2] * (u - 1 + p) + y[$ + 2 * (c + 1)] * (1 - u) + y[$ + 2] * (1 - p), r[m + 1] = y[$ + 2 * (c + 1) + 3] * (u - 1 + p) + y[$ + 2 * (c + 1) + 1] * (1 - u) + y[$ + 3] * (1 - p)) - }, Z.prototype._$VT = function() { - return (this._$o + 1) * (this._$A + 1) - }, Z.prototype.getType = function() { - return I._$_b - }, K.prototype = new _t, tt._$42 = 0, tt.prototype._$zP = function() { - this._$3S = new Array, this._$aS = new Array - }, tt.prototype._$F0 = function(t) { - this._$g0 = t._$8L(), this.visible = t._$8L(), this._$NL = t._$nP(), this._$3S = t._$nP(), this._$aS = t._$nP() - }, tt.prototype.init = function(t) { - var i = new it(this); - return i.setPartsOpacity(this.isVisible() ? 1 : 0), i - }, tt.prototype._$6o = function(t) { - if (null == this._$3S) throw new Error("_$3S _$6 _$Wo@_$6o"); - this._$3S.push(t) - }, tt.prototype._$3o = function(t) { - if (null == this._$aS) throw new Error("_$aS _$6 _$Wo@_$3o"); - this._$aS.push(t) - }, tt.prototype._$Zo = function(t) { - this._$3S = t - }, tt.prototype._$xo = function(t) { - this._$aS = t - }, tt.prototype.isVisible = function() { - return this.visible - }, tt.prototype._$uL = function() { - return this._$g0 - }, tt.prototype._$KP = function(t) { - this.visible = t - }, tt.prototype._$ET = function(t) { - this._$g0 = t - }, tt.prototype.getBaseData = function() { - return this._$3S - }, tt.prototype.getDrawData = function() { - return this._$aS - }, tt.prototype._$p2 = function() { - return this._$NL - }, tt.prototype._$ob = function(t) { - this._$NL = t - }, tt.prototype.getPartsID = function() { - return this._$NL - }, tt.prototype._$MP = function(t) { - this._$NL = t - }, it.prototype = new $, it.prototype.getPartsOpacity = function() { - return this._$VS - }, it.prototype.setPartsOpacity = function(t) { - this._$VS = t - }, et._$L7 = function() { - u._$27(), yt._$27(), b._$27(), l._$27() - }, et.prototype.toString = function() { - return this.id - }, rt.prototype._$F0 = function(t) {}, ot.prototype._$1s = function() { - return this._$4S - }, ot.prototype._$zP = function() { - this._$4S = new Array - }, ot.prototype._$F0 = function(t) { - this._$4S = t._$nP() - }, ot.prototype._$Ks = function(t) { - this._$4S.push(t) - }, nt.tr = new gt, nt._$50 = new gt, nt._$Ti = new Array(0, 0), nt._$Pi = new Array(0, 0), nt._$B = new Array(0, 0), nt.prototype._$lP = function(t, i, e, r) { - this.viewport = new Array(t, i, e, r) - }, nt.prototype._$bL = function() { - this.context.save(); - var t = this.viewport; - null != t && (this.context.beginPath(), this.context._$Li(t[0], t[1], t[2], t[3]), this.context.clip()) - }, nt.prototype._$ei = function() { - this.context.restore() - }, nt.prototype.drawElements = function(t, i, e, r, o, n, s, a) { - try { - o != this._$Qo && (this._$Qo = o, this.context.globalAlpha = o); - for (var h = i.length, l = t.width, $ = t.height, u = this.context, p = this._$xP, f = this._$uP, c = this._$6r, d = this._$3r, g = nt.tr, y = nt._$Ti, m = nt._$Pi, T = nt._$B, P = 0; P < h; P += 3) { - u.save(); - var S = i[P], - v = i[P + 1], - L = i[P + 2], - M = p + c * e[2 * S], - E = f + d * e[2 * S + 1], - A = p + c * e[2 * v], - I = f + d * e[2 * v + 1], - w = p + c * e[2 * L], - x = f + d * e[2 * L + 1]; - s && (s._$PS(M, E, T), M = T[0], E = T[1], s._$PS(A, I, T), A = T[0], I = T[1], s._$PS(w, x, T), w = T[0], x = T[1]); - var O = l * r[2 * S], - D 
= $ - $ * r[2 * S + 1], - R = l * r[2 * v], - b = $ - $ * r[2 * v + 1], - F = l * r[2 * L], - C = $ - $ * r[2 * L + 1], - N = Math.atan2(b - D, R - O), - B = Math.atan2(I - E, A - M), - U = A - M, - G = I - E, - Y = Math.sqrt(U * U + G * G), - k = R - O, - V = b - D, - X = Math.sqrt(k * k + V * V), - z = Y / X; - It._$ni(F, C, O, D, R - O, b - D, -(b - D), R - O, y), It._$ni(w, x, M, E, A - M, I - E, -(I - E), A - M, m); - var H = (m[0] - y[0]) / y[1], - W = Math.min(O, R, F), - j = Math.max(O, R, F), - q = Math.min(D, b, C), - J = Math.max(D, b, C), - Q = Math.floor(W), - Z = Math.floor(q), - K = Math.ceil(j), - tt = Math.ceil(J); - g.identity(), g.translate(M, E), g.rotate(B), g.scale(1, m[1] / y[1]), g.shear(H, 0), g.scale(z, z), g.rotate(-N), g.translate(-O, -D), g.setContext(u); - if (n || (n = 1.2), at.IGNORE_EXPAND && (n = 0), at.USE_CACHED_POLYGON_IMAGE) { - var it = a._$e0; - if (it.gl_cacheImage = it.gl_cacheImage || {}, !it.gl_cacheImage[P]) { - var et = nt.createCanvas(K - Q, tt - Z); - at.DEBUG_DATA.LDGL_CANVAS_MB = at.DEBUG_DATA.LDGL_CANVAS_MB || 0, at.DEBUG_DATA.LDGL_CANVAS_MB += (K - Q) * (tt - Z) * 4; - var rt = et.getContext("2d"); - rt.translate(-Q, -Z), nt.clip(rt, g, n, Y, O, D, R, b, F, C, M, E, A, I, w, x), rt.drawImage(t, 0, 0), it.gl_cacheImage[P] = { - cacheCanvas: et, - cacheContext: rt - } - } - u.drawImage(it.gl_cacheImage[P].cacheCanvas, Q, Z) - } else at.IGNORE_CLIP || nt.clip(u, g, n, Y, O, D, R, b, F, C, M, E, A, I, w, x), at.USE_ADJUST_TRANSLATION && (W = 0, j = l, q = 0, J = $), u.drawImage(t, W, q, j - W, J - q, W, q, j - W, J - q); - u.restore() - } - } catch (t) { - _._$Rb(t) - } - }, nt.clip = function(t, i, e, r, o, n, s, _, a, h, l, $, u, p, f, c) { - e > .02 ? nt.expandClip(t, i, e, r, l, $, u, p, f, c) : nt.clipWithTransform(t, null, o, n, s, _, a, h) - }, nt.expandClip = function(t, i, e, r, o, n, s, _, a, h) { - var l = s - o, - $ = _ - n, - u = a - o, - p = h - n, - f = l * p - $ * u > 0 ? 
e : -e, - c = -$, - d = l, - g = a - s, - y = h - _, - m = -y, - T = g, - P = Math.sqrt(g * g + y * y), - S = -p, - v = u, - L = Math.sqrt(u * u + p * p), - M = o - f * c / r, - E = n - f * d / r, - A = s - f * c / r, - I = _ - f * d / r, - w = s - f * m / P, - x = _ - f * T / P, - O = a - f * m / P, - D = h - f * T / P, - R = o + f * S / L, - b = n + f * v / L, - F = a + f * S / L, - C = h + f * v / L, - N = nt._$50; - return null != i._$P2(N) && (nt.clipWithTransform(t, N, M, E, A, I, w, x, O, D, F, C, R, b), !0) - }, nt.clipWithTransform = function(t, i, e, r, o, n, s, a) { - if (arguments.length < 7) return void _._$li("err : @LDGL.clip()"); - if (!(arguments[1] instanceof gt)) return void _._$li("err : a[0] is _$6 LDTransform @LDGL.clip()"); - var h = nt._$B, - l = i, - $ = arguments; - if (t.beginPath(), l) { - l._$PS($[2], $[3], h), t.moveTo(h[0], h[1]); - for (var u = 4; u < $.length; u += 2) l._$PS($[u], $[u + 1], h), t.lineTo(h[0], h[1]) - } else { - t.moveTo($[2], $[3]); - for (var u = 4; u < $.length; u += 2) t.lineTo($[u], $[u + 1]) - } - t.clip() - }, nt.createCanvas = function(t, i) { - var e = document.createElement("canvas"); - return e.setAttribute("width", t), e.setAttribute("height", i), e || _._$li("err : " + e), e - }, nt.dumpValues = function() { - for (var t = "", i = 0; i < arguments.length; i++) t += "[" + i + "]= " + arguments[i].toFixed(3) + " , "; - console.log(t) - }, st.prototype._$F0 = function(t) { - this._$TT = t._$_T(), this._$LT = t._$_T(), this._$FS = t._$_T(), this._$wL = t._$nP() - }, st.prototype.getMinValue = function() { - return this._$TT - }, st.prototype.getMaxValue = function() { - return this._$LT - }, st.prototype.getDefaultValue = function() { - return this._$FS - }, st.prototype.getParamID = function() { - return this._$wL - }, _t.prototype._$yo = function() { - return this._$AT && !this._$JS - }, _t.prototype._$hS = function(t) { - this._$AT = t - }, _t.prototype._$GT = function() { - return this._$e0 - }, _t.prototype._$l2 = function(t) { - this._$IP = t - }, _t.prototype.getPartsIndex = function() { - return this._$IP - }, _t.prototype._$x2 = function() { - return this._$JS - }, _t.prototype._$Ib = function(t) { - this._$JS = t - }, _t.prototype.getTotalScale = function() { - return this.totalScale - }, _t.prototype.setTotalScale_notForClient = function(t) { - this.totalScale = t - }, _t.prototype.getInterpolatedOpacity = function() { - return this._$7s - }, _t.prototype.setInterpolatedOpacity = function(t) { - this._$7s = t - }, _t.prototype.getTotalOpacity = function(t) { - return this.totalOpacity - }, _t.prototype.setTotalOpacity = function(t) { - this.totalOpacity = t - }, at._$2s = "2.1.00_1", at._$Kr = 201001e3, at._$sP = !0, at._$so = !0, at._$cb = !1, at._$3T = !0, at._$Ts = !0, at._$fb = !0, at._$ts = !0, at.L2D_DEFORMER_EXTEND = !0, at._$Wb = !1; - at._$yr = !1, at._$Zs = !1, at.L2D_NO_ERROR = 0, at._$i7 = 1e3, at._$9s = 1001, at._$es = 1100, at._$r7 = 2e3, at._$07 = 2001, at._$b7 = 2002, at._$H7 = 4e3, at.L2D_COLOR_BLEND_MODE_MULT = 0, at.L2D_COLOR_BLEND_MODE_ADD = 1, at.L2D_COLOR_BLEND_MODE_INTERPOLATE = 2, at._$6b = !0, at._$cT = 0, at.clippingMaskBufferSize = 256, at.glContext = new Array, at.frameBuffers = new Array, at.fTexture = new Array, at.IGNORE_CLIP = !1, at.IGNORE_EXPAND = !1, at.EXPAND_W = 2, at.USE_ADJUST_TRANSLATION = !0, at.USE_CANVAS_TRANSFORM = !0, at.USE_CACHED_POLYGON_IMAGE = !1, at.DEBUG_DATA = {}, at.PROFILE_IOS_SPEED = { - PROFILE_NAME: "iOS Speed", - USE_ADJUST_TRANSLATION: !0, - 
USE_CACHED_POLYGON_IMAGE: !0, - EXPAND_W: 4 - }, at.PROFILE_IOS_QUALITY = { - PROFILE_NAME: "iOS HiQ", - USE_ADJUST_TRANSLATION: !0, - USE_CACHED_POLYGON_IMAGE: !1, - EXPAND_W: 2 - }, at.PROFILE_IOS_DEFAULT = at.PROFILE_IOS_QUALITY, at.PROFILE_ANDROID = { - PROFILE_NAME: "Android", - USE_ADJUST_TRANSLATION: !1, - USE_CACHED_POLYGON_IMAGE: !1, - EXPAND_W: 2 - }, at.PROFILE_DESKTOP = { - PROFILE_NAME: "Desktop", - USE_ADJUST_TRANSLATION: !1, - USE_CACHED_POLYGON_IMAGE: !1, - EXPAND_W: 2 - }, at.initProfile = function() { - Et.isIOS() ? at.setupProfile(at.PROFILE_IOS_DEFAULT) : Et.isAndroid() ? at.setupProfile(at.PROFILE_ANDROID) : at.setupProfile(at.PROFILE_DESKTOP) - }, at.setupProfile = function(t, i) { - if ("number" == typeof t) switch (t) { - case 9901: - t = at.PROFILE_IOS_SPEED; - break; - case 9902: - t = at.PROFILE_IOS_QUALITY; - break; - case 9903: - t = at.PROFILE_IOS_DEFAULT; - break; - case 9904: - t = at.PROFILE_ANDROID; - break; - case 9905: - t = at.PROFILE_DESKTOP; - break; - default: - alert("profile _$6 _$Ui : " + t) - } - arguments.length < 2 && (i = !0), i && console.log("profile : " + t.PROFILE_NAME); - for (var e in t) at[e] = t[e], i && console.log(" [" + e + "] = " + t[e]) - }, at.init = function() { - if (at._$6b) { - console.log("Live2D %s", at._$2s), at._$6b = !1; - !0, at.initProfile() - } - }, at.getVersionStr = function() { - return at._$2s - }, at.getVersionNo = function() { - return at._$Kr - }, at._$sT = function(t) { - at._$cT = t - }, at.getError = function() { - var t = at._$cT; - return at._$cT = 0, t - }, at.dispose = function() { - at.glContext = [], at.frameBuffers = [], at.fTexture = [] - }, at.setGL = function(t, i) { - var e = i || 0; - at.glContext[e] = t - }, at.getGL = function(t) { - return at.glContext[t] - }, at.setClippingMaskBufferSize = function(t) { - at.clippingMaskBufferSize = t - }, at.getClippingMaskBufferSize = function() { - return at.clippingMaskBufferSize - }, at.deleteBuffer = function(t) { - at.getGL(t).deleteFramebuffer(at.frameBuffers[t].framebuffer), delete at.frameBuffers[t], delete at.glContext[t] - }, ht._$r2 = function(t) { - return t < 0 ? 0 : t > 1 ? 1 : .5 - .5 * Math.cos(t * Lt.PI_F) - }, lt._$fr = -1, lt.prototype.toString = function() { - return this._$ib - }, $t.prototype = new W, $t._$42 = 0, $t._$Os = 30, $t._$ms = 0, $t._$ns = 1, $t._$_s = 2, $t._$gT = new Array, $t.prototype._$_S = function(t) { - this._$LP = t - }, $t.prototype.getTextureNo = function() { - return this._$LP - }, $t.prototype._$ZL = function() { - return this._$Qi - }, $t.prototype._$H2 = function() { - return this._$JP - }, $t.prototype.getNumPoints = function() { - return this._$d0 - }, $t.prototype.getType = function() { - return W._$wb - }, $t.prototype._$B2 = function(t, i, e) { - var r = i, - o = null != r._$hr ? 
r._$hr : r._$Cr; - switch (U._$do) { - default: - case U._$Ms: - throw new Error("_$L _$ro "); - case U._$Qs: - for (var n = this._$d0 - 1; n >= 0; --n) o[n * U._$No + 4] = e - } - }, $t.prototype._$zP = function() { - this._$GS = new D, this._$GS._$zP() - }, $t.prototype._$F0 = function(t) { - W.prototype._$F0.call(this, t), this._$LP = t._$6L(), this._$d0 = t._$6L(), this._$Yo = t._$6L(); - var i = t._$nP(); - this._$BP = new Int16Array(3 * this._$Yo); - for (var e = 3 * this._$Yo - 1; e >= 0; --e) this._$BP[e] = i[e]; - if (this._$Eo = t._$nP(), this._$Qi = t._$nP(), t.getFormatVersion() >= G._$s7) { - if (this._$JP = t._$6L(), 0 != this._$JP) { - if (0 != (1 & this._$JP)) { - var r = t._$6L(); - null == this._$5P && (this._$5P = new Object), this._$5P._$Hb = parseInt(r) - } - 0 != (this._$JP & $t._$Os) ? this._$6s = (this._$JP & $t._$Os) >> 1 : this._$6s = $t._$ms, 0 != (32 & this._$JP) && (this.culling = !1) - } - } else this._$JP = 0 - }, $t.prototype.init = function(t) { - var i = new ut(this), - e = this._$d0 * U._$No, - r = this._$32(); - switch (null != i._$Cr && (i._$Cr = null), i._$Cr = new Float32Array(e), null != i._$hr && (i._$hr = null), i._$hr = r ? new Float32Array(e) : null, U._$do) { - default: - case U._$Ms: - if (U._$Ls) for (var o = this._$d0 - 1; o >= 0; --o) { - var n = o << 1; - this._$Qi[n + 1] = 1 - this._$Qi[n + 1] - } - break; - case U._$Qs: - for (var o = this._$d0 - 1; o >= 0; --o) { - var n = o << 1, - s = o * U._$No, - _ = this._$Qi[n], - a = this._$Qi[n + 1]; - i._$Cr[s] = _, i._$Cr[s + 1] = a, i._$Cr[s + 4] = 0, r && (i._$hr[s] = _, i._$hr[s + 1] = a, i._$hr[s + 4] = 0) - } - } - return i - }, $t.prototype._$Nr = function(t, i) { - var e = i; - if (this != e._$GT() && console.log("### assert!! ### "), this._$GS._$Ur(t) && (W.prototype._$Nr.call(this, t, e), !e._$IS[0])) { - var r = $t._$gT; - r[0] = !1, v._$Vr(t, this._$GS, r, this._$d0, this._$Eo, e._$Cr, U._$i2, U._$No) - } - }, $t.prototype._$2b = function(t, i) { - try { - this != i._$GT() && console.log("### assert!! ### "); - var e = !1; - i._$IS[0] && (e = !0); - var r = i; - if (!e && (W.prototype._$2b.call(this, t), this._$32())) { - var o = this.getTargetBaseDataID(); - if (r._$8r == W._$ur && (r._$8r = t.getBaseDataIndex(o)), r._$8r < 0) at._$so && _._$li("_$L _$0P _$G :: %s", o); - else { - var n = t.getBaseData(r._$8r), - s = t._$q2(r._$8r); - null == n || s._$x2() ? r._$AT = !1 : (n._$nb(t, s, r._$Cr, r._$hr, this._$d0, U._$i2, U._$No), r._$AT = !0), r.baseOpacity = s.getTotalOpacity() - } - } - } catch (t) { - throw t - } - }, $t.prototype.draw = function(t, i, e) { - if (this != e._$GT() && console.log("### assert!! ### "), !e._$IS[0]) { - var r = e, - o = this._$LP; - o < 0 && (o = 1); - var n = this.getOpacity(i, r) * e._$VS * e.baseOpacity, - s = null != r._$hr ? r._$hr : r._$Cr; - t.setClipBufPre_clipContextForDraw(e.clipBufPre_clipContext), t._$WP(this.culling), t._$Uo(o, 3 * this._$Yo, this._$BP, s, this._$Qi, n, this._$6s, r) - } - }, $t.prototype.dump = function() { - console.log(" _$yi( %d ) , _$d0( %d ) , _$Yo( %d ) \n", this._$LP, this._$d0, this._$Yo), console.log(" _$Oi _$di = { "); - for (var t = 0; t < this._$BP.length; t++) console.log("%5d ,", this._$BP[t]); - console.log("\n _$5i _$30"); - for (var t = 0; t < this._$Eo.length; t++) { - console.log("\n _$30[%d] = ", t); - for (var i = this._$Eo[t], e = 0; e < i.length; e++) console.log("%6.2f, ", i[e]) - } - console.log("\n") - }, $t.prototype._$72 = function(t) { - return null == this._$5P ? 
null : this._$5P[t] - }, $t.prototype.getIndexArray = function() { - return this._$BP - }, ut.prototype = new Mt, ut.prototype.getTransformedPoints = function() { - return null != this._$hr ? this._$hr : this._$Cr - }, pt.prototype._$HT = function(t) { - this.x = t.x, this.y = t.y - }, pt.prototype._$HT = function(t, i) { - this.x = t, this.y = i - }, ft.prototype = new i, ft.loadModel = function(t) { - var e = new ft; - return i._$62(e, t), e - }, ft.loadModel = function(t, e) { - var r = e || 0, - o = new ft(r); - return i._$62(o, t), o - }, ft._$to = function() { - return new ft - }, ft._$er = function(t) { - var i = new _$5("../_$_r/_$t0/_$Ri/_$_P._$d"); - if (0 == i.exists()) throw new _$ls("_$t0 _$_ _$6 _$Ui :: " + i._$PL()); - for (var e = ["../_$_r/_$t0/_$Ri/_$_P.512/_$CP._$1", "../_$_r/_$t0/_$Ri/_$_P.512/_$vP._$1", "../_$_r/_$t0/_$Ri/_$_P.512/_$EP._$1", "../_$_r/_$t0/_$Ri/_$_P.512/_$pP._$1"], r = ft.loadModel(i._$3b()), o = 0; o < e.length; o++) { - var n = new _$5(e[o]); - if (0 == n.exists()) throw new _$ls("_$t0 _$_ _$6 _$Ui :: " + n._$PL()); - r.setTexture(o, _$nL._$_o(t, n._$3b())) - } - return r - }, ft.prototype.setGL = function(t) { - at.setGL(t) - }, ft.prototype.setTransform = function(t) { - this.drawParamWebGL.setTransform(t) - }, ft.prototype.update = function() { - this._$5S.update(), this._$5S.preDraw(this.drawParamWebGL) - }, ft.prototype.draw = function() { - this._$5S.draw(this.drawParamWebGL) - }, ft.prototype._$K2 = function() { - this.drawParamWebGL._$K2() - }, ft.prototype.setTexture = function(t, i) { - null == this.drawParamWebGL && _._$li("_$Yi for QT _$ki / _$XS() is _$6 _$ui!!"), this.drawParamWebGL.setTexture(t, i) - }, ft.prototype.setTexture = function(t, i) { - null == this.drawParamWebGL && _._$li("_$Yi for QT _$ki / _$XS() is _$6 _$ui!!"), this.drawParamWebGL.setTexture(t, i) - }, ft.prototype._$Rs = function() { - return this.drawParamWebGL._$Rs() - }, ft.prototype._$Ds = function(t) { - this.drawParamWebGL._$Ds(t) - }, ft.prototype.getDrawParam = function() { - return this.drawParamWebGL - }, ft.prototype.setMatrix = function(t) { - this.drawParamWebGL.setMatrix(t) - }, ft.prototype.setPremultipliedAlpha = function(t) { - this.drawParamWebGL.setPremultipliedAlpha(t) - }, ft.prototype.isPremultipliedAlpha = function() { - return this.drawParamWebGL.isPremultipliedAlpha() - }, ft.prototype.setAnisotropy = function(t) { - this.drawParamWebGL.setAnisotropy(t) - }, ft.prototype.getAnisotropy = function() { - return this.drawParamWebGL.getAnisotropy() - }, ct.prototype._$tb = function() { - return this.motions - }, ct.prototype.startMotion = function(t, i) { - for (var e = null, r = this.motions.length, o = 0; o < r; ++o) null != (e = this.motions[o]) && (e._$qS(e._$w0.getFadeOut()), this._$eb && _._$Ji("MotionQueueManager[size:%2d]->startMotion() / start _$K _$3 (m%d)\n", r, e._$sr)); - if (null == t) return -1; - e = new dt, e._$w0 = t, this.motions.push(e); - var n = e._$sr; - return this._$eb && _._$Ji("MotionQueueManager[size:%2d]->startMotion() / new _$w0 (m%d)\n", r, n), n - }, ct.prototype.updateParam = function(t) { - try { - for (var i = !1, e = 0; e < this.motions.length; e++) { - var r = this.motions[e]; - if (null != r) { - var o = r._$w0; - null != o ? 
(o.updateParam(t, r), i = !0, r.isFinished() && (this._$eb && _._$Ji("MotionQueueManager[size:%2d]->updateParam() / _$T0 _$w0 (m%d)\n", this.motions.length - 1, r._$sr), this.motions.splice(e, 1), e--)) : (this.motions = this.motions.splice(e, 1), e--) - } else this.motions.splice(e, 1), e-- - } - return i - } catch (t) { - return _._$li(t), !0 - } - }, ct.prototype.isFinished = function(t) { - if (arguments.length >= 1) { - for (var i = 0; i < this.motions.length; i++) { - var e = this.motions[i]; - if (null != e && (e._$sr == t && !e.isFinished())) return !1 - } - return !0 - } - for (var i = 0; i < this.motions.length; i++) { - var e = this.motions[i]; - if (null != e) { - if (null != e._$w0) { - if (!e.isFinished()) return !1 - } else this.motions.splice(i, 1), i-- - } else this.motions.splice(i, 1), i-- - } - return !0 - }, ct.prototype.stopAllMotions = function() { - for (var t = 0; t < this.motions.length; t++) { - var i = this.motions[t]; - if (null != i) { - i._$w0; - this.motions.splice(t, 1), t-- - } else this.motions.splice(t, 1), t-- - } - }, ct.prototype._$Zr = function(t) { - this._$eb = t - }, ct.prototype._$e = function() { - console.log("-- _$R --\n"); - for (var t = 0; t < this.motions.length; t++) { - var i = this.motions[t], - e = i._$w0; - console.log("MotionQueueEnt[%d] :: %s\n", this.motions.length, e.toString()) - } - }, dt._$Gs = 0, dt.prototype.isFinished = function() { - return this._$9L - }, dt.prototype._$qS = function(t) { - var i = w.getUserTimeMSec(), - e = i + t; - (this._$Do < 0 || e < this._$Do) && (this._$Do = e) - }, dt.prototype._$Bs = function() { - return this._$sr - }, gt.prototype.setContext = function(t) { - var i = this.m; - t.transform(i[0], i[1], i[3], i[4], i[6], i[7]) - }, gt.prototype.toString = function() { - for (var t = "LDTransform { ", i = 0; i < 9; i++) t += this.m[i].toFixed(2) + " ,"; - return t += " }" - }, gt.prototype.identity = function() { - var t = this.m; - t[0] = t[4] = t[8] = 1, t[1] = t[2] = t[3] = t[5] = t[6] = t[7] = 0 - }, gt.prototype._$PS = function(t, i, e) { - null == e && (e = new Array(0, 0)); - var r = this.m; - return e[0] = r[0] * t + r[3] * i + r[6], e[1] = r[1] * t + r[4] * i + r[7], e - }, gt.prototype._$P2 = function(t) { - t || (t = new gt); - var i = this.m, - e = i[0], - r = i[1], - o = i[2], - n = i[3], - s = i[4], - _ = i[5], - a = i[6], - h = i[7], - l = i[8], - $ = e * s * l + r * _ * a + o * n * h - e * _ * h - o * s * a - r * n * l; - if (0 == $) return null; - var u = 1 / $; - return t.m[0] = u * (s * l - h * _), t.m[1] = u * (h * o - r * l), t.m[2] = u * (r * _ - s * o), t.m[3] = u * (a * _ - n * l), t.m[4] = u * (e * l - a * o), t.m[5] = u * (n * o - e * _), t.m[6] = u * (n * h - a * s), t.m[7] = u * (a * r - e * h), t.m[8] = u * (e * s - n * r), t - }, gt.prototype.transform = function(t, i, e) { - null == e && (e = new Array(0, 0)); - var r = this.m; - return e[0] = r[0] * t + r[3] * i + r[6], e[1] = r[1] * t + r[4] * i + r[7], e - }, gt.prototype.translate = function(t, i) { - var e = this.m; - e[6] = e[0] * t + e[3] * i + e[6], e[7] = e[1] * t + e[4] * i + e[7], e[8] = e[2] * t + e[5] * i + e[8] - }, gt.prototype.scale = function(t, i) { - var e = this.m; - e[0] *= t, e[1] *= t, e[2] *= t, e[3] *= i, e[4] *= i, e[5] *= i - }, gt.prototype.shear = function(t, i) { - var e = this.m, - r = e[0] + e[3] * i, - o = e[1] + e[4] * i, - n = e[2] + e[5] * i; - e[3] = e[0] * t + e[3], e[4] = e[1] * t + e[4], e[5] = e[2] * t + e[5], e[0] = r, e[1] = o, e[2] = n - }, gt.prototype.rotate = function(t) { - 
var i = this.m, - e = Math.cos(t), - r = Math.sin(t), - o = i[0] * e + i[3] * r, - n = i[1] * e + i[4] * r, - s = i[2] * e + i[5] * r; - i[3] = -i[0] * r + i[3] * e, i[4] = -i[1] * r + i[4] * e, i[5] = -i[2] * r + i[5] * e, i[0] = o, i[1] = n, i[2] = s - }, gt.prototype.concatenate = function(t) { - var i = this.m, - e = t.m, - r = i[0] * e[0] + i[3] * e[1] + i[6] * e[2], - o = i[1] * e[0] + i[4] * e[1] + i[7] * e[2], - n = i[2] * e[0] + i[5] * e[1] + i[8] * e[2], - s = i[0] * e[3] + i[3] * e[4] + i[6] * e[5], - _ = i[1] * e[3] + i[4] * e[4] + i[7] * e[5], - a = i[2] * e[3] + i[5] * e[4] + i[8] * e[5], - h = i[0] * e[6] + i[3] * e[7] + i[6] * e[8], - l = i[1] * e[6] + i[4] * e[7] + i[7] * e[8], - $ = i[2] * e[6] + i[5] * e[7] + i[8] * e[8]; - m[0] = r, m[1] = o, m[2] = n, m[3] = s, m[4] = _, m[5] = a, m[6] = h, m[7] = l, m[8] = $ - }, yt.prototype = new et, yt._$eT = null, yt._$tP = new Object, yt._$2o = function() { - return null == yt._$eT && (yt._$eT = yt.getID("DST_BASE")), yt._$eT - }, yt._$27 = function() { - yt._$tP.clear(), yt._$eT = null - }, yt.getID = function(t) { - var i = yt._$tP[t]; - return null == i && (i = new yt(t), yt._$tP[t] = i), i - }, yt.prototype._$3s = function() { - return new yt - }, mt.prototype = new E, mt._$9r = function(t) { - return new Float32Array(t) - }, mt._$vb = function(t) { - return new Int16Array(t) - }, mt._$cr = function(t, i) { - return null == t || t._$yL() < i.length ? (t = mt._$9r(2 * i.length), t.put(i), t._$oT(0)) : (t.clear(), t.put(i), t._$oT(0)), t - }, mt._$mb = function(t, i) { - return null == t || t._$yL() < i.length ? (t = mt._$vb(2 * i.length), t.put(i), t._$oT(0)) : (t.clear(), t.put(i), t._$oT(0)), t - }, mt._$Hs = function() { - return this._$Gr - }, mt._$as = function(t) { - this._$Gr = t - }, mt.prototype.getGL = function() { - return this.gl - }, mt.prototype.setGL = function(t) { - this.gl = t - }, mt.prototype.setTransform = function(t) { - this.transform = t - }, mt.prototype._$ZT = function() { - var t = this.gl; - this.firstDraw && (this.initShader(), this.firstDraw = !1, this.anisotropyExt = t.getExtension("EXT_texture_filter_anisotropic") || t.getExtension("WEBKIT_EXT_texture_filter_anisotropic") || t.getExtension("MOZ_EXT_texture_filter_anisotropic"), this.anisotropyExt && (this.maxAnisotropy = t.getParameter(this.anisotropyExt.MAX_TEXTURE_MAX_ANISOTROPY_EXT))), t.disable(t.SCISSOR_TEST), t.disable(t.STENCIL_TEST), t.disable(t.DEPTH_TEST), t.frontFace(t.CW), t.enable(t.BLEND), t.colorMask(1, 1, 1, 1), t.bindBuffer(t.ARRAY_BUFFER, null), t.bindBuffer(t.ELEMENT_ARRAY_BUFFER, null) - }, mt.prototype._$Uo = function(t, i, e, r, o, n, s, _) { - if (!(n < .01 && null == this.clipBufPre_clipContextMask)) { - var a = (n > .9 && at.EXPAND_W, this.gl); - if (null == this.gl) throw new Error("gl is null"); - var h = 1 * this._$C0 * n, - l = 1 * this._$tT * n, - $ = 1 * this._$WL * n, - u = this._$lT * n; - if (null != this.clipBufPre_clipContextMask) { - a.frontFace(a.CCW), a.useProgram(this.shaderProgram), this._$vS = Tt(a, this._$vS, r), this._$no = Pt(a, this._$no, e), a.enableVertexAttribArray(this.a_position_Loc), a.vertexAttribPointer(this.a_position_Loc, 2, a.FLOAT, !1, 0, 0), this._$NT = Tt(a, this._$NT, o), a.activeTexture(a.TEXTURE1), a.bindTexture(a.TEXTURE_2D, this.textures[t]), a.uniform1i(this.s_texture0_Loc, 1), a.enableVertexAttribArray(this.a_texCoord_Loc), a.vertexAttribPointer(this.a_texCoord_Loc, 2, a.FLOAT, !1, 0, 0), a.uniformMatrix4fv(this.u_matrix_Loc, !1, 
this.getClipBufPre_clipContextMask().matrixForMask); - var p = this.getClipBufPre_clipContextMask().layoutChannelNo, - f = this.getChannelFlagAsColor(p); - a.uniform4f(this.u_channelFlag, f.r, f.g, f.b, f.a); - var c = this.getClipBufPre_clipContextMask().layoutBounds; - a.uniform4f(this.u_baseColor_Loc, 2 * c.x - 1, 2 * c.y - 1, 2 * c._$EL() - 1, 2 * c._$5T() - 1), a.uniform1i(this.u_maskFlag_Loc, !0) - } else if (null != this.getClipBufPre_clipContextDraw()) { - a.useProgram(this.shaderProgramOff), this._$vS = Tt(a, this._$vS, r), this._$no = Pt(a, this._$no, e), a.enableVertexAttribArray(this.a_position_Loc_Off), a.vertexAttribPointer(this.a_position_Loc_Off, 2, a.FLOAT, !1, 0, 0), this._$NT = Tt(a, this._$NT, o), a.activeTexture(a.TEXTURE1), a.bindTexture(a.TEXTURE_2D, this.textures[t]), a.uniform1i(this.s_texture0_Loc_Off, 1), a.enableVertexAttribArray(this.a_texCoord_Loc_Off), a.vertexAttribPointer(this.a_texCoord_Loc_Off, 2, a.FLOAT, !1, 0, 0), a.uniformMatrix4fv(this.u_clipMatrix_Loc_Off, !1, this.getClipBufPre_clipContextDraw().matrixForDraw), a.uniformMatrix4fv(this.u_matrix_Loc_Off, !1, this.matrix4x4), a.activeTexture(a.TEXTURE2), a.bindTexture(a.TEXTURE_2D, at.fTexture[this.glno]), a.uniform1i(this.s_texture1_Loc_Off, 2); - var p = this.getClipBufPre_clipContextDraw().layoutChannelNo, - f = this.getChannelFlagAsColor(p); - a.uniform4f(this.u_channelFlag_Loc_Off, f.r, f.g, f.b, f.a), a.uniform4f(this.u_baseColor_Loc_Off, h, l, $, u) - } else a.useProgram(this.shaderProgram), this._$vS = Tt(a, this._$vS, r), this._$no = Pt(a, this._$no, e), a.enableVertexAttribArray(this.a_position_Loc), a.vertexAttribPointer(this.a_position_Loc, 2, a.FLOAT, !1, 0, 0), this._$NT = Tt(a, this._$NT, o), a.activeTexture(a.TEXTURE1), a.bindTexture(a.TEXTURE_2D, this.textures[t]), a.uniform1i(this.s_texture0_Loc, 1), a.enableVertexAttribArray(this.a_texCoord_Loc), a.vertexAttribPointer(this.a_texCoord_Loc, 2, a.FLOAT, !1, 0, 0), a.uniformMatrix4fv(this.u_matrix_Loc, !1, this.matrix4x4), a.uniform4f(this.u_baseColor_Loc, h, l, $, u), a.uniform1i(this.u_maskFlag_Loc, !1); - this.culling ? 
this.gl.enable(a.CULL_FACE) : this.gl.disable(a.CULL_FACE), this.gl.enable(a.BLEND); - var d, g, y, m; - if (null != this.clipBufPre_clipContextMask) d = a.ONE, g = a.ONE_MINUS_SRC_ALPHA, y = a.ONE, m = a.ONE_MINUS_SRC_ALPHA; - else switch (s) { - case $t._$ms: - d = a.ONE, g = a.ONE_MINUS_SRC_ALPHA, y = a.ONE, m = a.ONE_MINUS_SRC_ALPHA; - break; - case $t._$ns: - d = a.ONE, g = a.ONE, y = a.ZERO, m = a.ONE; - break; - case $t._$_s: - d = a.DST_COLOR, g = a.ONE_MINUS_SRC_ALPHA, y = a.ZERO, m = a.ONE - } - a.blendEquationSeparate(a.FUNC_ADD, a.FUNC_ADD), a.blendFuncSeparate(d, g, y, m), this.anisotropyExt && a.texParameteri(a.TEXTURE_2D, this.anisotropyExt.TEXTURE_MAX_ANISOTROPY_EXT, this.maxAnisotropy); - var T = e.length; - a.drawElements(a.TRIANGLES, T, a.UNSIGNED_SHORT, 0), a.bindTexture(a.TEXTURE_2D, null) - } - }, mt.prototype._$Rs = function() { - throw new Error("_$Rs") - }, mt.prototype._$Ds = function(t) { - throw new Error("_$Ds") - }, mt.prototype._$K2 = function() { - for (var t = 0; t < this.textures.length; t++) { - 0 != this.textures[t] && (this.gl._$K2(1, this.textures, t), this.textures[t] = null) - } - }, mt.prototype.setTexture = function(t, i) { - this.textures[t] = i - }, mt.prototype.initShader = function() { - var t = this.gl; - this.loadShaders2(), this.a_position_Loc = t.getAttribLocation(this.shaderProgram, "a_position"), this.a_texCoord_Loc = t.getAttribLocation(this.shaderProgram, "a_texCoord"), this.u_matrix_Loc = t.getUniformLocation(this.shaderProgram, "u_mvpMatrix"), this.s_texture0_Loc = t.getUniformLocation(this.shaderProgram, "s_texture0"), this.u_channelFlag = t.getUniformLocation(this.shaderProgram, "u_channelFlag"), this.u_baseColor_Loc = t.getUniformLocation(this.shaderProgram, "u_baseColor"), this.u_maskFlag_Loc = t.getUniformLocation(this.shaderProgram, "u_maskFlag"), this.a_position_Loc_Off = t.getAttribLocation(this.shaderProgramOff, "a_position"), this.a_texCoord_Loc_Off = t.getAttribLocation(this.shaderProgramOff, "a_texCoord"), this.u_matrix_Loc_Off = t.getUniformLocation(this.shaderProgramOff, "u_mvpMatrix"), this.u_clipMatrix_Loc_Off = t.getUniformLocation(this.shaderProgramOff, "u_ClipMatrix"), this.s_texture0_Loc_Off = t.getUniformLocation(this.shaderProgramOff, "s_texture0"), this.s_texture1_Loc_Off = t.getUniformLocation(this.shaderProgramOff, "s_texture1"), this.u_channelFlag_Loc_Off = t.getUniformLocation(this.shaderProgramOff, "u_channelFlag"), this.u_baseColor_Loc_Off = t.getUniformLocation(this.shaderProgramOff, "u_baseColor") - }, mt.prototype.disposeShader = function() { - var t = this.gl; - this.shaderProgram && (t.deleteProgram(this.shaderProgram), this.shaderProgram = null), this.shaderProgramOff && (t.deleteProgram(this.shaderProgramOff), this.shaderProgramOff = null) - }, mt.prototype.compileShader = function(t, i) { - var e = this.gl, - r = i, - o = e.createShader(t); - if (null == o) return _._$Ji("_$L0 to create shader"), null; - if (e.shaderSource(o, r), e.compileShader(o), !e.getShaderParameter(o, e.COMPILE_STATUS)) { - var n = e.getShaderInfoLog(o); - return _._$Ji("_$L0 to compile shader : " + n), e.deleteShader(o), null - } - return o - }, mt.prototype.loadShaders2 = function() { - var t = this.gl; - if (this.shaderProgram = t.createProgram(), !this.shaderProgram) return !1; - if (this.shaderProgramOff = t.createProgram(), !this.shaderProgramOff) return !1; - if (this.vertShader = this.compileShader(t.VERTEX_SHADER, "attribute vec4 a_position;attribute vec2 a_texCoord;varying vec2 v_texCoord;varying vec4 
v_ClipPos;uniform mat4 u_mvpMatrix;void main(){ gl_Position = u_mvpMatrix * a_position; v_ClipPos = u_mvpMatrix * a_position; v_texCoord = a_texCoord;}"), !this.vertShader) return _._$Ji("Vertex shader compile _$li!"), !1; - if (this.vertShaderOff = this.compileShader(t.VERTEX_SHADER, "attribute vec4 a_position;attribute vec2 a_texCoord;varying vec2 v_texCoord;varying vec4 v_ClipPos;uniform mat4 u_mvpMatrix;uniform mat4 u_ClipMatrix;void main(){ gl_Position = u_mvpMatrix * a_position; v_ClipPos = u_ClipMatrix * a_position; v_texCoord = a_texCoord ;}"), !this.vertShaderOff) return _._$Ji("OffVertex shader compile _$li!"), !1; - if (this.fragShader = this.compileShader(t.FRAGMENT_SHADER, "precision mediump float;varying vec2 v_texCoord;varying vec4 v_ClipPos;uniform sampler2D s_texture0;uniform vec4 u_channelFlag;uniform vec4 u_baseColor;uniform bool u_maskFlag;void main(){ vec4 smpColor; if(u_maskFlag){ float isInside = step(u_baseColor.x, v_ClipPos.x/v_ClipPos.w) * step(u_baseColor.y, v_ClipPos.y/v_ClipPos.w) * step(v_ClipPos.x/v_ClipPos.w, u_baseColor.z) * step(v_ClipPos.y/v_ClipPos.w, u_baseColor.w); smpColor = u_channelFlag * texture2D(s_texture0 , v_texCoord).a * isInside; }else{ smpColor = texture2D(s_texture0 , v_texCoord) * u_baseColor; } gl_FragColor = smpColor;}"), !this.fragShader) return _._$Ji("Fragment shader compile _$li!"), !1; - if (this.fragShaderOff = this.compileShader(t.FRAGMENT_SHADER, "precision mediump float ;varying vec2 v_texCoord;varying vec4 v_ClipPos;uniform sampler2D s_texture0;uniform sampler2D s_texture1;uniform vec4 u_channelFlag;uniform vec4 u_baseColor ;void main(){ vec4 col_formask = texture2D(s_texture0, v_texCoord) * u_baseColor; vec4 clipMask = texture2D(s_texture1, v_ClipPos.xy / v_ClipPos.w) * u_channelFlag; float maskVal = clipMask.r + clipMask.g + clipMask.b + clipMask.a; col_formask = col_formask * maskVal; gl_FragColor = col_formask;}"), !this.fragShaderOff) return _._$Ji("OffFragment shader compile _$li!"), !1; - if (t.attachShader(this.shaderProgram, this.vertShader), t.attachShader(this.shaderProgram, this.fragShader), t.attachShader(this.shaderProgramOff, this.vertShaderOff), t.attachShader(this.shaderProgramOff, this.fragShaderOff), t.linkProgram(this.shaderProgram), t.linkProgram(this.shaderProgramOff), !t.getProgramParameter(this.shaderProgram, t.LINK_STATUS)) { - var i = t.getProgramInfoLog(this.shaderProgram); - return _._$Ji("_$L0 to link program: " + i), this.vertShader && (t.deleteShader(this.vertShader), this.vertShader = 0), this.fragShader && (t.deleteShader(this.fragShader), this.fragShader = 0), this.shaderProgram && (t.deleteProgram(this.shaderProgram), this.shaderProgram = 0), this.vertShaderOff && (t.deleteShader(this.vertShaderOff), this.vertShaderOff = 0), this.fragShaderOff && (t.deleteShader(this.fragShaderOff), this.fragShaderOff = 0), this.shaderProgramOff && (t.deleteProgram(this.shaderProgramOff), this.shaderProgramOff = 0), !1 - } - return !0 - }, mt.prototype.createFramebuffer = function() { - var t = this.gl, - i = at.clippingMaskBufferSize, - e = t.createFramebuffer(); - t.bindFramebuffer(t.FRAMEBUFFER, e); - var r = t.createRenderbuffer(); - t.bindRenderbuffer(t.RENDERBUFFER, r), t.renderbufferStorage(t.RENDERBUFFER, t.RGBA4, i, i), t.framebufferRenderbuffer(t.FRAMEBUFFER, t.COLOR_ATTACHMENT0, t.RENDERBUFFER, r); - var o = t.createTexture(); - return t.bindTexture(t.TEXTURE_2D, o), t.texImage2D(t.TEXTURE_2D, 0, t.RGBA, i, i, 0, t.RGBA, t.UNSIGNED_BYTE, null), t.texParameteri(t.TEXTURE_2D, t.TEXTURE_MIN_FILTER, 
t.LINEAR), t.texParameteri(t.TEXTURE_2D, t.TEXTURE_MAG_FILTER, t.LINEAR), t.texParameteri(t.TEXTURE_2D, t.TEXTURE_WRAP_S, t.CLAMP_TO_EDGE), t.texParameteri(t.TEXTURE_2D, t.TEXTURE_WRAP_T, t.CLAMP_TO_EDGE), t.framebufferTexture2D(t.FRAMEBUFFER, t.COLOR_ATTACHMENT0, t.TEXTURE_2D, o, 0), t.bindTexture(t.TEXTURE_2D, null), t.bindRenderbuffer(t.RENDERBUFFER, null), t.bindFramebuffer(t.FRAMEBUFFER, null), at.fTexture[this.glno] = o, { - framebuffer: e, - renderbuffer: r, - texture: at.fTexture[this.glno] - } - }, St.prototype._$fP = function() { - var t, i, e, r = this._$ST(); - if (0 == (128 & r)) return 255 & r; - if (0 == (128 & (t = this._$ST()))) return (127 & r) << 7 | 127 & t; - if (0 == (128 & (i = this._$ST()))) return (127 & r) << 14 | (127 & t) << 7 | 255 & i; - if (0 == (128 & (e = this._$ST()))) return (127 & r) << 21 | (127 & t) << 14 | (127 & i) << 7 | 255 & e; - throw new lt("_$L _$0P _") - }, St.prototype.getFormatVersion = function() { - return this._$S2 - }, St.prototype._$gr = function(t) { - this._$S2 = t - }, St.prototype._$3L = function() { - return this._$fP() - }, St.prototype._$mP = function() { - return this._$zT(), this._$F += 8, this._$T.getFloat64(this._$F - 8) - }, St.prototype._$_T = function() { - return this._$zT(), this._$F += 4, this._$T.getFloat32(this._$F - 4) - }, St.prototype._$6L = function() { - return this._$zT(), this._$F += 4, this._$T.getInt32(this._$F - 4) - }, St.prototype._$ST = function() { - return this._$zT(), this._$T.getInt8(this._$F++) - }, St.prototype._$9T = function() { - return this._$zT(), this._$F += 2, this._$T.getInt16(this._$F - 2) - }, St.prototype._$2T = function() { - throw this._$zT(), this._$F += 8, new lt("_$L _$q read long") - }, St.prototype._$po = function() { - return this._$zT(), 0 != this._$T.getInt8(this._$F++) - }; - var xt = !0; - St.prototype._$bT = function() { - this._$zT(); - var t = this._$3L(), - i = null; - if (xt) try { - var e = new ArrayBuffer(2 * t); - i = new Uint16Array(e); - for (var r = 0; r < t; ++r) i[r] = this._$T.getUint8(this._$F++); - return String.fromCharCode.apply(null, i) - } catch (t) { - xt = !1 - } - try { - var o = new Array; - if (null == i) for (var r = 0; r < t; ++r) o[r] = this._$T.getUint8(this._$F++); - else for (var r = 0; r < t; ++r) o[r] = i[r]; - return String.fromCharCode.apply(null, o) - } catch (t) { - console.log("read utf8 / _$rT _$L0 !! 
: " + t) - } - }, St.prototype._$cS = function() { - this._$zT(); - for (var t = this._$3L(), i = new Int32Array(t), e = 0; e < t; e++) i[e] = this._$T.getInt32(this._$F), this._$F += 4; - return i - }, St.prototype._$Tb = function() { - this._$zT(); - for (var t = this._$3L(), i = new Float32Array(t), e = 0; e < t; e++) i[e] = this._$T.getFloat32(this._$F), this._$F += 4; - return i - }, St.prototype._$5b = function() { - this._$zT(); - for (var t = this._$3L(), i = new Float64Array(t), e = 0; e < t; e++) i[e] = this._$T.getFloat64(this._$F), this._$F += 8; - return i - }, St.prototype._$nP = function() { - return this._$Jb(-1) - }, St.prototype._$Jb = function(t) { - if (this._$zT(), t < 0 && (t = this._$3L()), t == G._$7P) { - var i = this._$6L(); - if (0 <= i && i < this._$Ko.length) return this._$Ko[i]; - throw new lt("_$sL _$4i @_$m0") - } - var e = this._$4b(t); - return this._$Ko.push(e), e - }, St.prototype._$4b = function(t) { - if (0 == t) return null; - if (50 == t) { - var i = this._$bT(), - e = b.getID(i); - return e - } - if (51 == t) { - var i = this._$bT(), - e = yt.getID(i); - return e - } - if (134 == t) { - var i = this._$bT(), - e = l.getID(i); - return e - } - if (60 == t) { - var i = this._$bT(), - e = u.getID(i); - return e - } - if (t >= 48) { - var r = G._$9o(t); - return null != r ? (r._$F0(this), r) : null - } - switch (t) { - case 1: - return this._$bT(); - case 10: - return new n(this._$6L(), !0); - case 11: - return new S(this._$mP(), this._$mP(), this._$mP(), this._$mP()); - case 12: - return new S(this._$_T(), this._$_T(), this._$_T(), this._$_T()); - case 13: - return new L(this._$mP(), this._$mP()); - case 14: - return new L(this._$_T(), this._$_T()); - case 15: - for (var o = this._$3L(), e = new Array(o), s = 0; s < o; s++) e[s] = this._$nP(); - return e; - case 17: - var e = new F(this._$mP(), this._$mP(), this._$mP(), this._$mP(), this._$mP(), this._$mP()); - return e; - case 21: - return new h(this._$6L(), this._$6L(), this._$6L(), this._$6L()); - case 22: - return new pt(this._$6L(), this._$6L()); - case 23: - throw new Error("_$L _$ro "); - case 16: - case 25: - return this._$cS(); - case 26: - return this._$5b(); - case 27: - return this._$Tb(); - case 2: - case 3: - case 4: - case 5: - case 6: - case 7: - case 8: - case 9: - case 18: - case 19: - case 20: - case 24: - case 28: - throw new lt("_$6 _$q : _$nP() of 2-9 ,18,19,20,24,28 : " + t); - default: - throw new lt("_$6 _$q : _$nP() NO _$i : " + t) - } - }, St.prototype._$8L = function() { - return 0 == this._$hL ? 
this._$v0 = this._$ST() : 8 == this._$hL && (this._$v0 = this._$ST(), this._$hL = 0), 1 == (this._$v0 >> 7 - this._$hL++ & 1) - }, St.prototype._$zT = function() { - 0 != this._$hL && (this._$hL = 0) - }, vt.prototype._$wP = function(t, i, e) { - for (var r = 0; r < e; r++) { - for (var o = 0; o < i; o++) { - var n = 2 * (o + r * i); - console.log("(% 7.3f , % 7.3f) , ", t[n], t[n + 1]) - } - console.log("\n") - } - console.log("\n") - }, Lt._$2S = Math.PI / 180, Lt._$bS = Math.PI / 180, Lt._$wS = 180 / Math.PI, Lt._$NS = 180 / Math.PI, Lt.PI_F = Math.PI, Lt._$kT = [0, .012368, .024734, .037097, .049454, .061803, .074143, .086471, .098786, .111087, .12337, .135634, .147877, .160098, .172295, .184465, .196606, .208718, .220798, .232844, .244854, .256827, .268761, .280654, .292503, .304308, .316066, .327776, .339436, .351044, .362598, .374097, .385538, .396921, .408243, .419502, .430697, .441826, .452888, .463881, .474802, .485651, .496425, .507124, .517745, .528287, .538748, .549126, .559421, .56963, .579752, .589785, .599728, .609579, .619337, .629, .638567, .648036, .657406, .666676, .675843, .684908, .693867, .70272, .711466, .720103, .72863, .737045, .745348, .753536, .76161, .769566, .777405, .785125, .792725, .800204, .807561, .814793, .821901, .828884, .835739, .842467, .849066, .855535, .861873, .868079, .874153, .880093, .885898, .891567, .897101, .902497, .907754, .912873, .917853, .922692, .92739, .931946, .936359, .940629, .944755, .948737, .952574, .956265, .959809, .963207, .966457, .96956, .972514, .97532, .977976, .980482, .982839, .985045, .987101, .989006, .990759, .992361, .993811, .995109, .996254, .997248, .998088, .998776, .999312, .999694, .999924, 1], Lt._$92 = function(t, i) { - var e = Math.atan2(t[1], t[0]), - r = Math.atan2(i[1], i[0]); - return Lt._$tS(e, r) - }, Lt._$tS = function(t, i) { - for (var e = t - i; e < -Math.PI;) e += 2 * Math.PI; - for (; e > Math.PI;) e -= 2 * Math.PI; - return e - }, Lt._$9 = function(t) { - return Math.sin(t) - }, Lt.fcos = function(t) { - return Math.cos(t) - }, Mt.prototype._$u2 = function() { - return this._$IS[0] - }, Mt.prototype._$yo = function() { - return this._$AT && !this._$IS[0] - }, Mt.prototype._$GT = function() { - return this._$e0 - }, Et._$W2 = 0, Et.SYSTEM_INFO = null, Et.USER_AGENT = navigator.userAgent, Et.isIPhone = function() { - return Et.SYSTEM_INFO || Et.setup(), Et.SYSTEM_INFO._isIPhone - }, Et.isIOS = function() { - return Et.SYSTEM_INFO || Et.setup(), Et.SYSTEM_INFO._isIPhone || Et.SYSTEM_INFO._isIPad - }, Et.isAndroid = function() { - return Et.SYSTEM_INFO || Et.setup(), Et.SYSTEM_INFO._isAndroid - }, Et.getOSVersion = function() { - return Et.SYSTEM_INFO || Et.setup(), Et.SYSTEM_INFO.version - }, Et.getOS = function() { - return Et.SYSTEM_INFO || Et.setup(), Et.SYSTEM_INFO._isIPhone || Et.SYSTEM_INFO._isIPad ? "iOS" : Et.SYSTEM_INFO._isAndroid ? 
"Android" : "_$Q0 OS" - }, Et.setup = function() { - function t(t, i) { - for (var e = t.substring(i).split(/[ _,;\.]/), r = 0, o = 0; o <= 2 && !isNaN(e[o]); o++) { - var n = parseInt(e[o]); - if (n < 0 || n > 999) { - _._$li("err : " + n + " @UtHtml5.setup()"), r = 0; - break - } - r += n * Math.pow(1e3, 2 - o) - } - return r - } - var i, e = Et.USER_AGENT, - r = Et.SYSTEM_INFO = { - userAgent: e - }; - if ((i = e.indexOf("iPhone OS ")) >= 0) r.os = "iPhone", r._isIPhone = !0, r.version = t(e, i + "iPhone OS ".length); - else if ((i = e.indexOf("iPad")) >= 0) { - if ((i = e.indexOf("CPU OS")) < 0) return void _._$li(" err : " + e + " @UtHtml5.setup()"); - r.os = "iPad", r._isIPad = !0, r.version = t(e, i + "CPU OS ".length) - } else(i = e.indexOf("Android")) >= 0 ? (r.os = "Android", r._isAndroid = !0, r.version = t(e, i + "Android ".length)) : (r.os = "-", r.version = -1) - }, window.UtSystem = w, window.UtDebug = _, window.LDTransform = gt, window.LDGL = nt, window.Live2D = at, window.Live2DModelWebGL = ft, window.Live2DModelJS = q, window.Live2DMotion = J, window.MotionQueueManager = ct, window.PhysicsHair = f, window.AMotion = s, window.PartsDataID = l, window.DrawDataID = b, window.BaseDataID = yt, window.ParamID = u, at.init(); - var At = !1 - }() - }).call(i, e(7)) -}, function(t, i) { - t.exports = { - import: function() { - throw new Error("System.import cannot be used indirectly") - } - } -}, function(t, i, e) { - "use strict"; - - function r(t) { - return t && t.__esModule ? t : { - default: - t - } - } - function o() { - this.models = [], this.count = -1, this.reloadFlg = !1, Live2D.init(), n.Live2DFramework.setPlatformManager(new _. - default) - } - Object.defineProperty(i, "__esModule", { - value: !0 - }), i. -default = o; - var n = e(0), - s = e(9), - _ = r(s), - a = e(10), - h = r(a), - l = e(1), - $ = r(l); - o.prototype.createModel = function() { - var t = new h. - default; - return this.models.push(t), t - }, o.prototype.changeModel = function(t, i) { - if (this.reloadFlg) { - this.reloadFlg = !1; - this.releaseModel(0, t), this.createModel(), this.models[0].load(t, i) - } - }, o.prototype.getModel = function(t) { - return t >= this.models.length ? null : this.models[t] - }, o.prototype.releaseModel = function(t, i) { - this.models.length <= t || (this.models[t].release(i), delete this.models[t], this.models.splice(t, 1)) - }, o.prototype.numModels = function() { - return this.models.length - }, o.prototype.setDrag = function(t, i) { - for (var e = 0; e < this.models.length; e++) this.models[e].setDrag(t, i) - }, o.prototype.maxScaleEvent = function() { - $. - default.DEBUG_LOG && console.log("Max scale event."); - for (var t = 0; t < this.models.length; t++) this.models[t].startRandomMotion($. - default.MOTION_GROUP_PINCH_IN, $. - default.PRIORITY_NORMAL) - }, o.prototype.minScaleEvent = function() { - $. - default.DEBUG_LOG && console.log("Min scale event."); - for (var t = 0; t < this.models.length; t++) this.models[t].startRandomMotion($. - default.MOTION_GROUP_PINCH_OUT, $. - default.PRIORITY_NORMAL) - }, o.prototype.tapEvent = function(t, i) { - $. - default.DEBUG_LOG && console.log("tapEvent view x:" + t + " y:" + i); - for (var e = 0; e < this.models.length; e++) this.models[e].hitTest($. - default.HIT_AREA_HEAD, t, i) ? ($. - default.DEBUG_LOG && console.log("Tap face."), this.models[e].setRandomExpression()): - this.models[e].hitTest($. - default.HIT_AREA_BODY, t, i) ? ($. - default.DEBUG_LOG && console.log("Tap body. 
models[" + e + "]"), this.models[e].startRandomMotion($. - default.MOTION_GROUP_TAP_BODY, $. - default.PRIORITY_NORMAL)) : this.models[e].hitTestCustom("head", t, i) ? ($. - default.DEBUG_LOG && console.log("Tap face."), this.models[e].startRandomMotion($. - default.MOTION_GROUP_FLICK_HEAD, $. - default.PRIORITY_NORMAL)) : this.models[e].hitTestCustom("body", t, i) && ($. - default.DEBUG_LOG && console.log("Tap body. models[" + e + "]"), this.models[e].startRandomMotion($. - default.MOTION_GROUP_TAP_BODY, $. - default.PRIORITY_NORMAL)); - return !0 - } -}, function(t, i, e) { - "use strict"; - - function r() {} - Object.defineProperty(i, "__esModule", { - value: !0 - }), i. -default = r; - var o = e(2); - var requestCache = {}; - r.prototype.loadBytes = function(t, i) { - // Cache 相同的请求,减少请求数量 - if (requestCache[t] !== undefined) { - i(requestCache[t]); - return; - } - var e = new XMLHttpRequest; - e.open("GET", t, !0), e.responseType = "arraybuffer", e.onload = function() { - switch (e.status) { - case 200: - requestCache[t] = e.response; - i(e.response); - break; - default: - console.error("Failed to load (" + e.status + ") : " + t) - } - }, e.send(null) - }, r.prototype.loadString = function(t) { - this.loadBytes(t, function(t) { - return t - }) - }, r.prototype.loadLive2DModel = function(t, i) { - var e = null; - this.loadBytes(t, function(t) { - e = Live2DModelWebGL.loadModel(t), i(e) - }) - }, r.prototype.loadTexture = function(t, i, e, r) { - var n = new Image; - n.crossOrigin = "Anonymous", n.src = e; - n.onload = function() { - var e = (0, o.getContext)(), - s = e.createTexture(); - if (!s) return console.error("Failed to generate gl texture name."), -1; - 0 == t.isPremultipliedAlpha() && e.pixelStorei(e.UNPACK_PREMULTIPLY_ALPHA_WEBGL, 1), e.pixelStorei(e.UNPACK_FLIP_Y_WEBGL, 1), e.activeTexture(e.TEXTURE0), e.bindTexture(e.TEXTURE_2D, s), e.texImage2D(e.TEXTURE_2D, 0, e.RGBA, e.RGBA, e.UNSIGNED_BYTE, n), e.texParameteri(e.TEXTURE_2D, e.TEXTURE_MAG_FILTER, e.LINEAR), e.texParameteri(e.TEXTURE_2D, e.TEXTURE_MIN_FILTER, e.LINEAR_MIPMAP_NEAREST), e.generateMipmap(e.TEXTURE_2D), t.setTexture(i, s), s = null, "function" == typeof r && r() - }, n.onerror = function() { - console.error("Failed to load image : " + e) - } - }, r.prototype.jsonParseFromBytes = function(t) { - var i, e = new Uint8Array(t, 0, 3); - return i = 239 == e[0] && 187 == e[1] && 191 == e[2] ? String.fromCharCode.apply(null, new Uint8Array(t, 3)) : String.fromCharCode.apply(null, new Uint8Array(t)), JSON.parse(i) - }, r.prototype.log = function(t) {} -}, function(t, i, e) { - "use strict"; - - function r(t) { - return t && t.__esModule ? t : { - default: - t - } - } - function o() { - n.L2DBaseModel.prototype.constructor.call(this), this.modelHomeDir = "", this.modelSetting = null, this.tmpMatrix = [] - } - Object.defineProperty(i, "__esModule", { - value: !0 - }), i. -default = o; - var n = e(0), - s = e(11), - _ = r(s), - a = e(1), - h = r(a), - l = e(3), - $ = r(l); - o.prototype = new n.L2DBaseModel, o.prototype.load = function(t, i, e) { - this.setUpdating(!0), this.setInitialized(!1), this.modelHomeDir = i.substring(0, i.lastIndexOf("/") + 1), this.modelSetting = new _. 
- default; - var r = this; - this.modelSetting.loadModelSetting(i, function() { - var t = r.modelHomeDir + r.modelSetting.getModelFile(); - r.loadModelData(t, function(t) { - for (var i = 0; i < r.modelSetting.getTextureNum(); i++) { - if (/^https?:\/\/|^\/\//i.test(r.modelSetting.getTextureFile(i))) var o = r.modelSetting.getTextureFile(i); - else var o = r.modelHomeDir + r.modelSetting.getTextureFile(i); - r.loadTexture(i, o, function() { - if (r.isTexLoaded) { - if (r.modelSetting.getExpressionNum() > 0) { - r.expressions = {}; - for (var t = 0; t < r.modelSetting.getExpressionNum(); t++) { - var i = r.modelSetting.getExpressionName(t), - o = r.modelHomeDir + r.modelSetting.getExpressionFile(t); - r.loadExpression(i, o) - } - } else r.expressionManager = null, r.expressions = {}; - if (r.eyeBlink, null != r.modelSetting.getPhysicsFile() ? r.loadPhysics(r.modelHomeDir + r.modelSetting.getPhysicsFile()) : r.physics = null, null != r.modelSetting.getPoseFile() ? r.loadPose(r.modelHomeDir + r.modelSetting.getPoseFile(), function() { - r.pose.updateParam(r.live2DModel) - }) : r.pose = null, null != r.modelSetting.getLayout()) { - var n = r.modelSetting.getLayout(); - null != n.width && r.modelMatrix.setWidth(n.width), null != n.height && r.modelMatrix.setHeight(n.height), null != n.x && r.modelMatrix.setX(n.x), null != n.y && r.modelMatrix.setY(n.y), null != n.center_x && r.modelMatrix.centerX(n.center_x), null != n.center_y && r.modelMatrix.centerY(n.center_y), null != n.top && r.modelMatrix.top(n.top), null != n.bottom && r.modelMatrix.bottom(n.bottom), null != n.left && r.modelMatrix.left(n.left), null != n.right && r.modelMatrix.right(n.right) - } - if (null != r.modelSetting.getHitAreasCustom()) { - var s = r.modelSetting.getHitAreasCustom(); - null != s.head_x && (h. - default.hit_areas_custom_head_x = s.head_x), null != s.head_y && (h. - default.hit_areas_custom_head_y = s.head_y), null != s.body_x && (h. - default.hit_areas_custom_body_x = s.body_x), null != s.body_y && (h. - default.hit_areas_custom_body_y = s.body_y) - } - for (var t = 0; t < r.modelSetting.getInitParamNum(); t++) r.live2DModel.setParamFloat(r.modelSetting.getInitParamID(t), r.modelSetting.getInitParamValue(t)); - for (var t = 0; t < r.modelSetting.getInitPartsVisibleNum(); t++) r.live2DModel.setPartsOpacity(r.modelSetting.getInitPartsVisibleID(t), r.modelSetting.getInitPartsVisibleValue(t)); - r.live2DModel.saveParam(), r.preloadMotionGroup(h. - default.MOTION_GROUP_IDLE), r.preloadMotionGroup(h. - default.MOTION_GROUP_SLEEPY), r.mainMotionManager.stopAllMotions(), r.setUpdating(!1), r.setInitialized(!0), "function" == typeof e && e() - } - }) - } - }) - }) - }, o.prototype.release = function(t) { - var i = n.Live2DFramework.getPlatformManager(); - t.deleteTexture(i.texture) - }, o.prototype.preloadMotionGroup = function(t) { - for (var i = this, e = 0; e < this.modelSetting.getMotionNum(t); e++) { - var r = this.modelSetting.getMotionFile(t, e); - this.loadMotion(r, this.modelHomeDir + r, function(r) { - r.setFadeIn(i.modelSetting.getMotionFadeIn(t, e)), r.setFadeOut(i.modelSetting.getMotionFadeOut(t, e)) - }) - } - }, o.prototype.update = function() { - if (null == this.live2DModel) return void(h. - default.DEBUG_LOG && console.error("Failed to update.")); - var t = UtSystem.getUserTimeMSec() - this.startTimeMSec, - i = t / 1e3, - e = 2 * i * Math.PI; - if (this.mainMotionManager.isFinished()) { - "1" === sessionStorage.getItem("Sleepy") ? this.startRandomMotion(h. - default.MOTION_GROUP_SLEEPY, h. 
- default.PRIORITY_SLEEPY) : this.startRandomMotion(h. - default.MOTION_GROUP_IDLE, h. - default.PRIORITY_IDLE) - } - this.live2DModel.loadParam(), this.mainMotionManager.updateParam(this.live2DModel) || null != this.eyeBlink && this.eyeBlink.updateParam(this.live2DModel), this.live2DModel.saveParam(), null == this.expressionManager || null == this.expressions || this.expressionManager.isFinished() || this.expressionManager.updateParam(this.live2DModel), this.live2DModel.addToParamFloat("PARAM_ANGLE_X", 30 * this.dragX, 1), this.live2DModel.addToParamFloat("PARAM_ANGLE_Y", 30 * this.dragY, 1), this.live2DModel.addToParamFloat("PARAM_ANGLE_Z", this.dragX * this.dragY * -30, 1), this.live2DModel.addToParamFloat("PARAM_BODY_ANGLE_X", 10 * this.dragX, 1), this.live2DModel.addToParamFloat("PARAM_EYE_BALL_X", this.dragX, 1), this.live2DModel.addToParamFloat("PARAM_EYE_BALL_Y", this.dragY, 1), this.live2DModel.addToParamFloat("PARAM_ANGLE_X", Number(15 * Math.sin(e / 6.5345)), .5), this.live2DModel.addToParamFloat("PARAM_ANGLE_Y", Number(8 * Math.sin(e / 3.5345)), .5), this.live2DModel.addToParamFloat("PARAM_ANGLE_Z", Number(10 * Math.sin(e / 5.5345)), .5), this.live2DModel.addToParamFloat("PARAM_BODY_ANGLE_X", Number(4 * Math.sin(e / 15.5345)), .5), this.live2DModel.setParamFloat("PARAM_BREATH", Number(.5 + .5 * Math.sin(e / 3.2345)), 1), null != this.physics && this.physics.updateParam(this.live2DModel), null == this.lipSync && this.live2DModel.setParamFloat("PARAM_MOUTH_OPEN_Y", this.lipSyncValue), null != this.pose && this.pose.updateParam(this.live2DModel), this.live2DModel.update() - }, o.prototype.setRandomExpression = function() { - var t = []; - for (var i in this.expressions) t.push(i); - var e = parseInt(Math.random() * t.length); - this.setExpression(t[e]) - }, o.prototype.startRandomMotion = function(t, i) { - var e = this.modelSetting.getMotionNum(t), - r = parseInt(Math.random() * e); - this.startMotion(t, r, i) - }, o.prototype.startMotion = function(t, i, e) { - var r = this.modelSetting.getMotionFile(t, i); - if (null == r || "" == r) return void(h. - default.DEBUG_LOG && console.error("Failed to motion.")); - if (e == h. - default.PRIORITY_FORCE) this.mainMotionManager.setReservePriority(e); - else if (!this.mainMotionManager.reserveMotion(e)) return void(h. - default.DEBUG_LOG && console.log("Motion is running.")); - var o, n = this; - null == this.motions[t] ? this.loadMotion(null, this.modelHomeDir + r, function(r) { - o = r, n.setFadeInFadeOut(t, i, e, o) - }) : (o = this.motions[t], n.setFadeInFadeOut(t, i, e, o)) - }, o.prototype.setFadeInFadeOut = function(t, i, e, r) { - var o = this.modelSetting.getMotionFile(t, i); - if (r.setFadeIn(this.modelSetting.getMotionFadeIn(t, i)), r.setFadeOut(this.modelSetting.getMotionFadeOut(t, i)), h. - default.DEBUG_LOG && console.log("Start motion : " + o), null == this.modelSetting.getMotionSound(t, i)) this.mainMotionManager.startMotionPrio(r, e); - else { - var n = this.modelSetting.getMotionSound(t, i), - s = document.createElement("audio"); - s.src = this.modelHomeDir + n, h. - default.DEBUG_LOG && console.log("Start sound : " + n), s.play(), this.mainMotionManager.startMotionPrio(r, e) - } - }, o.prototype.setExpression = function(t) { - var i = this.expressions[t]; - h. - default.DEBUG_LOG && console.log("Expression : " + t), this.expressionManager.startMotion(i, !1) - }, o.prototype.draw = function(t) { - $. - default.push(), $. - default.multMatrix(this.modelMatrix.getArray()), this.tmpMatrix = $. 
- default.getMatrix(), this.live2DModel.setMatrix(this.tmpMatrix), this.live2DModel.draw(), $. - default.pop() - }, o.prototype.hitTest = function(t, i, e) { - for (var r = this.modelSetting.getHitAreaNum(), o = 0; o < r; o++) if (t == this.modelSetting.getHitAreaName(o)) { - var n = this.modelSetting.getHitAreaID(o); - return this.hitTestSimple(n, i, e) - } - return !1 - }, o.prototype.hitTestCustom = function(t, i, e) { - return "head" == t ? this.hitTestSimpleCustom(h. - default.hit_areas_custom_head_x, h. - default.hit_areas_custom_head_y, i, e) : "body" == t && this.hitTestSimpleCustom(h. - default.hit_areas_custom_body_x, h. - default.hit_areas_custom_body_y, i, e) - } -}, function(t, i, e) { - "use strict"; - - function r() { - this.NAME = "name", this.ID = "id", this.MODEL = "model", this.TEXTURES = "textures", this.HIT_AREAS = "hit_areas", this.PHYSICS = "physics", this.POSE = "pose", this.EXPRESSIONS = "expressions", this.MOTION_GROUPS = "motions", this.SOUND = "sound", this.FADE_IN = "fade_in", this.FADE_OUT = "fade_out", this.LAYOUT = "layout", this.HIT_AREAS_CUSTOM = "hit_areas_custom", this.INIT_PARAM = "init_param", this.INIT_PARTS_VISIBLE = "init_parts_visible", this.VALUE = "val", this.FILE = "file", this.json = {} - } - Object.defineProperty(i, "__esModule", { - value: !0 - }), i. -default = r; - var o = e(0); - r.prototype.loadModelSetting = function(t, i) { - var e = this; - o.Live2DFramework.getPlatformManager().loadBytes(t, function(t) { - var r = String.fromCharCode.apply(null, new Uint8Array(t)); - e.json = JSON.parse(r), i() - }) - }, r.prototype.getTextureFile = function(t) { - return null == this.json[this.TEXTURES] || null == this.json[this.TEXTURES][t] ? null : this.json[this.TEXTURES][t] - }, r.prototype.getModelFile = function() { - return this.json[this.MODEL] - }, r.prototype.getTextureNum = function() { - return null == this.json[this.TEXTURES] ? 0 : this.json[this.TEXTURES].length - }, r.prototype.getHitAreaNum = function() { - return null == this.json[this.HIT_AREAS] ? 0 : this.json[this.HIT_AREAS].length - }, r.prototype.getHitAreaID = function(t) { - return null == this.json[this.HIT_AREAS] || null == this.json[this.HIT_AREAS][t] ? null : this.json[this.HIT_AREAS][t][this.ID] - }, r.prototype.getHitAreaName = function(t) { - return null == this.json[this.HIT_AREAS] || null == this.json[this.HIT_AREAS][t] ? null : this.json[this.HIT_AREAS][t][this.NAME] - }, r.prototype.getPhysicsFile = function() { - return this.json[this.PHYSICS] - }, r.prototype.getPoseFile = function() { - return this.json[this.POSE] - }, r.prototype.getExpressionNum = function() { - return null == this.json[this.EXPRESSIONS] ? 0 : this.json[this.EXPRESSIONS].length - }, r.prototype.getExpressionFile = function(t) { - return null == this.json[this.EXPRESSIONS] ? null : this.json[this.EXPRESSIONS][t][this.FILE] - }, r.prototype.getExpressionName = function(t) { - return null == this.json[this.EXPRESSIONS] ? null : this.json[this.EXPRESSIONS][t][this.NAME] - }, r.prototype.getLayout = function() { - return this.json[this.LAYOUT] - }, r.prototype.getHitAreasCustom = function() { - return this.json[this.HIT_AREAS_CUSTOM] - }, r.prototype.getInitParamNum = function() { - return null == this.json[this.INIT_PARAM] ? 0 : this.json[this.INIT_PARAM].length - }, r.prototype.getMotionNum = function(t) { - return null == this.json[this.MOTION_GROUPS] || null == this.json[this.MOTION_GROUPS][t] ? 
0 : this.json[this.MOTION_GROUPS][t].length - }, r.prototype.getMotionFile = function(t, i) { - return null == this.json[this.MOTION_GROUPS] || null == this.json[this.MOTION_GROUPS][t] || null == this.json[this.MOTION_GROUPS][t][i] ? null : this.json[this.MOTION_GROUPS][t][i][this.FILE] - }, r.prototype.getMotionSound = function(t, i) { - return null == this.json[this.MOTION_GROUPS] || null == this.json[this.MOTION_GROUPS][t] || null == this.json[this.MOTION_GROUPS][t][i] || null == this.json[this.MOTION_GROUPS][t][i][this.SOUND] ? null : this.json[this.MOTION_GROUPS][t][i][this.SOUND] - }, r.prototype.getMotionFadeIn = function(t, i) { - return null == this.json[this.MOTION_GROUPS] || null == this.json[this.MOTION_GROUPS][t] || null == this.json[this.MOTION_GROUPS][t][i] || null == this.json[this.MOTION_GROUPS][t][i][this.FADE_IN] ? 1e3 : this.json[this.MOTION_GROUPS][t][i][this.FADE_IN] - }, r.prototype.getMotionFadeOut = function(t, i) { - return null == this.json[this.MOTION_GROUPS] || null == this.json[this.MOTION_GROUPS][t] || null == this.json[this.MOTION_GROUPS][t][i] || null == this.json[this.MOTION_GROUPS][t][i][this.FADE_OUT] ? 1e3 : this.json[this.MOTION_GROUPS][t][i][this.FADE_OUT] - }, r.prototype.getInitParamID = function(t) { - return null == this.json[this.INIT_PARAM] || null == this.json[this.INIT_PARAM][t] ? null : this.json[this.INIT_PARAM][t][this.ID] - }, r.prototype.getInitParamValue = function(t) { - return null == this.json[this.INIT_PARAM] || null == this.json[this.INIT_PARAM][t] ? NaN : this.json[this.INIT_PARAM][t][this.VALUE] - }, r.prototype.getInitPartsVisibleNum = function() { - return null == this.json[this.INIT_PARTS_VISIBLE] ? 0 : this.json[this.INIT_PARTS_VISIBLE].length - }, r.prototype.getInitPartsVisibleID = function(t) { - return null == this.json[this.INIT_PARTS_VISIBLE] || null == this.json[this.INIT_PARTS_VISIBLE][t] ? null : this.json[this.INIT_PARTS_VISIBLE][t][this.ID] - }, r.prototype.getInitPartsVisibleValue = function(t) { - return null == this.json[this.INIT_PARTS_VISIBLE] || null == this.json[this.INIT_PARTS_VISIBLE][t] ? 
NaN : this.json[this.INIT_PARTS_VISIBLE][t][this.VALUE] - } -}]); -//# sourceMappingURL=live2d.js.map diff --git a/spaces/Loren/Streamlit_OCR_comparator/configs/_base_/recog_models/satrn.py b/spaces/Loren/Streamlit_OCR_comparator/configs/_base_/recog_models/satrn.py deleted file mode 100644 index f7a6de8637c77a18a930e032bfb752434b173ba4..0000000000000000000000000000000000000000 --- a/spaces/Loren/Streamlit_OCR_comparator/configs/_base_/recog_models/satrn.py +++ /dev/null @@ -1,11 +0,0 @@ -label_convertor = dict( - type='AttnConvertor', dict_type='DICT36', with_unknown=True, lower=True) - -model = dict( - type='SATRN', - backbone=dict(type='ShallowCNN'), - encoder=dict(type='SatrnEncoder'), - decoder=dict(type='TFDecoder'), - loss=dict(type='TFLoss'), - label_convertor=label_convertor, - max_seq_len=40) diff --git a/spaces/LuxOAI/zenFace-Recognition-SDK/gradio/demo.py b/spaces/LuxOAI/zenFace-Recognition-SDK/gradio/demo.py deleted file mode 100644 index cf9a69c66c814da496a404d7f7e1519d425a15f4..0000000000000000000000000000000000000000 --- a/spaces/LuxOAI/zenFace-Recognition-SDK/gradio/demo.py +++ /dev/null @@ -1,114 +0,0 @@ -import gradio as gr -import requests -import json -from PIL import Image - -def compare_face(frame1, frame2): - url = "http://127.0.0.1:8000/api/compare_face" - files = {'image1': open(frame1, 'rb'), 'image2': open(frame2, 'rb')} - - r = requests.post(url=url, files=files) - faces = None - - try: - image1 = Image.open(frame1) - image2 = Image.open(frame2) - - face1 = None - face2 = None - data = r.json().get('data') - if data.get('face1') is not None: - face = data.get('face1') - x1 = face.get('x1') - y1 = face.get('y1') - x2 = face.get('x2') - y2 = face.get('y2') - if x1 < 0: - x1 = 0 - if y1 < 0: - y1 = 0 - if x2 >= image1.width: - x2 = image1.width - 1 - if y2 >= image1.height: - y2 = image1.height - 1 - - face1 = image1.crop((x1, y1, x2, y2)) - face_image_ratio = face1.width / float(face1.height) - resized_w = int(face_image_ratio * 150) - resized_h = 150 - - face1 = face1.resize((int(resized_w), int(resized_h))) - - if data.get('face2') is not None: - face = data.get('face2') - x1 = face.get('x1') - y1 = face.get('y1') - x2 = face.get('x2') - y2 = face.get('y2') - - if x1 < 0: - x1 = 0 - if y1 < 0: - y1 = 0 - if x2 >= image2.width: - x2 = image2.width - 1 - if y2 >= image2.height: - y2 = image2.height - 1 - - face2 = image2.crop((x1, y1, x2, y2)) - face_image_ratio = face2.width / float(face2.height) - resized_w = int(face_image_ratio * 150) - resized_h = 150 - - face2 = face2.resize((int(resized_w), int(resized_h))) - - if face1 is not None and face2 is not None: - new_image = Image.new('RGB',(face1.width + face2.width + 10, 150), (80,80,80)) - - new_image.paste(face1,(0,0)) - new_image.paste(face2,(face1.width + 10, 0)) - faces = new_image.copy() - elif face1 is not None and face2 is None: - new_image = Image.new('RGB',(face1.width + face1.width + 10, 150), (80,80,80)) - - new_image.paste(face1,(0,0)) - faces = new_image.copy() - elif face1 is None and face2 is not None: - new_image = Image.new('RGB',(face2.width + face2.width + 10, 150), (80,80,80)) - - new_image.paste(face2,(face2.width + 10, 0)) - faces = new_image.copy() - except: - pass - - return [r.json(), faces] - -with gr.Blocks() as demo: - gr.Markdown( - """ - # Face Recognition - Get your own Face Recognition Server by duplicating this space.
    - Or run on your own machine using docker.
    - ```docker run -it -p 7860:7860 --platform=linux/amd64 \ - -e LICENSE_KEY="YOUR_VALUE_HERE" \ - registry.hf.space/faceonlive-face-recognition-sdk:latest ```

    - Contact us at https://faceonlive.com for issues and support.
    - """ - ) - with gr.Row(): - with gr.Column(): - compare_face_input1 = gr.Image(type='filepath', height=480) - gr.Examples(['gradio/examples/1.jpg', 'gradio/examples/2.jpg'], - inputs=compare_face_input1) - compare_face_button = gr.Button("Compare Face") - with gr.Column(): - compare_face_input2 = gr.Image(type='filepath', height=480) - gr.Examples(['gradio/examples/3.jpg', 'gradio/examples/4.jpg'], - inputs=compare_face_input2) - with gr.Column(): - compare_face_output = gr.Image(type="pil", height=150) - compare_result_output = gr.JSON(label='Result') - - compare_face_button.click(compare_face, inputs=[compare_face_input1, compare_face_input2], outputs=[compare_result_output, compare_face_output]) - -demo.launch(server_name="0.0.0.0", server_port=7860) \ No newline at end of file diff --git a/spaces/MCkernick/Image_Restoration_Colorization/Global/data/base_dataset.py b/spaces/MCkernick/Image_Restoration_Colorization/Global/data/base_dataset.py deleted file mode 100644 index 5f0ac562eacc926b606f70c9dea680021dec2edc..0000000000000000000000000000000000000000 --- a/spaces/MCkernick/Image_Restoration_Colorization/Global/data/base_dataset.py +++ /dev/null @@ -1,114 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -import torch.utils.data as data -from PIL import Image -import torchvision.transforms as transforms -import numpy as np -import random - -class BaseDataset(data.Dataset): - def __init__(self): - super(BaseDataset, self).__init__() - - def name(self): - return 'BaseDataset' - - def initialize(self, opt): - pass - -def get_params(opt, size): - w, h = size - new_h = h - new_w = w - if opt.resize_or_crop == 'resize_and_crop': - new_h = new_w = opt.loadSize - - if opt.resize_or_crop == 'scale_width_and_crop': # we scale the shorter side into 256 - - if w 0.5 - return {'crop_pos': (x, y), 'flip': flip} - -def get_transform(opt, params, method=Image.BICUBIC, normalize=True): - transform_list = [] - if 'resize' in opt.resize_or_crop: - osize = [opt.loadSize, opt.loadSize] - transform_list.append(transforms.Scale(osize, method)) - elif 'scale_width' in opt.resize_or_crop: - # transform_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.loadSize, method))) ## Here , We want the shorter side to match 256, and Scale will finish it. - transform_list.append(transforms.Scale(256,method)) - - if 'crop' in opt.resize_or_crop: - if opt.isTrain: - transform_list.append(transforms.Lambda(lambda img: __crop(img, params['crop_pos'], opt.fineSize))) - else: - if opt.test_random_crop: - transform_list.append(transforms.RandomCrop(opt.fineSize)) - else: - transform_list.append(transforms.CenterCrop(opt.fineSize)) - - ## when testing, for ablation study, choose center_crop directly. 
- - - - if opt.resize_or_crop == 'none': - base = float(2 ** opt.n_downsample_global) - if opt.netG == 'local': - base *= (2 ** opt.n_local_enhancers) - transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base, method))) - - if opt.isTrain and not opt.no_flip: - transform_list.append(transforms.Lambda(lambda img: __flip(img, params['flip']))) - - transform_list += [transforms.ToTensor()] - - if normalize: - transform_list += [transforms.Normalize((0.5, 0.5, 0.5), - (0.5, 0.5, 0.5))] - return transforms.Compose(transform_list) - -def normalize(): - return transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) - -def __make_power_2(img, base, method=Image.BICUBIC): - ow, oh = img.size - h = int(round(oh / base) * base) - w = int(round(ow / base) * base) - if (h == oh) and (w == ow): - return img - return img.resize((w, h), method) - -def __scale_width(img, target_width, method=Image.BICUBIC): - ow, oh = img.size - if (ow == target_width): - return img - w = target_width - h = int(target_width * oh / ow) - return img.resize((w, h), method) - -def __crop(img, pos, size): - ow, oh = img.size - x1, y1 = pos - tw = th = size - if (ow > tw or oh > th): - return img.crop((x1, y1, x1 + tw, y1 + th)) - return img - -def __flip(img, flip): - if flip: - return img.transpose(Image.FLIP_LEFT_RIGHT) - return img diff --git a/spaces/Manjushri/MusicGen/audiocraft/modules/streaming.py b/spaces/Manjushri/MusicGen/audiocraft/modules/streaming.py deleted file mode 100644 index fdbdf5e90fc0c6560873d66bf273460b38e5ed7e..0000000000000000000000000000000000000000 --- a/spaces/Manjushri/MusicGen/audiocraft/modules/streaming.py +++ /dev/null @@ -1,135 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -""" -Streaming module API that should be implemented by all Streaming components, -""" - -from contextlib import contextmanager -import typing as tp -from torch import nn -import torch - - -State = tp.Dict[str, torch.Tensor] - - -class StreamingModule(nn.Module): - """Common API for streaming components. - - Each streaming component has a streaming state, which is just a dict[str, Tensor]. - By convention, the first dim of each tensor must be the batch size. - Don't use dots in the key names, as this would clash with submodules - (like in state_dict). - - If `self._is_streaming` is True, the component should use and remember - the proper state inside `self._streaming_state`. - - To set a streaming component in streaming state, use - - with module.streaming(): - ... - - This will automatically reset the streaming state when exiting the context manager. - This also automatically propagates to all streaming children module. - - Some module might also implement the `StreamingModule.flush` method, although - this one is trickier, as all parents module must be StreamingModule and implement - it as well for it to work properly. See `StreamingSequential` after. 
- """ - def __init__(self) -> None: - super().__init__() - self._streaming_state: State = {} - self._is_streaming = False - - def _apply_named_streaming(self, fn: tp.Any): - for name, module in self.named_modules(): - if isinstance(module, StreamingModule): - fn(name, module) - - def _set_streaming(self, streaming: bool): - def _set_streaming(name, module): - module._is_streaming = streaming - self._apply_named_streaming(_set_streaming) - - @contextmanager - def streaming(self): - """Context manager to enter streaming mode. Reset streaming state on exit. - """ - self._set_streaming(True) - try: - yield - finally: - self._set_streaming(False) - self.reset_streaming() - - def reset_streaming(self): - """Reset the streaming state. - """ - def _reset(name: str, module: StreamingModule): - module._streaming_state.clear() - - self._apply_named_streaming(_reset) - - def get_streaming_state(self) -> State: - """Return the streaming state, including that of sub-modules. - """ - state: State = {} - - def _add(name: str, module: StreamingModule): - if name: - name += "." - for key, value in module._streaming_state.items(): - state[name + key] = value - - self._apply_named_streaming(_add) - return state - - def set_streaming_state(self, state: State): - """Set the streaming state, including that of sub-modules. - """ - state = dict(state) - - def _set(name: str, module: StreamingModule): - if name: - name += "." - module._streaming_state.clear() - for key, value in list(state.items()): - # complexity is not ideal here, but probably fine. - if key.startswith(name): - local_key = key[len(name):] - if '.' not in local_key: - module._streaming_state[local_key] = value - del state[key] - - self._apply_named_streaming(_set) - assert len(state) == 0, list(state.keys()) - - def flush(self, x: tp.Optional[torch.Tensor] = None): - """Flush any remaining outputs that were waiting for completion. - Typically, for convolutions, this will add the final padding - and process the last buffer. - - This should take an optional argument `x`, which will be provided - if a module before this one in the streaming pipeline has already - spitted out a flushed out buffer. - """ - if x is None: - return None - else: - return self(x) - - -class StreamingSequential(StreamingModule, nn.Sequential): - """A streaming compatible alternative of `nn.Sequential`. - """ - def flush(self, x: tp.Optional[torch.Tensor] = None): - for module in self: - if isinstance(module, StreamingModule): - x = module.flush(x) - elif x is not None: - x = module(x) - return x diff --git a/spaces/MarcSkovMadsen/awesome-panel/pages/index.py b/spaces/MarcSkovMadsen/awesome-panel/pages/index.py deleted file mode 100644 index e3749a2652f0a6a2c63be323b5f20fb54f5fbd15..0000000000000000000000000000000000000000 --- a/spaces/MarcSkovMadsen/awesome-panel/pages/index.py +++ /dev/null @@ -1,22 +0,0 @@ -import panel as pn - -pn.extension(sizing_mode="stretch_width") - -INTRO = """ -# Awesome Panel on Hugging Face Spaces - -Awesome Panel supports the powerful data app framework Panel and its users. Now on Hugging Face Spaces 🤗. - -You can also deploy Panel apps on Hugging Face Spaces. For details check out the Panel Hugging Face Deployment guide. - -Now check out some of our example apps by clicking the links below. - -[videostream](videostream), ... 
more is coming soon 🤗 -""" - -some_component = pn.panel(INTRO) - -pn.template.FastListTemplate( - site="Awesome Panel 🤗", title="Hello Hugging Face World", main=[some_component], - favicon="https://sharing.awesome-panel.org/favicon.ico", accent="#fef3c7", header_color="#4b5563" -).servable() diff --git a/spaces/MashiroSA/sovits-emu-voice-transform/preprocess_hubert_f0.py b/spaces/MashiroSA/sovits-emu-voice-transform/preprocess_hubert_f0.py deleted file mode 100644 index 763fb0d65540ed4d62b269914e81c740f3ff6bba..0000000000000000000000000000000000000000 --- a/spaces/MashiroSA/sovits-emu-voice-transform/preprocess_hubert_f0.py +++ /dev/null @@ -1,101 +0,0 @@ -import math -import multiprocessing -import os -import argparse -from random import shuffle - -import torch -from glob import glob -from tqdm import tqdm -from modules.mel_processing import spectrogram_torch - -import utils -import logging - -logging.getLogger("numba").setLevel(logging.WARNING) -import librosa -import numpy as np - -hps = utils.get_hparams_from_file("configs/config.json") -sampling_rate = hps.data.sampling_rate -hop_length = hps.data.hop_length - - -def process_one(filename, hmodel): - # print(filename) - wav, sr = librosa.load(filename, sr=sampling_rate) - soft_path = filename + ".soft.pt" - if not os.path.exists(soft_path): - device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - wav16k = librosa.resample(wav, orig_sr=sampling_rate, target_sr=16000) - wav16k = torch.from_numpy(wav16k).to(device) - c = utils.get_hubert_content(hmodel, wav_16k_tensor=wav16k) - torch.save(c.cpu(), soft_path) - - f0_path = filename + ".f0.npy" - if not os.path.exists(f0_path): - f0 = utils.compute_f0_dio( - wav, sampling_rate=sampling_rate, hop_length=hop_length - ) - np.save(f0_path, f0) - - spec_path = filename.replace(".wav", ".spec.pt") - if not os.path.exists(spec_path): - # Process spectrogram - # The following code can't be replaced by torch.FloatTensor(wav) - # because load_wav_to_torch return a tensor that need to be normalized - - audio, sr = utils.load_wav_to_torch(filename) - if sr != hps.data.sampling_rate: - raise ValueError( - "{} SR doesn't match target {} SR".format( - sr, hps.data.sampling_rate - ) - ) - - audio_norm = audio / hps.data.max_wav_value - audio_norm = audio_norm.unsqueeze(0) - - spec = spectrogram_torch( - audio_norm, - hps.data.filter_length, - hps.data.sampling_rate, - hps.data.hop_length, - hps.data.win_length, - center=False, - ) - spec = torch.squeeze(spec, 0) - torch.save(spec, spec_path) - - -def process_batch(filenames): - print("Loading hubert for content...") - device = "cuda" if torch.cuda.is_available() else "cpu" - hmodel = utils.get_hubert_model().to(device) - print("Loaded hubert.") - for filename in tqdm(filenames): - process_one(filename, hmodel) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument( - "--in_dir", type=str, default="dataset/44k", help="path to input dir" - ) - - args = parser.parse_args() - filenames = glob(f"{args.in_dir}/*/*.wav", recursive=True) # [:10] - shuffle(filenames) - multiprocessing.set_start_method("spawn", force=True) - - num_processes = 1 - chunk_size = int(math.ceil(len(filenames) / num_processes)) - chunks = [ - filenames[i : i + chunk_size] for i in range(0, len(filenames), chunk_size) - ] - print([len(c) for c in chunks]) - processes = [ - multiprocessing.Process(target=process_batch, args=(chunk,)) for chunk in chunks - ] - for p in processes: - p.start() diff --git 
a/spaces/NCTCMumbai/NCTC/models/research/adversarial_text/gen_vocab.py b/spaces/NCTCMumbai/NCTC/models/research/adversarial_text/gen_vocab.py deleted file mode 100644 index 17b91864ce727886adaafa5402fd9e62897563ca..0000000000000000000000000000000000000000 --- a/spaces/NCTCMumbai/NCTC/models/research/adversarial_text/gen_vocab.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright 2017 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Generates vocabulary and term frequency files for datasets.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from six import iteritems - -from collections import defaultdict - -# Dependency imports - -import tensorflow as tf - -from data import data_utils -from data import document_generators - -flags = tf.app.flags -FLAGS = flags.FLAGS - -# Flags controlling input are in document_generators.py - -flags.DEFINE_string('output_dir', '', - 'Path to save vocab.txt and vocab_freq.txt.') - -flags.DEFINE_boolean('use_unlabeled', True, 'Whether to use the ' - 'unlabeled sentiment dataset in the vocabulary.') -flags.DEFINE_boolean('include_validation', False, 'Whether to include the ' - 'validation set in the vocabulary.') -flags.DEFINE_integer('doc_count_threshold', 1, 'The minimum number of ' - 'documents a word or bigram should occur in to keep ' - 'it in the vocabulary.') - -MAX_VOCAB_SIZE = 100 * 1000 - - -def fill_vocab_from_doc(doc, vocab_freqs, doc_counts): - """Fills vocabulary and doc counts with tokens from doc. - - Args: - doc: Document to read tokens from. 
- vocab_freqs: dict - doc_counts: dict - - Returns: - None - """ - doc_seen = set() - - for token in document_generators.tokens(doc): - if doc.add_tokens or token in vocab_freqs: - vocab_freqs[token] += 1 - if token not in doc_seen: - doc_counts[token] += 1 - doc_seen.add(token) - - -def main(_): - tf.logging.set_verbosity(tf.logging.INFO) - vocab_freqs = defaultdict(int) - doc_counts = defaultdict(int) - - # Fill vocabulary frequencies map and document counts map - for doc in document_generators.documents( - dataset='train', - include_unlabeled=FLAGS.use_unlabeled, - include_validation=FLAGS.include_validation): - fill_vocab_from_doc(doc, vocab_freqs, doc_counts) - - # Filter out low-occurring terms - vocab_freqs = dict((term, freq) for term, freq in iteritems(vocab_freqs) - if doc_counts[term] > FLAGS.doc_count_threshold) - - # Sort by frequency - ordered_vocab_freqs = data_utils.sort_vocab_by_frequency(vocab_freqs) - - # Limit vocab size - ordered_vocab_freqs = ordered_vocab_freqs[:MAX_VOCAB_SIZE] - - # Add EOS token - ordered_vocab_freqs.append((data_utils.EOS_TOKEN, 1)) - - # Write - tf.gfile.MakeDirs(FLAGS.output_dir) - data_utils.write_vocab_and_frequency(ordered_vocab_freqs, FLAGS.output_dir) - - -if __name__ == '__main__': - tf.app.run() diff --git a/spaces/NiansuhAI/chat/README.md b/spaces/NiansuhAI/chat/README.md deleted file mode 100644 index 1d2c4bf33cfdfe6fcf60d941a53da82f25225ded..0000000000000000000000000000000000000000 --- a/spaces/NiansuhAI/chat/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: ChatGPT | Niansuh -emoji: 🤖 -colorFrom: blue -colorTo: yellow -sdk: docker -pinned: true -license: mit -app_port: 3000 ---- \ No newline at end of file diff --git a/spaces/OAOA/DifFace/datapipe/prepare/face/make_testing_data_bicubic.py b/spaces/OAOA/DifFace/datapipe/prepare/face/make_testing_data_bicubic.py deleted file mode 100644 index 9459ace94a29d35d4dc42294c71ece885bbb78b5..0000000000000000000000000000000000000000 --- a/spaces/OAOA/DifFace/datapipe/prepare/face/make_testing_data_bicubic.py +++ /dev/null @@ -1,84 +0,0 @@ -#!/usr/bin/env python -# -*- coding:utf-8 -*- -# Power by Zongsheng Yue 2022-07-16 12:42:42 - -import sys -from pathlib import Path -sys.path.append(str(Path(__file__).resolve().parents[3])) - -import os -import math -import torch -import argparse -from einops import rearrange -from datapipe.datasets import DatasetBicubic - -from utils import util_image -from utils import util_common - -parser = argparse.ArgumentParser() -parser.add_argument( - "--files_txt", - type=str, - default='./datapipe/files_txt/celeba512_val.txt', - help="File names") -parser.add_argument( - "--sf", - type=int, - default=8, - help="Number of trainging iamges", - ) -parser.add_argument( - "--bs", - type=int, - default=8, - help="Batch size", - ) -parser.add_argument( - "--save_dir", - type=str, - default='', - help="Folder to save the fake iamges", - ) -parser.add_argument( - "--num_images", - type=int, - default=100, - help="Number of iamges", - ) -args = parser.parse_args() - -save_dir = Path(args.save_dir) -if not save_dir.stem.endswith(f'x{args.sf}'): - save_dir = save_dir.parent / f"{save_dir.stem}_x{args.sf}" -util_common.mkdir(save_dir, delete=True) - -dataset = DatasetBicubic( - files_txt=args.files_txt, - up_back=True, - need_gt_path=True, - sf=args.sf, - length=args.num_images, - ) -dataloader = torch.utils.data.DataLoader( - dataset, - batch_size=args.bs, - drop_last=False, - num_workers=4, - pin_memory=False, - ) - -for ii, data_batch in enumerate(dataloader): - 
im_lq_batch = data_batch['lq'] - im_path_batch = data_batch['gt_path'] - print(f"Processing: {ii+1}/{math.ceil(len(dataset) / args.bs)}...") - - for jj in range(im_lq_batch.shape[0]): - im_lq = rearrange( - im_lq_batch[jj].clamp(0.0, 1.0).numpy(), - 'c h w -> h w c', - ) - im_name = Path(im_path_batch[jj]).name - im_path = save_dir / im_name - util_image.imwrite(im_lq, im_path, chn='rgb', dtype_in='float32') - diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/truncated_bptt/transformer_xl_model.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/truncated_bptt/transformer_xl_model.py deleted file mode 100644 index a6c8b25a07276c2ee30c0aa5f0e4b0a2837ed5ca..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/truncated_bptt/transformer_xl_model.py +++ /dev/null @@ -1,155 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import logging -from dataclasses import dataclass, field -from typing import Dict, List, Optional - -import torch -from fairseq.dataclass import FairseqDataclass -from fairseq.models import ( - FairseqIncrementalDecoder, - FairseqLanguageModel, - register_model, -) -from fairseq.modules.checkpoint_activations import checkpoint_wrapper -from omegaconf import II - - -logger = logging.getLogger(__name__) - - -@dataclass -class TransformerXLConfig(FairseqDataclass): - # defaults come from the original Transformer-XL code - cutoffs: List[int] = field(default_factory=lambda: [20000, 40000, 200000]) - d_model: int = 500 - n_head: int = 10 - d_head: int = 50 - d_inner: int = 1000 - div_val: int = 1 - n_layer: int = 12 - mem_len: int = 0 - clamp_len: int = -1 - same_length: bool = False - dropout: float = 0.0 - dropatt: float = 0.0 - checkpoint_activations: bool = False - offload_activations: bool = False - max_target_positions: int = II("task.max_target_positions") - - -@register_model("transformer_xl", dataclass=TransformerXLConfig) -class TransformerXLLanguageModel(FairseqLanguageModel): - @classmethod - def build_model(cls, cfg: TransformerXLConfig, task): - return cls(TransformerXLDecoder(cfg, task)) - - -class TransformerXLDecoder(FairseqIncrementalDecoder): - def __init__(self, cfg, task): - try: - from transformers.models.transfo_xl import ( - TransfoXLConfig, - TransfoXLLMHeadModel, - ) - except ImportError: - from transformers.configuration_transfo_xl import TransfoXLConfig - from transformers.modeling_transfo_xl import TransfoXLLMHeadModel - - super().__init__(task.target_dictionary) - self.cfg = cfg - - # remove any cutoffs larger than the vocab size - cutoffs = [ - cutoff for cutoff in cfg.cutoffs if cutoff < len(task.target_dictionary) - ] - - config = TransfoXLConfig( - vocab_size=len(task.target_dictionary), - cutoffs=cutoffs, - d_model=cfg.d_model, - d_embed=cfg.d_model, - n_head=cfg.n_head, - d_head=cfg.d_head, - d_inner=cfg.d_inner, - div_val=cfg.div_val, - n_layer=cfg.n_layer, - mem_len=cfg.mem_len, - clamp_len=cfg.clamp_len, - same_length=cfg.same_length, - dropout=cfg.dropout, - dropatt=cfg.dropatt, - ) - logger.info(config) - self.model = TransfoXLLMHeadModel(config) - - # Workaround a bug in huggingface's ``ProjectedAdaptiveLogSoftmax`` - # which adds ``None`` values to an ``nn.ParameterList``, which is not - # supported in PyTorch. Instead we can replace this with an - # ``nn.ModuleList``, which does support ``None`` values. 
- try: - if all(p is None for p in self.model.crit.out_projs._parameters.values()): - self.model.crit.out_projs = torch.nn.ModuleList( - [None] * len(self.model.crit.out_projs._parameters) - ) - except Exception: - pass - - if cfg.checkpoint_activations or cfg.offload_activations: - for i in range(len(self.model.transformer.layers)): - self.model.transformer.layers[i] = checkpoint_wrapper( - self.model.transformer.layers[i], - offload_to_cpu=cfg.offload_activations, - ) - # TODO: may save mem to wrap(layer.pos_ff.CoreNet[3]) - - self._mems = None - - def forward( - self, - src_tokens, - src_lengths=None, # unused - incremental_state: Optional[Dict[str, List[torch.Tensor]]] = None, - encoder_out=None, - ): - if incremental_state is not None: # used during inference - mems = self.get_incremental_state(incremental_state, "mems") - src_tokens = src_tokens[:, -1:] # only keep the most recent token - else: - mems = self._mems - - output = self.model( - input_ids=src_tokens, - mems=mems, - return_dict=False, - ) - - if len(output) >= 2: - if incremental_state is not None: - self.set_incremental_state(incremental_state, "mems", output[1]) - else: - self._mems = output[1] - - return (output[0],) - - def max_positions(self): - return self.cfg.max_target_positions - - def reorder_incremental_state( - self, - incremental_state: Dict[str, Dict[str, Optional[torch.Tensor]]], - new_order: torch.Tensor, - ): - """Reorder incremental state. - - This will be called when the order of the input has changed from the - previous time step. A typical use case is beam search, where the input - order changes between time steps based on the selection of beams. - """ - mems = self.get_incremental_state(incremental_state, "mems") - if mems is not None: - new_mems = [mems_i.index_select(1, new_order) for mems_i in mems] - self.set_incremental_state(incremental_state, "mems", new_mems) diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/models/roberta/__init__.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/models/roberta/__init__.py deleted file mode 100644 index 4cd723ae96aec8e3182773483f123109d23b620e..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/models/roberta/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from .hub_interface import * # noqa -from .model import * # noqa -from .enc_dec import * # noqa -from .model_camembert import * # noqa -from .model_gottbert import * # noqa -from .model_xlmr import * # noqa diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/modules/unfold.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/modules/unfold.py deleted file mode 100644 index 138272f1ef4f673b29e36aed4531106f7ce95968..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/modules/unfold.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import torch.nn.functional as F - - -def unfold1d(x, kernel_size, padding_l, pad_value=0): - """unfold T x B x C to T x B x C x K""" - if kernel_size > 1: - T, B, C = x.size() - x = F.pad( - x, (0, 0, 0, 0, padding_l, kernel_size - 1 - padding_l), value=pad_value - ) - x = x.as_strided((T, B, C, kernel_size), (B * C, C, 1, B * C)) - else: - x = x.unsqueeze(3) - return x diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/multilingual/data_scripts/download_wat19_my.sh b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/multilingual/data_scripts/download_wat19_my.sh deleted file mode 100644 index c1e2d47287a29af4576e7a63641e8152ecb63c44..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/multilingual/data_scripts/download_wat19_my.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash -# Copyright (c) Facebook, Inc. and its affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - - -if [ -z $WORKDIR_ROOT ] ; -then - echo "please specify your working directory root in environment variable WORKDIR_ROOT. Exitting..." - exit -fi - - -SRCDIR=$WORKDIR_ROOT/indic_languages_corpus -DESTDIR=$WORKDIR_ROOT/ML50/raw -mkdir -p $SRCDIR -mkdir -p $DESTDIR - -WAT_MY_EN=wat2020.my-en.zip -cd $SRCDIR -# please refer to http://lotus.kuee.kyoto-u.ac.jp/WAT/my-en-data/ for latest URL if the following url expired -#- The data used for WAT2020 are identical to those used in WAT2019. -wget http://lotus.kuee.kyoto-u.ac.jp/WAT/my-en-data/$WAT_MY_EN -unzip $WAT_MY_EN - - -SRC_EXTRACT_DIR=$SRCDIR/wat2020.my-en/alt - -cp $SRC_EXTRACT_DIR/train.alt.en $DESTDIR/train.my_MM-en_XX.en_XX -cp $SRC_EXTRACT_DIR/train.alt.my $DESTDIR/train.my_MM-en_XX.my_MM -cp $SRC_EXTRACT_DIR/dev.alt.en $DESTDIR/valid.my_MM-en_XX.en_XX -cp $SRC_EXTRACT_DIR/dev.alt.my $DESTDIR/valid.my_MM-en_XX.my_MM -cp $SRC_EXTRACT_DIR/test.alt.en $DESTDIR/test.my_MM-en_XX.en_XX -cp $SRC_EXTRACT_DIR/test.alt.my $DESTDIR/test.my_MM-en_XX.my_MM diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/textless_nlp/gslm/metrics/asr_metrics/misc/cut_as.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/textless_nlp/gslm/metrics/asr_metrics/misc/cut_as.py deleted file mode 100644 index 5b7e1e968564b84c47049c5cc69c9d6b8fafe0e9..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/textless_nlp/gslm/metrics/asr_metrics/misc/cut_as.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- - -import torchaudio -import argparse -import json -import pathlib - - -def get_args(): - parser = argparse.ArgumentParser( - "Assuring generated audio have the same length as ground-truth audio") - parser.add_argument('--samples_dir', required=True, type=str) - parser.add_argument('--out_dir', required=True, type=str) - parser.add_argument('--prompts_description', required=True, type=str) - return parser.parse_args() - - -def cut(src, tgt, l): - x, sr = torchaudio.load(str(src)) - assert sr == 16_000 - - x = x.squeeze() - target_frames = int(l * sr) - - flag = 0 - if target_frames <= x.size(0): - x = x[:target_frames] - flag = 1 - else: - flag = 0 - torchaudio.save(str(tgt), x.unsqueeze(0), sr) - return flag - - -def main(): - args = get_args() - tgt_dir = pathlib.Path(args.out_dir) - tgt_dir.mkdir(exist_ok=True, parents=True) - - total_files, sufficiently_long = 0, 0 - - with open(args.prompts_description, 'r') as f: - description = json.loads(f.read()) - - for src_f in pathlib.Path(args.samples_dir).glob('*.wav'): - name_prompt = src_f.with_suffix('').name.split('__')[0] - - assert name_prompt in description, f'Cannot find {name_prompt}!' - - target_length = description[name_prompt][0] - tgt_f = tgt_dir / (src_f.name) - - is_long_enough = cut(src_f, tgt_f, target_length) - sufficiently_long += is_long_enough - if not is_long_enough: - print(f'{src_f} is not long enough') - - total_files += 1 - - print( - f'Total files: {total_files}; sufficiently long: {sufficiently_long}') - - -if __name__ == '__main__': - main() diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/criterions/model_criterion.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/criterions/model_criterion.py deleted file mode 100644 index 30350f13b1c00498de6784579250d6b342ced7dd..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/criterions/model_criterion.py +++ /dev/null @@ -1,138 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import logging -from dataclasses import dataclass, field -from typing import Dict, List - -from fairseq import metrics, utils -from fairseq.criterions import FairseqCriterion, register_criterion -from fairseq.dataclass import FairseqDataclass - - -logger = logging.getLogger(__name__) - - -@dataclass -class ModelCriterionConfig(FairseqDataclass): - loss_weights: Dict[str, float] = field( - default_factory=dict, - metadata={"help": "weights for the loss terms"}, - ) - log_keys: List[str] = field( - default_factory=list, - metadata={"help": "additional output keys to log"}, - ) - - -@register_criterion("model", dataclass=ModelCriterionConfig) -class ModelCriterion(FairseqCriterion): - """ - This criterion relies on the model to supply losses. - The losses should be a dictionary of name -> scalar returned by - the model either by including it in the net_output dict or by - implementing a get_losses(net_output, sample) method. The final loss is - a scaled sum of all losses according to weights in loss_weights. - If no weights are provided, then all losses are scaled by 1.0. - - The losses will be automatically logged. Additional keys from - net_output dict can be logged via the log_keys parameter. 
- """ - - def __init__(self, task, loss_weights=None, log_keys=None): - super().__init__(task) - self.loss_weights = loss_weights - self.log_keys = log_keys - - def forward(self, model, sample, reduce=True): - net_output = model(**sample["net_input"]) - - sample_size = net_output["sample_size"] - scaled_losses = {} - - if hasattr(model, "get_losses"): - losses = model.get_losses(net_output, sample) - elif isinstance(net_output, dict) and "losses" in net_output: - losses = net_output["losses"] - else: - raise Exception("Could not retrieve losses") - - for lk, p in losses.items(): - try: - coef = 1.0 if len(self.loss_weights) == 0 else self.loss_weights[lk] - except KeyError: - logger.error( - f"weight for loss {lk} is not in loss_weights ({self.loss_weights})" - ) - raise - if coef != 0 and p is not None: - scaled_losses[lk] = coef * p.float() - - loss = sum(scaled_losses.values()) - if reduce and loss.numel() > 1: - loss = loss.sum() - - logging_output = { - "loss": loss.data, - "ntokens": sample_size, - "nsentences": sample["id"].numel(), - "sample_size": sample_size, - "_world_size": 1, - } - - for lk in self.log_keys: - if lk in net_output and net_output[lk] is not None: - logging_output[lk] = float(net_output[lk]) - - if len(scaled_losses) > 1: - for lk, l in scaled_losses.items(): - logging_output[f"loss_{lk}"] = l.item() - - return loss, sample_size, logging_output - - @staticmethod - def reduce_metrics(logging_outputs) -> None: - """Aggregate logging outputs from data parallel training.""" - loss_sum = utils.item(sum(log.get("loss", 0) for log in logging_outputs)) - ntokens = utils.item(sum(log.get("ntokens", 0) for log in logging_outputs)) - nsentences = utils.item( - sum(log.get("nsentences", 0) for log in logging_outputs) - ) - sample_size = utils.item( - sum(log.get("sample_size", 0) for log in logging_outputs) - ) - - metrics.log_scalar("loss", loss_sum / sample_size, sample_size, round=3) - metrics.log_scalar("ntokens", ntokens) - metrics.log_scalar("nsentences", nsentences) - - builtin_keys = { - "loss", - "ntokens", - "nsentences", - "sample_size", - "_world_size", - } - - world_size = utils.item( - sum(log.get("_world_size", 0) for log in logging_outputs) - ) - - for k in logging_outputs[0]: - if k not in builtin_keys: - val = sum(log.get(k, 0) for log in logging_outputs) - if k.startswith("loss_"): - metrics.log_scalar(k, val / sample_size, sample_size, round=3) - else: - metrics.log_scalar(k, val / world_size, round=3) - - @staticmethod - def logging_outputs_can_be_summed() -> bool: - """ - Whether the logging outputs returned by `forward` can be summed - across workers prior to calling `reduce_metrics`. Setting this - to True will improves distributed training speed. - """ - return True diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/laser/laser_src/__init__.py b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/laser/laser_src/__init__.py deleted file mode 100644 index 9ffbd656d8786e421008fb4cb0d1d8911dc8330c..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/laser/laser_src/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -from .laser_task import * # noqa -from .laser_lstm import * # noqa -from .laser_transformer import * # noqa diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/m2m_100/tokenizers/tokenize_indic.py b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/m2m_100/tokenizers/tokenize_indic.py deleted file mode 100644 index a44fad07f7c718f99cccd445f33c62b0e3c562f4..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/m2m_100/tokenizers/tokenize_indic.py +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -# Use: echo {text} | python tokenize_indic.py {language} - -import sys - -from indicnlp.normalize.indic_normalize import IndicNormalizerFactory -from indicnlp.tokenize.indic_tokenize import trivial_tokenize - - -factory = IndicNormalizerFactory() -normalizer = factory.get_normalizer( - sys.argv[1], remove_nuktas=False, nasals_mode="do_nothing" -) - -for line in sys.stdin: - normalized_line = normalizer.normalize(line.strip()) - tokenized_line = " ".join(trivial_tokenize(normalized_line, sys.argv[1])) - print(tokenized_line) diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/speech_recognition/data/asr_dataset.py b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/speech_recognition/data/asr_dataset.py deleted file mode 100644 index 63a6fcac85d73b1fce8e4d044b4209b1b67fa8ce..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/speech_recognition/data/asr_dataset.py +++ /dev/null @@ -1,122 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import os - -import numpy as np -from fairseq.data import FairseqDataset - -from . import data_utils -from .collaters import Seq2SeqCollater - - -class AsrDataset(FairseqDataset): - """ - A dataset representing speech and corresponding transcription. - - Args: - aud_paths: (List[str]): A list of str with paths to audio files. - aud_durations_ms (List[int]): A list of int containing the durations of - audio files. - tgt (List[torch.LongTensor]): A list of LongTensors containing the indices - of target transcriptions. - tgt_dict (~fairseq.data.Dictionary): target vocabulary. - ids (List[str]): A list of utterance IDs. - speakers (List[str]): A list of speakers corresponding to utterances. 
- num_mel_bins (int): Number of triangular mel-frequency bins (default: 80) - frame_length (float): Frame length in milliseconds (default: 25.0) - frame_shift (float): Frame shift in milliseconds (default: 10.0) - """ - - def __init__( - self, - aud_paths, - aud_durations_ms, - tgt, - tgt_dict, - ids, - speakers, - num_mel_bins=80, - frame_length=25.0, - frame_shift=10.0, - ): - assert frame_length > 0 - assert frame_shift > 0 - assert all(x > frame_length for x in aud_durations_ms) - self.frame_sizes = [ - int(1 + (d - frame_length) / frame_shift) for d in aud_durations_ms - ] - - assert len(aud_paths) > 0 - assert len(aud_paths) == len(aud_durations_ms) - assert len(aud_paths) == len(tgt) - assert len(aud_paths) == len(ids) - assert len(aud_paths) == len(speakers) - self.aud_paths = aud_paths - self.tgt_dict = tgt_dict - self.tgt = tgt - self.ids = ids - self.speakers = speakers - self.num_mel_bins = num_mel_bins - self.frame_length = frame_length - self.frame_shift = frame_shift - - self.s2s_collater = Seq2SeqCollater( - 0, - 1, - pad_index=self.tgt_dict.pad(), - eos_index=self.tgt_dict.eos(), - move_eos_to_beginning=True, - ) - - def __getitem__(self, index): - import torchaudio - import torchaudio.compliance.kaldi as kaldi - - tgt_item = self.tgt[index] if self.tgt is not None else None - - path = self.aud_paths[index] - if not os.path.exists(path): - raise FileNotFoundError("Audio file not found: {}".format(path)) - sound, sample_rate = torchaudio.load_wav(path) - output = kaldi.fbank( - sound, - num_mel_bins=self.num_mel_bins, - frame_length=self.frame_length, - frame_shift=self.frame_shift, - ) - output_cmvn = data_utils.apply_mv_norm(output) - - return {"id": index, "data": [output_cmvn.detach(), tgt_item]} - - def __len__(self): - return len(self.aud_paths) - - def collater(self, samples): - """Merge a list of samples to form a mini-batch. - - Args: - samples (List[int]): sample indices to collate - - Returns: - dict: a mini-batch suitable for forwarding with a Model - """ - return self.s2s_collater.collate(samples) - - def num_tokens(self, index): - return self.frame_sizes[index] - - def size(self, index): - """Return an example's size as a float or tuple. This value is used when - filtering a dataset with ``--max-positions``.""" - return ( - self.frame_sizes[index], - len(self.tgt[index]) if self.tgt is not None else 0, - ) - - def ordered_indices(self): - """Return an ordered list of indices. 
Batches will be constructed based - on this order.""" - return np.arange(len(self)) diff --git a/spaces/OdiaGenAI/Olive_Farm/Roleplay/user_roles.py b/spaces/OdiaGenAI/Olive_Farm/Roleplay/user_roles.py deleted file mode 100644 index cc56535384301a184f516aae7e3bc3364ce2cd85..0000000000000000000000000000000000000000 --- a/spaces/OdiaGenAI/Olive_Farm/Roleplay/user_roles.py +++ /dev/null @@ -1,10 +0,0 @@ -user_role_list = ['AI Researcher', 'Accountant', 'Actor', 'Actress', 'Archaeologist', 'Archivist', 'Architect', 'Artist', 'Astronomer', - 'Athlete', 'Author', 'Barista', 'Bartender', 'Biologist', 'Biomedical Engineer', 'Blogger', 'CEO', 'Carpenter', 'Cartoonist', - 'Chef', 'Chauffeur', 'Counselor', 'Customer', 'DJ', 'Dancer', 'Data Analyst', 'Dentist', 'Designer', 'Director', 'Doctor', 'Electrician', - 'Engineer', 'Entrepreneur', 'Environmentalist', 'Ethical Hacker', 'Event Planner', 'Explorer', 'Farmer', 'Fashion Model', 'Financial Analyst', - 'Firefighter', 'Fitness Coach', 'Florist', 'Gamer', 'Geologist', 'Graphic Designer', 'Hair Stylist', 'Historian', 'Housekeeper', 'Illustrator', - 'Lawyer', 'Librarian', 'Magician', 'Magistrate', 'Makeup Artist', 'Manager', 'Mechanic', 'Meteorologist', 'Musician', 'News Reporter', 'Novelist', - 'Nurse', 'Nutritionist', 'Parent', 'Personal Trainer', 'Pharmacist', 'Philanthropist', 'Philosopher', 'Photographer', 'Pilot', 'Plumber', 'Poet', - 'Police Officer', 'Producer', 'Psychologist', 'Real Estate Agent', 'Receptionist', 'Researcher', 'Salesperson', 'Scientist', 'Superintendent', 'Security guard', - 'Social Media Influencer', 'Social Worker', 'Software Engineer', 'Sommelier', 'Student', 'Taxi Driver', 'Teacher', 'Therapist', 'Tour Guide', 'Translator', - 'Traveler', 'Veterinarian', 'Waiter', 'Web Developer', 'Wedding Planner', 'Writer', 'Yoga Instructor', 'Zoologist'] diff --git a/spaces/OpenGVLab/DragGAN/stylegan2/op/fused_bias_act.cpp b/spaces/OpenGVLab/DragGAN/stylegan2/op/fused_bias_act.cpp deleted file mode 100644 index 71f612cdbaaca03822eedc002a980d055d2f485c..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/DragGAN/stylegan2/op/fused_bias_act.cpp +++ /dev/null @@ -1,32 +0,0 @@ - -#include -#include - -torch::Tensor fused_bias_act_op(const torch::Tensor &input, - const torch::Tensor &bias, - const torch::Tensor &refer, int act, int grad, - float alpha, float scale); - -#define CHECK_CUDA(x) \ - TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor") -#define CHECK_CONTIGUOUS(x) \ - TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") -#define CHECK_INPUT(x) \ - CHECK_CUDA(x); \ - CHECK_CONTIGUOUS(x) - -torch::Tensor fused_bias_act(const torch::Tensor &input, - const torch::Tensor &bias, - const torch::Tensor &refer, int act, int grad, - float alpha, float scale) { - CHECK_INPUT(input); - CHECK_INPUT(bias); - - at::DeviceGuard guard(input.device()); - - return fused_bias_act_op(input, bias, refer, act, grad, alpha, scale); -} - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { - m.def("fused_bias_act", &fused_bias_act, "fused bias act (CUDA)"); -} \ No newline at end of file diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/modeling/roi_heads/cascade_rcnn.py b/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/modeling/roi_heads/cascade_rcnn.py deleted file mode 100644 index a0ca70fe23a1d406ee9bed6204a987d7e0708b91..0000000000000000000000000000000000000000 --- 
a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/modeling/roi_heads/cascade_rcnn.py +++ /dev/null @@ -1,299 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -from typing import List -import torch -from torch import nn -from torch.autograd.function import Function - -from detectron2.config import configurable -from detectron2.layers import ShapeSpec -from detectron2.structures import Boxes, Instances, pairwise_iou -from detectron2.utils.events import get_event_storage - -from ..box_regression import Box2BoxTransform -from ..matcher import Matcher -from ..poolers import ROIPooler -from .box_head import build_box_head -from .fast_rcnn import FastRCNNOutputLayers, fast_rcnn_inference -from .roi_heads import ROI_HEADS_REGISTRY, StandardROIHeads - - -class _ScaleGradient(Function): - @staticmethod - def forward(ctx, input, scale): - ctx.scale = scale - return input - - @staticmethod - def backward(ctx, grad_output): - return grad_output * ctx.scale, None - - -@ROI_HEADS_REGISTRY.register() -class CascadeROIHeads(StandardROIHeads): - """ - The ROI heads that implement :paper:`Cascade R-CNN`. - """ - - @configurable - def __init__( - self, - *, - box_in_features: List[str], - box_pooler: ROIPooler, - box_heads: List[nn.Module], - box_predictors: List[nn.Module], - proposal_matchers: List[Matcher], - **kwargs, - ): - """ - NOTE: this interface is experimental. - - Args: - box_pooler (ROIPooler): pooler that extracts region features from given boxes - box_heads (list[nn.Module]): box head for each cascade stage - box_predictors (list[nn.Module]): box predictor for each cascade stage - proposal_matchers (list[Matcher]): matcher with different IoU thresholds to - match boxes with ground truth for each stage. The first matcher matches - RPN proposals with ground truth, the other matchers use boxes predicted - by the previous stage as proposals and match them with ground truth. - """ - assert "proposal_matcher" not in kwargs, ( - "CascadeROIHeads takes 'proposal_matchers=' for each stage instead " - "of one 'proposal_matcher='." - ) - # The first matcher matches RPN proposals with ground truth, done in the base class - kwargs["proposal_matcher"] = proposal_matchers[0] - num_stages = self.num_cascade_stages = len(box_heads) - box_heads = nn.ModuleList(box_heads) - box_predictors = nn.ModuleList(box_predictors) - assert len(box_predictors) == num_stages, f"{len(box_predictors)} != {num_stages}!" - assert len(proposal_matchers) == num_stages, f"{len(proposal_matchers)} != {num_stages}!" 
- super().__init__( - box_in_features=box_in_features, - box_pooler=box_pooler, - box_head=box_heads, - box_predictor=box_predictors, - **kwargs, - ) - self.proposal_matchers = proposal_matchers - - @classmethod - def from_config(cls, cfg, input_shape): - ret = super().from_config(cfg, input_shape) - ret.pop("proposal_matcher") - return ret - - @classmethod - def _init_box_head(cls, cfg, input_shape): - # fmt: off - in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES - pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION - pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features) - sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO - pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE - cascade_bbox_reg_weights = cfg.MODEL.ROI_BOX_CASCADE_HEAD.BBOX_REG_WEIGHTS - cascade_ious = cfg.MODEL.ROI_BOX_CASCADE_HEAD.IOUS - assert len(cascade_bbox_reg_weights) == len(cascade_ious) - assert cfg.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG, \ - "CascadeROIHeads only support class-agnostic regression now!" - assert cascade_ious[0] == cfg.MODEL.ROI_HEADS.IOU_THRESHOLDS[0] - # fmt: on - - in_channels = [input_shape[f].channels for f in in_features] - # Check all channel counts are equal - assert len(set(in_channels)) == 1, in_channels - in_channels = in_channels[0] - - box_pooler = ROIPooler( - output_size=pooler_resolution, - scales=pooler_scales, - sampling_ratio=sampling_ratio, - pooler_type=pooler_type, - ) - pooled_shape = ShapeSpec( - channels=in_channels, width=pooler_resolution, height=pooler_resolution - ) - - box_heads, box_predictors, proposal_matchers = [], [], [] - for match_iou, bbox_reg_weights in zip(cascade_ious, cascade_bbox_reg_weights): - box_head = build_box_head(cfg, pooled_shape) - box_heads.append(box_head) - box_predictors.append( - FastRCNNOutputLayers( - cfg, - box_head.output_shape, - box2box_transform=Box2BoxTransform(weights=bbox_reg_weights), - ) - ) - proposal_matchers.append(Matcher([match_iou], [0, 1], allow_low_quality_matches=False)) - return { - "box_in_features": in_features, - "box_pooler": box_pooler, - "box_heads": box_heads, - "box_predictors": box_predictors, - "proposal_matchers": proposal_matchers, - } - - def forward(self, images, features, proposals, targets=None): - del images - if self.training: - proposals = self.label_and_sample_proposals(proposals, targets) - - if self.training: - # Need targets to box head - losses = self._forward_box(features, proposals, targets) - losses.update(self._forward_mask(features, proposals)) - losses.update(self._forward_keypoint(features, proposals)) - return proposals, losses - else: - pred_instances = self._forward_box(features, proposals) - pred_instances = self.forward_with_given_boxes(features, pred_instances) - return pred_instances, {} - - def _forward_box(self, features, proposals, targets=None): - """ - Args: - features, targets: the same as in - Same as in :meth:`ROIHeads.forward`. - proposals (list[Instances]): the per-image object proposals with - their matching ground truth. - Each has fields "proposal_boxes", and "objectness_logits", - "gt_classes", "gt_boxes". - """ - features = [features[f] for f in self.box_in_features] - head_outputs = [] # (predictor, predictions, proposals) - prev_pred_boxes = None - image_sizes = [x.image_size for x in proposals] - for k in range(self.num_cascade_stages): - if k > 0: - # The output boxes of the previous stage are used to create the input - # proposals of the next stage. 
- proposals = self._create_proposals_from_boxes(prev_pred_boxes, image_sizes) - if self.training: - proposals = self._match_and_label_boxes(proposals, k, targets) - predictions = self._run_stage(features, proposals, k) - prev_pred_boxes = self.box_predictor[k].predict_boxes(predictions, proposals) - head_outputs.append((self.box_predictor[k], predictions, proposals)) - - if self.training: - losses = {} - storage = get_event_storage() - for stage, (predictor, predictions, proposals) in enumerate(head_outputs): - with storage.name_scope("stage{}".format(stage)): - stage_losses = predictor.losses(predictions, proposals) - losses.update({k + "_stage{}".format(stage): v for k, v in stage_losses.items()}) - return losses - else: - # Each is a list[Tensor] of length #image. Each tensor is Ri x (K+1) - scores_per_stage = [h[0].predict_probs(h[1], h[2]) for h in head_outputs] - - # Average the scores across heads - scores = [ - sum(list(scores_per_image)) * (1.0 / self.num_cascade_stages) - for scores_per_image in zip(*scores_per_stage) - ] - # Use the boxes of the last head - predictor, predictions, proposals = head_outputs[-1] - boxes = predictor.predict_boxes(predictions, proposals) - pred_instances, _ = fast_rcnn_inference( - boxes, - scores, - image_sizes, - predictor.test_score_thresh, - predictor.test_nms_thresh, - predictor.test_topk_per_image, - ) - return pred_instances - - @torch.no_grad() - def _match_and_label_boxes(self, proposals, stage, targets): - """ - Match proposals with groundtruth using the matcher at the given stage. - Label the proposals as foreground or background based on the match. - - Args: - proposals (list[Instances]): One Instances for each image, with - the field "proposal_boxes". - stage (int): the current stage - targets (list[Instances]): the ground truth instances - - Returns: - list[Instances]: the same proposals, but with fields "gt_classes" and "gt_boxes" - """ - num_fg_samples, num_bg_samples = [], [] - for proposals_per_image, targets_per_image in zip(proposals, targets): - match_quality_matrix = pairwise_iou( - targets_per_image.gt_boxes, proposals_per_image.proposal_boxes - ) - # proposal_labels are 0 or 1 - matched_idxs, proposal_labels = self.proposal_matchers[stage](match_quality_matrix) - if len(targets_per_image) > 0: - gt_classes = targets_per_image.gt_classes[matched_idxs] - # Label unmatched proposals (0 label from matcher) as background (label=num_classes) - gt_classes[proposal_labels == 0] = self.num_classes - gt_boxes = targets_per_image.gt_boxes[matched_idxs] - else: - gt_classes = torch.zeros_like(matched_idxs) + self.num_classes - gt_boxes = Boxes( - targets_per_image.gt_boxes.tensor.new_zeros((len(proposals_per_image), 4)) - ) - proposals_per_image.gt_classes = gt_classes - proposals_per_image.gt_boxes = gt_boxes - - num_fg_samples.append((proposal_labels == 1).sum().item()) - num_bg_samples.append(proposal_labels.numel() - num_fg_samples[-1]) - - # Log the number of fg/bg samples in each stage - storage = get_event_storage() - storage.put_scalar( - "stage{}/roi_head/num_fg_samples".format(stage), - sum(num_fg_samples) / len(num_fg_samples), - ) - storage.put_scalar( - "stage{}/roi_head/num_bg_samples".format(stage), - sum(num_bg_samples) / len(num_bg_samples), - ) - return proposals - - def _run_stage(self, features, proposals, stage): - """ - Args: - features (list[Tensor]): #lvl input features to ROIHeads - proposals (list[Instances]): #image Instances, with the field "proposal_boxes" - stage (int): the current stage - - Returns: - Same 
output as `FastRCNNOutputLayers.forward()`. - """ - box_features = self.box_pooler(features, [x.proposal_boxes for x in proposals]) - # The original implementation averages the losses among heads, - # but scale up the parameter gradients of the heads. - # This is equivalent to adding the losses among heads, - # but scale down the gradients on features. - if self.training: - box_features = _ScaleGradient.apply(box_features, 1.0 / self.num_cascade_stages) - box_features = self.box_head[stage](box_features) - return self.box_predictor[stage](box_features) - - def _create_proposals_from_boxes(self, boxes, image_sizes): - """ - Args: - boxes (list[Tensor]): per-image predicted boxes, each of shape Ri x 4 - image_sizes (list[tuple]): list of image shapes in (h, w) - - Returns: - list[Instances]: per-image proposals with the given boxes. - """ - # Just like RPN, the proposals should not have gradients - boxes = [Boxes(b.detach()) for b in boxes] - proposals = [] - for boxes_per_image, image_size in zip(boxes, image_sizes): - boxes_per_image.clip(image_size) - if self.training: - # do not filter empty boxes at inference time, - # because the scores from each stage need to be aligned and added later - boxes_per_image = boxes_per_image[boxes_per_image.nonempty()] - prop = Instances(image_size) - prop.proposal_boxes = boxes_per_image - proposals.append(prop) - return proposals diff --git a/spaces/OpenGVLab/InternGPT/third-party/lama/bin/saicinpainting/evaluation/__init__.py b/spaces/OpenGVLab/InternGPT/third-party/lama/bin/saicinpainting/evaluation/__init__.py deleted file mode 100644 index e9c8117565b252ca069a808b31b8c52aaddd2289..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/InternGPT/third-party/lama/bin/saicinpainting/evaluation/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -import logging - -import torch - -from saicinpainting.evaluation.evaluator import InpaintingEvaluatorOnline, ssim_fid100_f1, lpips_fid100_f1 -from saicinpainting.evaluation.losses.base_loss import SSIMScore, LPIPSScore, FIDScore - - -def make_evaluator(kind='default', ssim=True, lpips=True, fid=True, integral_kind=None, **kwargs): - logging.info(f'Make evaluator {kind}') - device = "cuda" if torch.cuda.is_available() else "cpu" - metrics = {} - if ssim: - metrics['ssim'] = SSIMScore() - if lpips: - metrics['lpips'] = LPIPSScore() - if fid: - metrics['fid'] = FIDScore().to(device) - - if integral_kind is None: - integral_func = None - elif integral_kind == 'ssim_fid100_f1': - integral_func = ssim_fid100_f1 - elif integral_kind == 'lpips_fid100_f1': - integral_func = lpips_fid100_f1 - else: - raise ValueError(f'Unexpected integral_kind={integral_kind}') - - if kind == 'default': - return InpaintingEvaluatorOnline(scores=metrics, - integral_func=integral_func, - integral_title=integral_kind, - **kwargs) diff --git a/spaces/OpenMotionLab/MotionGPT/mGPT/data/transforms/rots2joints/smplx.py b/spaces/OpenMotionLab/MotionGPT/mGPT/data/transforms/rots2joints/smplx.py deleted file mode 100644 index 107eb57735a0344bb0d32a341310f0b6c0e6035b..0000000000000000000000000000000000000000 --- a/spaces/OpenMotionLab/MotionGPT/mGPT/data/transforms/rots2joints/smplx.py +++ /dev/null @@ -1,201 +0,0 @@ -# -*- coding: utf-8 -*- - -# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is -# holder of all proprietary rights on this computer program. 
-# You can only use this computer program if you have closed -# a license agreement with MPG or you get the right to use the computer -# program from someone who is authorized to grant you that right. -# Any use of the computer program without a valid license is prohibited and -# liable to prosecution. -# -# Copyright©2020 Max-Planck-Gesellschaft zur Förderung -# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute -# for Intelligent Systems. All rights reserved. -# -# Contact: ps-license@tuebingen.mpg.de - -import contextlib -from typing import Optional -import torch -from torch import Tensor -from mGPT.utils.joints import smplh_to_mmm_scaling_factor, smplh2mmm_indexes, get_root_idx -from mGPT.utils.easyconvert import rep_to_rep -from .base import Rots2Joints - - -def slice_or_none(data, cslice): - if data is None: - return data - else: - return data[cslice] - - -class SMPLX(Rots2Joints): - def __init__(self, - path: str, - jointstype: str = "mmm", - input_pose_rep: str = "matrix", - batch_size: int = 512, - gender="neutral", - **kwargs) -> None: - super().__init__(path=None, normalization=False) - self.batch_size = batch_size - self.input_pose_rep = input_pose_rep - self.jointstype = jointstype - self.training = False - - from smplx.body_models import SMPLXLayer - import os - # rel_p = path.split('/') - # rel_p = rel_p[rel_p.index('data'):] - # rel_p = '/'.join(rel_p) - - # Remove annoying print - with contextlib.redirect_stdout(None): - self.smplx = SMPLXLayer(path, - ext="npz", - gender=gender, - batch_size=batch_size).eval() - - self.faces = self.smplx.faces - for p in self.parameters(): - p.requires_grad = False - - def train(self, *args, **kwargs): - return self - - def forward(self, - smpl_data: dict, - jointstype: Optional[str] = None, - input_pose_rep: Optional[str] = None, - batch_size: Optional[int] = None) -> Tensor: - - # Take values from init if not specified there - jointstype = self.jointstype if jointstype is None else jointstype - batch_size = self.batch_size if batch_size is None else batch_size - input_pose_rep = self.input_pose_rep if input_pose_rep is None else input_pose_rep - - poses = smpl_data.rots - trans = smpl_data.trans - - from functools import reduce - import operator - save_shape_bs_len = poses.shape[:-3] - nposes = reduce(operator.mul, save_shape_bs_len, 1) - - - matrix_poses = rep_to_rep(self.input_pose_rep, input_pose_rep, poses) - - # Reshaping - matrix_poses = matrix_poses.reshape((nposes, *matrix_poses.shape[-3:])) - - global_orient = matrix_poses[:, 0] - - if trans is None: - trans = torch.zeros((*save_shape_bs_len, 3), - dtype=poses.dtype, - device=poses.device) - - trans_all = trans.reshape((nposes, *trans.shape[-1:])) - - body_pose = matrix_poses[:, 1:22] - - if poses.shape[-3] == 55: - nohands = False - nofaces = False - elif poses.shape[-3] == 52: - nohands = False - nofaces = True - elif poses.shape[-3] == 22: - nohands = True - nofaces = True - else: - raise NotImplementedError("Could not parse the poses.") - - if nohands: - left_hand_pose = None - right_hand_pose = None - else: - left_hand_pose = matrix_poses[:, 25:40] - right_hand_pose = matrix_poses[:, 40:55] - - if nofaces: - jaw_pose = None - leye_pose = None - reye_pose = None - else: - jaw_pose = matrix_poses[:, 22:23] - leye_pose = matrix_poses[:, 23:24] - reye_pose = matrix_poses[:, 24:25] - - n = len(body_pose) - outputs = [] - for chunk in range(int((n - 1) / batch_size) + 1): - chunk_slice = slice(chunk * batch_size, (chunk + 1) * batch_size) - smpl_output = 
self.smplx( - global_orient=slice_or_none(global_orient, chunk_slice), - body_pose=slice_or_none(body_pose, chunk_slice), - left_hand_pose=slice_or_none(left_hand_pose, chunk_slice), - right_hand_pose=slice_or_none(right_hand_pose, chunk_slice), - jaw_pose=slice_or_none(jaw_pose, chunk_slice), - leye_pose=slice_or_none(leye_pose, chunk_slice), - reye_pose=slice_or_none(reye_pose, chunk_slice), - transl=slice_or_none(trans_all, chunk_slice)) - - if jointstype == "vertices": - output_chunk = smpl_output.vertices - else: - joints = smpl_output.joints - output_chunk = joints - - outputs.append(output_chunk) - - outputs = torch.cat(outputs) - outputs = outputs.reshape((*save_shape_bs_len, *outputs.shape[1:])) - - # Change topology if needed - outputs = smplx_to(jointstype, outputs, trans) - - return outputs - - def inverse(self, joints: Tensor) -> Tensor: - raise NotImplementedError("Cannot inverse SMPLX layer.") - - -def smplx_to(jointstype, data, trans): - - if "mmm" in jointstype: - indexes = smplh2mmm_indexes - data = data[..., indexes, :] - - # make it compatible with mmm - if jointstype == "mmm": - data *= smplh_to_mmm_scaling_factor - - if jointstype == "smplmmm": - pass - elif jointstype in ["mmm", "mmmns"]: - # swap axis - data = data[..., [1, 2, 0]] - # revert left and right - data[..., 2] = -data[..., 2] - - elif jointstype == "smplnh": - from mGPT.utils.joints import smplh2smplnh_indexes - indexes = smplh2smplnh_indexes - data = data[..., indexes, :] - elif jointstype == "smplh": - pass - elif jointstype == "vertices": - pass - else: - raise NotImplementedError(f"SMPLX to {jointstype} is not implemented.") - - if jointstype != "vertices": - # shift the output in each batch - # such that it is centered on the pelvis/root on the first frame - root_joint_idx = get_root_idx(jointstype) - shift = trans[..., 0, :] - data[..., 0, root_joint_idx, :] - data += shift[..., None, None, :] - - return data diff --git a/spaces/OpenMotionLab/MotionGPT/pyrender/pyrender/platforms/egl.py b/spaces/OpenMotionLab/MotionGPT/pyrender/pyrender/platforms/egl.py deleted file mode 100644 index ae2478d29c9a538c53ad83fa31f8e2277cd897c8..0000000000000000000000000000000000000000 --- a/spaces/OpenMotionLab/MotionGPT/pyrender/pyrender/platforms/egl.py +++ /dev/null @@ -1,219 +0,0 @@ -import ctypes -import os - -import OpenGL.platform - -from .base import Platform - -EGL_PLATFORM_DEVICE_EXT = 0x313F -EGL_DRM_DEVICE_FILE_EXT = 0x3233 - - -def _ensure_egl_loaded(): - plugin = OpenGL.platform.PlatformPlugin.by_name('egl') - if plugin is None: - raise RuntimeError("EGL platform plugin is not available.") - - plugin_class = plugin.load() - plugin.loaded = True - # create instance of this platform implementation - plugin = plugin_class() - - plugin.install(vars(OpenGL.platform)) - - -_ensure_egl_loaded() -from OpenGL import EGL as egl - - -def _get_egl_func(func_name, res_type, *arg_types): - address = egl.eglGetProcAddress(func_name) - if address is None: - return None - - proto = ctypes.CFUNCTYPE(res_type) - proto.argtypes = arg_types - func = proto(address) - return func - - -def _get_egl_struct(struct_name): - from OpenGL._opaque import opaque_pointer_cls - return opaque_pointer_cls(struct_name) - - -# These are not defined in PyOpenGL by default. 
-_EGLDeviceEXT = _get_egl_struct('EGLDeviceEXT') -_eglGetPlatformDisplayEXT = _get_egl_func('eglGetPlatformDisplayEXT', egl.EGLDisplay) -_eglQueryDevicesEXT = _get_egl_func('eglQueryDevicesEXT', egl.EGLBoolean) -_eglQueryDeviceStringEXT = _get_egl_func('eglQueryDeviceStringEXT', ctypes.c_char_p) - - -def query_devices(): - if _eglQueryDevicesEXT is None: - raise RuntimeError("EGL query extension is not loaded or is not supported.") - - num_devices = egl.EGLint() - success = _eglQueryDevicesEXT(0, None, ctypes.pointer(num_devices)) - if not success or num_devices.value < 1: - return [] - - devices = (_EGLDeviceEXT * num_devices.value)() # array of size num_devices - success = _eglQueryDevicesEXT(num_devices.value, devices, ctypes.pointer(num_devices)) - if not success or num_devices.value < 1: - return [] - - return [EGLDevice(devices[i]) for i in range(num_devices.value)] - - -def get_default_device(): - # Fall back to not using query extension. - if _eglQueryDevicesEXT is None: - return EGLDevice(None) - - return query_devices()[0] - - -def get_device_by_index(device_id): - if _eglQueryDevicesEXT is None and device_id == 0: - return get_default_device() - - devices = query_devices() - if device_id >= len(devices): - raise ValueError('Invalid device ID ({})'.format(device_id, len(devices))) - return devices[device_id] - - -class EGLDevice: - - def __init__(self, display=None): - self._display = display - - def get_display(self): - if self._display is None: - return egl.eglGetDisplay(egl.EGL_DEFAULT_DISPLAY) - - return _eglGetPlatformDisplayEXT(EGL_PLATFORM_DEVICE_EXT, self._display, None) - - @property - def name(self): - if self._display is None: - return 'default' - - name = _eglQueryDeviceStringEXT(self._display, EGL_DRM_DEVICE_FILE_EXT) - if name is None: - return None - - return name.decode('ascii') - - def __repr__(self): - return "".format(self.name) - - -class EGLPlatform(Platform): - """Renders using EGL. 
- """ - - def __init__(self, viewport_width, viewport_height, device: EGLDevice = None): - super(EGLPlatform, self).__init__(viewport_width, viewport_height) - if device is None: - device = get_default_device() - - self._egl_device = device - self._egl_display = None - self._egl_context = None - - def init_context(self): - _ensure_egl_loaded() - - from OpenGL.EGL import ( - EGL_SURFACE_TYPE, EGL_PBUFFER_BIT, EGL_BLUE_SIZE, - EGL_RED_SIZE, EGL_GREEN_SIZE, EGL_DEPTH_SIZE, - EGL_COLOR_BUFFER_TYPE, EGL_RGB_BUFFER, - EGL_RENDERABLE_TYPE, EGL_OPENGL_BIT, EGL_CONFORMANT, - EGL_NONE, EGL_DEFAULT_DISPLAY, EGL_NO_CONTEXT, - EGL_OPENGL_API, EGL_CONTEXT_MAJOR_VERSION, - EGL_CONTEXT_MINOR_VERSION, - EGL_CONTEXT_OPENGL_PROFILE_MASK, - EGL_CONTEXT_OPENGL_CORE_PROFILE_BIT, - eglGetDisplay, eglInitialize, eglChooseConfig, - eglBindAPI, eglCreateContext, EGLConfig - ) - from OpenGL import arrays - - config_attributes = arrays.GLintArray.asArray([ - EGL_SURFACE_TYPE, EGL_PBUFFER_BIT, - EGL_BLUE_SIZE, 8, - EGL_RED_SIZE, 8, - EGL_GREEN_SIZE, 8, - EGL_DEPTH_SIZE, 24, - EGL_COLOR_BUFFER_TYPE, EGL_RGB_BUFFER, - EGL_RENDERABLE_TYPE, EGL_OPENGL_BIT, - EGL_CONFORMANT, EGL_OPENGL_BIT, - EGL_NONE - ]) - context_attributes = arrays.GLintArray.asArray([ - EGL_CONTEXT_MAJOR_VERSION, 4, - EGL_CONTEXT_MINOR_VERSION, 1, - EGL_CONTEXT_OPENGL_PROFILE_MASK, - EGL_CONTEXT_OPENGL_CORE_PROFILE_BIT, - EGL_NONE - ]) - major, minor = ctypes.c_long(), ctypes.c_long() - num_configs = ctypes.c_long() - configs = (EGLConfig * 1)() - - # Cache DISPLAY if necessary and get an off-screen EGL display - orig_dpy = None - if 'DISPLAY' in os.environ: - orig_dpy = os.environ['DISPLAY'] - del os.environ['DISPLAY'] - - self._egl_display = self._egl_device.get_display() - if orig_dpy is not None: - os.environ['DISPLAY'] = orig_dpy - - # Initialize EGL - assert eglInitialize(self._egl_display, major, minor) - assert eglChooseConfig( - self._egl_display, config_attributes, configs, 1, num_configs - ) - - # Bind EGL to the OpenGL API - assert eglBindAPI(EGL_OPENGL_API) - - # Create an EGL context - self._egl_context = eglCreateContext( - self._egl_display, configs[0], - EGL_NO_CONTEXT, context_attributes - ) - - # Make it current - self.make_current() - - def make_current(self): - from OpenGL.EGL import eglMakeCurrent, EGL_NO_SURFACE - assert eglMakeCurrent( - self._egl_display, EGL_NO_SURFACE, EGL_NO_SURFACE, - self._egl_context - ) - - def make_uncurrent(self): - """Make the OpenGL context uncurrent. 
- """ - pass - - def delete_context(self): - from OpenGL.EGL import eglDestroyContext, eglTerminate - if self._egl_display is not None: - if self._egl_context is not None: - eglDestroyContext(self._egl_display, self._egl_context) - self._egl_context = None - eglTerminate(self._egl_display) - self._egl_display = None - - def supports_framebuffers(self): - return True - - -__all__ = ['EGLPlatform'] diff --git a/spaces/OpenMotionLab/MotionGPT/pyrender/tests/unit/test_egl.py b/spaces/OpenMotionLab/MotionGPT/pyrender/tests/unit/test_egl.py deleted file mode 100644 index e2f4bef39e33c2794e6837b5a1bb127d8d4dba06..0000000000000000000000000000000000000000 --- a/spaces/OpenMotionLab/MotionGPT/pyrender/tests/unit/test_egl.py +++ /dev/null @@ -1,16 +0,0 @@ -# from pyrender.platforms import egl - - -def tmp_test_default_device(): - egl.get_default_device() - - -def tmp_test_query_device(): - devices = egl.query_devices() - assert len(devices) > 0 - - -def tmp_test_init_context(): - device = egl.query_devices()[0] - platform = egl.EGLPlatform(128, 128, device=device) - platform.init_context() diff --git a/spaces/PKaushik/humandetect/yolov6/data/voc2yolo.py b/spaces/PKaushik/humandetect/yolov6/data/voc2yolo.py deleted file mode 100644 index 9019e1fcd23b66bc6afab9bb52a60349c79d71c8..0000000000000000000000000000000000000000 --- a/spaces/PKaushik/humandetect/yolov6/data/voc2yolo.py +++ /dev/null @@ -1,100 +0,0 @@ -import xml.etree.ElementTree as ET -from tqdm import tqdm -import os -import shutil -import argparse - -# VOC dataset (refer https://github.com/ultralytics/yolov5/blob/master/data/VOC.yaml) -# VOC2007 trainval: 446MB, 5012 images -# VOC2007 test: 438MB, 4953 images -# VOC2012 trainval: 1.95GB, 17126 images - -VOC_NAMES = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', - 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'] - - -def convert_label(path, lb_path, year, image_id): - def convert_box(size, box): - dw, dh = 1. / size[0], 1. 
/ size[1] - x, y, w, h = (box[0] + box[1]) / 2.0 - 1, (box[2] + box[3]) / 2.0 - 1, box[1] - box[0], box[3] - box[2] - return x * dw, y * dh, w * dw, h * dh - in_file = open(os.path.join(path, f'VOC{year}/Annotations/{image_id}.xml')) - out_file = open(lb_path, 'w') - tree = ET.parse(in_file) - root = tree.getroot() - size = root.find('size') - w = int(size.find('width').text) - h = int(size.find('height').text) - for obj in root.iter('object'): - cls = obj.find('name').text - if cls in VOC_NAMES and not int(obj.find('difficult').text) == 1: - xmlbox = obj.find('bndbox') - bb = convert_box((w, h), [float(xmlbox.find(x).text) for x in ('xmin', 'xmax', 'ymin', 'ymax')]) - cls_id = VOC_NAMES.index(cls) # class id - out_file.write(" ".join([str(a) for a in (cls_id, *bb)]) + '\n') - - -def gen_voc07_12(voc_path): - ''' - Generate voc07+12 setting dataset: - train: # train images 16551 images - - images/train2012 - - images/train2007 - - images/val2012 - - images/val2007 - val: # val images (relative to 'path') 4952 images - - images/test2007 - ''' - dataset_root = os.path.join(voc_path, 'voc_07_12') - if not os.path.exists(dataset_root): - os.makedirs(dataset_root) - - dataset_settings = {'train': ['train2007', 'val2007', 'train2012', 'val2012'], 'val':['test2007']} - for item in ['images', 'labels']: - for data_type, data_list in dataset_settings.items(): - for data_name in data_list: - ori_path = os.path.join(voc_path, item, data_name) - new_path = os.path.join(dataset_root, item, data_type) - if not os.path.exists(new_path): - os.makedirs(new_path) - - print(f'[INFO]: Copying {ori_path} to {new_path}') - for file in os.listdir(ori_path): - shutil.copy(os.path.join(ori_path, file), new_path) - - -def main(args): - voc_path = args.voc_path - for year, image_set in ('2012', 'train'), ('2012', 'val'), ('2007', 'train'), ('2007', 'val'), ('2007', 'test'): - imgs_path = os.path.join(voc_path, 'images', f'{image_set}') - lbs_path = os.path.join(voc_path, 'labels', f'{image_set}') - - try: - with open(os.path.join(voc_path, f'VOC{year}/ImageSets/Main/{image_set}.txt'), 'r') as f: - image_ids = f.read().strip().split() - if not os.path.exists(imgs_path): - os.makedirs(imgs_path) - if not os.path.exists(lbs_path): - os.makedirs(lbs_path) - - for id in tqdm(image_ids, desc=f'{image_set}{year}'): - f = os.path.join(voc_path, f'VOC{year}/JPEGImages/{id}.jpg') # old img path - lb_path = os.path.join(lbs_path, f'{id}.txt') # new label path - convert_label(voc_path, lb_path, year, id) # convert labels to YOLO format - if os.path.exists(f): - shutil.move(f, imgs_path) # move image - except Exception as e: - print(f'[Warning]: {e} {year}{image_set} convert fail!') - - gen_voc07_12(voc_path) - - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--voc_path', default='VOCdevkit') - - args = parser.parse_args() - print(args) - - main(args) diff --git a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/ops/tin_shift.py b/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/ops/tin_shift.py deleted file mode 100644 index 472c9fcfe45a124e819b7ed5653e585f94a8811e..0000000000000000000000000000000000000000 --- a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/ops/tin_shift.py +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-# Code reference from "Temporal Interlacing Network" -# https://github.com/deepcs233/TIN/blob/master/cuda_shift/rtc_wrap.py -# Hao Shao, Shengju Qian, Yu Liu -# shaoh19@mails.tsinghua.edu.cn, sjqian@cse.cuhk.edu.hk, yuliu@ee.cuhk.edu.hk - -import torch -import torch.nn as nn -from torch.autograd import Function - -from ..utils import ext_loader - -ext_module = ext_loader.load_ext('_ext', - ['tin_shift_forward', 'tin_shift_backward']) - - -class TINShiftFunction(Function): - - @staticmethod - def forward(ctx, input, shift): - C = input.size(2) - num_segments = shift.size(1) - if C // num_segments <= 0 or C % num_segments != 0: - raise ValueError('C should be a multiple of num_segments, ' - f'but got C={C} and num_segments={num_segments}.') - - ctx.save_for_backward(shift) - - out = torch.zeros_like(input) - ext_module.tin_shift_forward(input, shift, out) - - return out - - @staticmethod - def backward(ctx, grad_output): - - shift = ctx.saved_tensors[0] - data_grad_input = grad_output.new(*grad_output.size()).zero_() - shift_grad_input = shift.new(*shift.size()).zero_() - ext_module.tin_shift_backward(grad_output, shift, data_grad_input) - - return data_grad_input, shift_grad_input - - -tin_shift = TINShiftFunction.apply - - -class TINShift(nn.Module): - """Temporal Interlace Shift. - - Temporal Interlace shift is a differentiable temporal-wise frame shifting - which is proposed in "Temporal Interlacing Network" - - Please refer to https://arxiv.org/abs/2001.06499 for more details. - Code is modified from https://github.com/mit-han-lab/temporal-shift-module - """ - - def forward(self, input, shift): - """Perform temporal interlace shift. - - Args: - input (Tensor): Feature map with shape [N, num_segments, C, H * W]. - shift (Tensor): Shift tensor with shape [N, num_segments]. - - Returns: - Feature map after temporal interlace shift. 
- """ - return tin_shift(input, shift) diff --git a/spaces/Plachta/VITS-Umamusume-voice-synthesizer/ONNXVITS_to_onnx.py b/spaces/Plachta/VITS-Umamusume-voice-synthesizer/ONNXVITS_to_onnx.py deleted file mode 100644 index 846e39849535ed08accb10d7001f2431a851d372..0000000000000000000000000000000000000000 --- a/spaces/Plachta/VITS-Umamusume-voice-synthesizer/ONNXVITS_to_onnx.py +++ /dev/null @@ -1,31 +0,0 @@ -import ONNXVITS_models -import utils -from text import text_to_sequence -import torch -import commons - -def get_text(text, hps): - text_norm = text_to_sequence(text, hps.symbols, hps.data.text_cleaners) - if hps.data.add_blank: - text_norm = commons.intersperse(text_norm, 0) - text_norm = torch.LongTensor(text_norm) - return text_norm - -hps = utils.get_hparams_from_file("../vits/pretrained_models/uma87.json") -symbols = hps.symbols -net_g = ONNXVITS_models.SynthesizerTrn( - len(symbols), - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - n_speakers=hps.data.n_speakers, - **hps.model) -_ = net_g.eval() -_ = utils.load_checkpoint("../vits/pretrained_models/uma_1153000.pth", net_g) - -text1 = get_text("ありがとうございます。", hps) -stn_tst = text1 -with torch.no_grad(): - x_tst = stn_tst.unsqueeze(0) - x_tst_lengths = torch.LongTensor([stn_tst.size(0)]) - sid = torch.tensor([0]) - o = net_g(x_tst, x_tst_lengths, sid=sid, noise_scale=.667, noise_scale_w=0.8, length_scale=1) \ No newline at end of file diff --git a/spaces/Podtekatel/JoJo_Style_Transfer/inference/box_utils.py b/spaces/Podtekatel/JoJo_Style_Transfer/inference/box_utils.py deleted file mode 100644 index ddbd40bc4861ba88a242129f195b0de9ff82fe40..0000000000000000000000000000000000000000 --- a/spaces/Podtekatel/JoJo_Style_Transfer/inference/box_utils.py +++ /dev/null @@ -1,31 +0,0 @@ -import numpy as np - - -def convert_to_square(bboxes): - """Convert bounding boxes to a square form. - Arguments: - bboxes: a float numpy array of shape [n, 4]. - Returns: - a float numpy array of shape [4], - squared bounding boxes. 
- """ - - square_bboxes = np.zeros_like(bboxes) - x1, y1, x2, y2 = bboxes - h = y2 - y1 + 1.0 - w = x2 - x1 + 1.0 - max_side = np.maximum(h, w) - square_bboxes[0] = x1 + w * 0.5 - max_side * 0.5 - square_bboxes[1] = y1 + h * 0.5 - max_side * 0.5 - square_bboxes[2] = square_bboxes[0] + max_side - 1.0 - square_bboxes[3] = square_bboxes[1] + max_side - 1.0 - return square_bboxes - - -def scale_box(box, scale): - x1, y1, x2, y2 = box - center_x, center_y = (x1 + x2) / 2, (y1 + y2) / 2 - w, h = x2 - x1, y2 - y1 - new_w, new_h = w * scale, h * scale - y1, y2, x1, x2 = center_y - new_h / 2, center_y + new_h / 2, center_x - new_w / 2, center_x + new_w / 2, - return np.array((x1, y1, x2, y2)) diff --git a/spaces/PranayVerma/IRIS/README.md b/spaces/PranayVerma/IRIS/README.md deleted file mode 100644 index 334dd29b1543e25fd5d22b1b71a16e16128f3f6e..0000000000000000000000000000000000000000 --- a/spaces/PranayVerma/IRIS/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: IRIS -emoji: 📉 -colorFrom: yellow -colorTo: yellow -sdk: gradio -sdk_version: 3.24.1 -app_file: app.py -pinned: true -duplicated_from: PranayVerma/demo ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/RamAnanth1/iclr2023/app.py b/spaces/RamAnanth1/iclr2023/app.py deleted file mode 100644 index 8e1f8ecc3b8062e361294cce4d52f0a2b0268b24..0000000000000000000000000000000000000000 --- a/spaces/RamAnanth1/iclr2023/app.py +++ /dev/null @@ -1,109 +0,0 @@ -import streamlit as st -import json -import requests -import csv -import pandas as pd -import tqdm - -import cohere -import os - - -from topically import Topically -from bertopic import BERTopic -from sklearn.cluster import KMeans -import numpy as np - -venue = 'ICLR.cc/2023/Conference' -venue_short = 'iclr2023' - -def get_conference_notes(venue, blind_submission=False): - """ - Get all notes of a conference (data) from OpenReview API. - If results are not final, you should set blind_submission=True. - """ - - blind_param = '-/Blind_Submission' if blind_submission else '' - offset = 0 - notes = [] - while True: - print('Offset:', offset, 'Data:', len(notes)) - url = f'https://api.openreview.net/notes?invitation={venue}/{blind_param}&offset={offset}' - response = requests.get(url) - data = response.json() - if len(data['notes']) == 0: - break - offset += 1000 - notes.extend(data['notes']) - return notes - -raw_notes = get_conference_notes(venue, blind_submission=True) - - -st.title("ICLR2023 Papers Visualization") -st.write("Number of submissions at ICLR 2023:", len(raw_notes)) - -df_raw = pd.json_normalize(raw_notes) -# set index as first column -# df_raw.set_index(df_raw.columns[0], inplace=True) -accepted_venues = ['ICLR 2023 poster', 'ICLR 2023 notable top 5%', 'ICLR 2023 notable top 25%'] -df = df_raw[df_raw["content.venue"].isin(accepted_venues)] -st.write("Number of submissions accepted at ICLR 2023:", len(df)) - -df_filtered = df[['content.title', 'content.keywords', 'content.abstract', 'content.venue']] -df = df_filtered -if "CO_API_KEY" not in os.environ: - raise KeyError("CO_API_KEY not found in st.secrets or os.environ. Please set it in " - ".streamlit/secrets.toml or as an environment variable.") - -co = cohere.Client(os.environ["CO_API_KEY"]) - -def to_html(df: pd.DataFrame, table_header: str) -> str: - table_data = ''.join(df.html_table_content) - html = f''' - - {table_header} - {table_data} -
    ''' - return html - - -def get_visualizations(): - table_header = ''' - - Title - Keywords - Venue - Abstract - ''' - list_of_titles = list(df["content.title"].values) - embeds = co.embed(texts=list_of_titles, - model="small").embeddings - - embeds_npy = np.array(embeds) - - # Load and initialize BERTopic to use KMeans clustering with 8 clusters only. - cluster_model = KMeans(n_clusters=8) - topic_model = BERTopic(hdbscan_model=cluster_model) - - # df is a dataframe. df['title'] is the column of text we're modeling - df['topic'], probabilities = topic_model.fit_transform(df['content.title'], embeds_npy) - - app = Topically(os.environ["CO_API_KEY"]) - - df['topic_name'], topic_names = app.name_topics((df['content.title'], df['topic']), num_generations=5) - - #st.write("Topics extracted are:", topic_names) - - topic_model.set_topic_labels(topic_names) - fig1 = topic_model.visualize_documents(df['content.title'].values, - embeddings=embeds_npy, - topics = list(range(8)), - custom_labels=True) - topic_model.set_topic_labels(topic_names) - fig2 = topic_model.visualize_barchart(custom_labels=True) - st.plotly_chart(fig1) - st.plotly_chart(fig2) - - -st.button("Run Visualization", on_click=get_visualizations) diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/pyparsing/common.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/pyparsing/common.py deleted file mode 100644 index 1859fb79cc4e78850b69742fca56698041ce59f8..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/pyparsing/common.py +++ /dev/null @@ -1,424 +0,0 @@ -# common.py -from .core import * -from .helpers import delimited_list, any_open_tag, any_close_tag -from datetime import datetime - - -# some other useful expressions - using lower-case class name since we are really using this as a namespace -class pyparsing_common: - """Here are some common low-level expressions that may be useful in - jump-starting parser development: - - - numeric forms (:class:`integers`, :class:`reals`, - :class:`scientific notation`) - - common :class:`programming identifiers` - - network addresses (:class:`MAC`, - :class:`IPv4`, :class:`IPv6`) - - ISO8601 :class:`dates` and - :class:`datetime` - - :class:`UUID` - - :class:`comma-separated list` - - :class:`url` - - Parse actions: - - - :class:`convertToInteger` - - :class:`convertToFloat` - - :class:`convertToDate` - - :class:`convertToDatetime` - - :class:`stripHTMLTags` - - :class:`upcaseTokens` - - :class:`downcaseTokens` - - Example:: - - pyparsing_common.number.runTests(''' - # any int or real number, returned as the appropriate type - 100 - -100 - +100 - 3.14159 - 6.02e23 - 1e-12 - ''') - - pyparsing_common.fnumber.runTests(''' - # any int or real number, returned as float - 100 - -100 - +100 - 3.14159 - 6.02e23 - 1e-12 - ''') - - pyparsing_common.hex_integer.runTests(''' - # hex numbers - 100 - FF - ''') - - pyparsing_common.fraction.runTests(''' - # fractions - 1/2 - -3/4 - ''') - - pyparsing_common.mixed_integer.runTests(''' - # mixed fractions - 1 - 1/2 - -3/4 - 1-3/4 - ''') - - import uuid - pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID)) - pyparsing_common.uuid.runTests(''' - # uuid - 12345678-1234-5678-1234-567812345678 - ''') - - prints:: - - # any int or real number, returned as the appropriate type - 100 - [100] - - -100 - [-100] - - +100 - [100] - - 3.14159 - [3.14159] - - 6.02e23 - [6.02e+23] - - 1e-12 - [1e-12] - - # any int or real number, returned as 
float - 100 - [100.0] - - -100 - [-100.0] - - +100 - [100.0] - - 3.14159 - [3.14159] - - 6.02e23 - [6.02e+23] - - 1e-12 - [1e-12] - - # hex numbers - 100 - [256] - - FF - [255] - - # fractions - 1/2 - [0.5] - - -3/4 - [-0.75] - - # mixed fractions - 1 - [1] - - 1/2 - [0.5] - - -3/4 - [-0.75] - - 1-3/4 - [1.75] - - # uuid - 12345678-1234-5678-1234-567812345678 - [UUID('12345678-1234-5678-1234-567812345678')] - """ - - convert_to_integer = token_map(int) - """ - Parse action for converting parsed integers to Python int - """ - - convert_to_float = token_map(float) - """ - Parse action for converting parsed numbers to Python float - """ - - integer = Word(nums).set_name("integer").set_parse_action(convert_to_integer) - """expression that parses an unsigned integer, returns an int""" - - hex_integer = ( - Word(hexnums).set_name("hex integer").set_parse_action(token_map(int, 16)) - ) - """expression that parses a hexadecimal integer, returns an int""" - - signed_integer = ( - Regex(r"[+-]?\d+") - .set_name("signed integer") - .set_parse_action(convert_to_integer) - ) - """expression that parses an integer with optional leading sign, returns an int""" - - fraction = ( - signed_integer().set_parse_action(convert_to_float) - + "/" - + signed_integer().set_parse_action(convert_to_float) - ).set_name("fraction") - """fractional expression of an integer divided by an integer, returns a float""" - fraction.add_parse_action(lambda tt: tt[0] / tt[-1]) - - mixed_integer = ( - fraction | signed_integer + Opt(Opt("-").suppress() + fraction) - ).set_name("fraction or mixed integer-fraction") - """mixed integer of the form 'integer - fraction', with optional leading integer, returns float""" - mixed_integer.add_parse_action(sum) - - real = ( - Regex(r"[+-]?(?:\d+\.\d*|\.\d+)") - .set_name("real number") - .set_parse_action(convert_to_float) - ) - """expression that parses a floating point number and returns a float""" - - sci_real = ( - Regex(r"[+-]?(?:\d+(?:[eE][+-]?\d+)|(?:\d+\.\d*|\.\d+)(?:[eE][+-]?\d+)?)") - .set_name("real number with scientific notation") - .set_parse_action(convert_to_float) - ) - """expression that parses a floating point number with optional - scientific notation and returns a float""" - - # streamlining this expression makes the docs nicer-looking - number = (sci_real | real | signed_integer).setName("number").streamline() - """any numeric expression, returns the corresponding Python type""" - - fnumber = ( - Regex(r"[+-]?\d+\.?\d*([eE][+-]?\d+)?") - .set_name("fnumber") - .set_parse_action(convert_to_float) - ) - """any int or real number, returned as float""" - - identifier = Word(identchars, identbodychars).set_name("identifier") - """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')""" - - ipv4_address = Regex( - r"(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}" - ).set_name("IPv4 address") - "IPv4 address (``0.0.0.0 - 255.255.255.255``)" - - _ipv6_part = Regex(r"[0-9a-fA-F]{1,4}").set_name("hex_integer") - _full_ipv6_address = (_ipv6_part + (":" + _ipv6_part) * 7).set_name( - "full IPv6 address" - ) - _short_ipv6_address = ( - Opt(_ipv6_part + (":" + _ipv6_part) * (0, 6)) - + "::" - + Opt(_ipv6_part + (":" + _ipv6_part) * (0, 6)) - ).set_name("short IPv6 address") - _short_ipv6_address.add_condition( - lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8 - ) - _mixed_ipv6_address = ("::ffff:" + ipv4_address).set_name("mixed IPv6 address") - ipv6_address = Combine( - (_full_ipv6_address 
| _mixed_ipv6_address | _short_ipv6_address).set_name( - "IPv6 address" - ) - ).set_name("IPv6 address") - "IPv6 address (long, short, or mixed form)" - - mac_address = Regex( - r"[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}" - ).set_name("MAC address") - "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' delimiters)" - - @staticmethod - def convert_to_date(fmt: str = "%Y-%m-%d"): - """ - Helper to create a parse action for converting parsed date string to Python datetime.date - - Params - - - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%d"``) - - Example:: - - date_expr = pyparsing_common.iso8601_date.copy() - date_expr.setParseAction(pyparsing_common.convertToDate()) - print(date_expr.parseString("1999-12-31")) - - prints:: - - [datetime.date(1999, 12, 31)] - """ - - def cvt_fn(ss, ll, tt): - try: - return datetime.strptime(tt[0], fmt).date() - except ValueError as ve: - raise ParseException(ss, ll, str(ve)) - - return cvt_fn - - @staticmethod - def convert_to_datetime(fmt: str = "%Y-%m-%dT%H:%M:%S.%f"): - """Helper to create a parse action for converting parsed - datetime string to Python datetime.datetime - - Params - - - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%dT%H:%M:%S.%f"``) - - Example:: - - dt_expr = pyparsing_common.iso8601_datetime.copy() - dt_expr.setParseAction(pyparsing_common.convertToDatetime()) - print(dt_expr.parseString("1999-12-31T23:59:59.999")) - - prints:: - - [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)] - """ - - def cvt_fn(s, l, t): - try: - return datetime.strptime(t[0], fmt) - except ValueError as ve: - raise ParseException(s, l, str(ve)) - - return cvt_fn - - iso8601_date = Regex( - r"(?P\d{4})(?:-(?P\d\d)(?:-(?P\d\d))?)?" - ).set_name("ISO8601 date") - "ISO8601 date (``yyyy-mm-dd``)" - - iso8601_datetime = Regex( - r"(?P\d{4})-(?P\d\d)-(?P\d\d)[T ](?P\d\d):(?P\d\d)(:(?P\d\d(\.\d*)?)?)?(?PZ|[+-]\d\d:?\d\d)?" 
- ).set_name("ISO8601 datetime") - "ISO8601 datetime (``yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)``) - trailing seconds, milliseconds, and timezone optional; accepts separating ``'T'`` or ``' '``" - - uuid = Regex(r"[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}").set_name("UUID") - "UUID (``xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx``)" - - _html_stripper = any_open_tag.suppress() | any_close_tag.suppress() - - @staticmethod - def strip_html_tags(s: str, l: int, tokens: ParseResults): - """Parse action to remove HTML tags from web page HTML source - - Example:: - - # strip HTML links from normal text - text = 'More info at the pyparsing wiki page' - td, td_end = makeHTMLTags("TD") - table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end - print(table_text.parseString(text).body) - - Prints:: - - More info at the pyparsing wiki page - """ - return pyparsing_common._html_stripper.transform_string(tokens[0]) - - _commasepitem = ( - Combine( - OneOrMore( - ~Literal(",") - + ~LineEnd() - + Word(printables, exclude_chars=",") - + Opt(White(" \t") + ~FollowedBy(LineEnd() | ",")) - ) - ) - .streamline() - .set_name("commaItem") - ) - comma_separated_list = delimited_list( - Opt(quoted_string.copy() | _commasepitem, default="") - ).set_name("comma separated list") - """Predefined expression of 1 or more printable words or quoted strings, separated by commas.""" - - upcase_tokens = staticmethod(token_map(lambda t: t.upper())) - """Parse action to convert tokens to upper case.""" - - downcase_tokens = staticmethod(token_map(lambda t: t.lower())) - """Parse action to convert tokens to lower case.""" - - # fmt: off - url = Regex( - # https://mathiasbynens.be/demo/url-regex - # https://gist.github.com/dperini/729294 - r"^" + - # protocol identifier (optional) - # short syntax // still required - r"(?:(?:(?Phttps?|ftp):)?\/\/)" + - # user:pass BasicAuth (optional) - r"(?:(?P\S+(?::\S*)?)@)?" + - r"(?P" + - # IP address exclusion - # private & local networks - r"(?!(?:10|127)(?:\.\d{1,3}){3})" + - r"(?!(?:169\.254|192\.168)(?:\.\d{1,3}){2})" + - r"(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})" + - # IP address dotted notation octets - # excludes loopback network 0.0.0.0 - # excludes reserved space >= 224.0.0.0 - # excludes network & broadcast addresses - # (first & last IP address of each class) - r"(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])" + - r"(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}" + - r"(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))" + - r"|" + - # host & domain names, may end with dot - # can be replaced by a shortest alternative - # (?![-_])(?:[-\w\u00a1-\uffff]{0,63}[^-_]\.)+ - r"(?:" + - r"(?:" + - r"[a-z0-9\u00a1-\uffff]" + - r"[a-z0-9\u00a1-\uffff_-]{0,62}" + - r")?" + - r"[a-z0-9\u00a1-\uffff]\." + - r")+" + - # TLD identifier name, may end with dot - r"(?:[a-z\u00a1-\uffff]{2,}\.?)" + - r")" + - # port number (optional) - r"(:(?P\d{2,5}))?" + - # resource path (optional) - r"(?P\/[^?# ]*)?" + - # query string (optional) - r"(\?(?P[^#]*))?" + - # fragment (optional) - r"(#(?P\S*))?" 
+ - r"$" - ).set_name("url") - # fmt: on - - # pre-PEP8 compatibility names - convertToInteger = convert_to_integer - convertToFloat = convert_to_float - convertToDate = convert_to_date - convertToDatetime = convert_to_datetime - stripHTMLTags = strip_html_tags - upcaseTokens = upcase_tokens - downcaseTokens = downcase_tokens - - -_builtin_exprs = [ - v for v in vars(pyparsing_common).values() if isinstance(v, ParserElement) -] diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_imp.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_imp.py deleted file mode 100644 index 47efd792b3cd04f0646adf7d3ef1811d201f8873..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_imp.py +++ /dev/null @@ -1,82 +0,0 @@ -""" -Re-implementation of find_module and get_frozen_object -from the deprecated imp module. -""" - -import os -import importlib.util -import importlib.machinery - -from .py34compat import module_from_spec - - -PY_SOURCE = 1 -PY_COMPILED = 2 -C_EXTENSION = 3 -C_BUILTIN = 6 -PY_FROZEN = 7 - - -def find_spec(module, paths): - finder = ( - importlib.machinery.PathFinder().find_spec - if isinstance(paths, list) else - importlib.util.find_spec - ) - return finder(module, paths) - - -def find_module(module, paths=None): - """Just like 'imp.find_module()', but with package support""" - spec = find_spec(module, paths) - if spec is None: - raise ImportError("Can't find %s" % module) - if not spec.has_location and hasattr(spec, 'submodule_search_locations'): - spec = importlib.util.spec_from_loader('__init__.py', spec.loader) - - kind = -1 - file = None - static = isinstance(spec.loader, type) - if spec.origin == 'frozen' or static and issubclass( - spec.loader, importlib.machinery.FrozenImporter): - kind = PY_FROZEN - path = None # imp compabilty - suffix = mode = '' # imp compatibility - elif spec.origin == 'built-in' or static and issubclass( - spec.loader, importlib.machinery.BuiltinImporter): - kind = C_BUILTIN - path = None # imp compabilty - suffix = mode = '' # imp compatibility - elif spec.has_location: - path = spec.origin - suffix = os.path.splitext(path)[1] - mode = 'r' if suffix in importlib.machinery.SOURCE_SUFFIXES else 'rb' - - if suffix in importlib.machinery.SOURCE_SUFFIXES: - kind = PY_SOURCE - elif suffix in importlib.machinery.BYTECODE_SUFFIXES: - kind = PY_COMPILED - elif suffix in importlib.machinery.EXTENSION_SUFFIXES: - kind = C_EXTENSION - - if kind in {PY_SOURCE, PY_COMPILED}: - file = open(path, mode) - else: - path = None - suffix = mode = '' - - return file, path, (suffix, mode, kind) - - -def get_frozen_object(module, paths=None): - spec = find_spec(module, paths) - if not spec: - raise ImportError("Can't find %s" % module) - return spec.loader.get_code(module) - - -def get_module(module, paths, info): - spec = find_spec(module, paths) - if not spec: - raise ImportError("Can't find %s" % module) - return module_from_spec(spec) diff --git a/spaces/Realcat/image-matching-webui/third_party/ASpanFormer/src/utils/dataloader.py b/spaces/Realcat/image-matching-webui/third_party/ASpanFormer/src/utils/dataloader.py deleted file mode 100644 index b980dfd344714870ecdacd9e7a9742f51c3ee14d..0000000000000000000000000000000000000000 --- a/spaces/Realcat/image-matching-webui/third_party/ASpanFormer/src/utils/dataloader.py +++ /dev/null @@ -1,24 +0,0 @@ -import numpy as np - - -# --- PL-DATAMODULE --- - - -def get_local_split(items: list, world_size: int, rank: int, 
seed: int): - """The local rank only loads a split of the dataset.""" - n_items = len(items) - items_permute = np.random.RandomState(seed).permutation(items) - if n_items % world_size == 0: - padded_items = items_permute - else: - padding = np.random.RandomState(seed).choice( - items, world_size - (n_items % world_size), replace=True - ) - padded_items = np.concatenate([items_permute, padding]) - assert ( - len(padded_items) % world_size == 0 - ), f"len(padded_items): {len(padded_items)}; world_size: {world_size}; len(padding): {len(padding)}" - n_per_rank = len(padded_items) // world_size - local_items = padded_items[n_per_rank * rank : n_per_rank * (rank + 1)] - - return local_items diff --git a/spaces/Realcat/image-matching-webui/third_party/Roma/roma/models/transformer/layers/patch_embed.py b/spaces/Realcat/image-matching-webui/third_party/Roma/roma/models/transformer/layers/patch_embed.py deleted file mode 100644 index 837f952cf9a463444feeb146e0d5b539102ee26c..0000000000000000000000000000000000000000 --- a/spaces/Realcat/image-matching-webui/third_party/Roma/roma/models/transformer/layers/patch_embed.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -# References: -# https://github.com/facebookresearch/dino/blob/master/vision_transformer.py -# https://github.com/rwightman/pytorch-image-models/tree/master/timm/layers/patch_embed.py - -from typing import Callable, Optional, Tuple, Union - -from torch import Tensor -import torch.nn as nn - - -def make_2tuple(x): - if isinstance(x, tuple): - assert len(x) == 2 - return x - - assert isinstance(x, int) - return (x, x) - - -class PatchEmbed(nn.Module): - """ - 2D image to patch embedding: (B,C,H,W) -> (B,N,D) - - Args: - img_size: Image size. - patch_size: Patch token size. - in_chans: Number of input image channels. - embed_dim: Number of linear projection output channels. - norm_layer: Normalization layer. 
- """ - - def __init__( - self, - img_size: Union[int, Tuple[int, int]] = 224, - patch_size: Union[int, Tuple[int, int]] = 16, - in_chans: int = 3, - embed_dim: int = 768, - norm_layer: Optional[Callable] = None, - flatten_embedding: bool = True, - ) -> None: - super().__init__() - - image_HW = make_2tuple(img_size) - patch_HW = make_2tuple(patch_size) - patch_grid_size = ( - image_HW[0] // patch_HW[0], - image_HW[1] // patch_HW[1], - ) - - self.img_size = image_HW - self.patch_size = patch_HW - self.patches_resolution = patch_grid_size - self.num_patches = patch_grid_size[0] * patch_grid_size[1] - - self.in_chans = in_chans - self.embed_dim = embed_dim - - self.flatten_embedding = flatten_embedding - - self.proj = nn.Conv2d( - in_chans, embed_dim, kernel_size=patch_HW, stride=patch_HW - ) - self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity() - - def forward(self, x: Tensor) -> Tensor: - _, _, H, W = x.shape - patch_H, patch_W = self.patch_size - - assert ( - H % patch_H == 0 - ), f"Input image height {H} is not a multiple of patch height {patch_H}" - assert ( - W % patch_W == 0 - ), f"Input image width {W} is not a multiple of patch width: {patch_W}" - - x = self.proj(x) # B C H W - H, W = x.size(2), x.size(3) - x = x.flatten(2).transpose(1, 2) # B HW C - x = self.norm(x) - if not self.flatten_embedding: - x = x.reshape(-1, H, W, self.embed_dim) # B H W C - return x - - def flops(self) -> float: - Ho, Wo = self.patches_resolution - flops = ( - Ho - * Wo - * self.embed_dim - * self.in_chans - * (self.patch_size[0] * self.patch_size[1]) - ) - if self.norm is not None: - flops += Ho * Wo * self.embed_dim - return flops diff --git a/spaces/Realcat/image-matching-webui/third_party/TopicFM/viz/__init__.py b/spaces/Realcat/image-matching-webui/third_party/TopicFM/viz/__init__.py deleted file mode 100644 index f0efac33299da6fb8195ce70bcb9eb210f6cf658..0000000000000000000000000000000000000000 --- a/spaces/Realcat/image-matching-webui/third_party/TopicFM/viz/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .methods.patch2pix import VizPatch2Pix -from .methods.loftr import VizLoFTR -from .methods.topicfm import VizTopicFM diff --git a/spaces/RitaParadaRamos/SmallCapDemo/README.md b/spaces/RitaParadaRamos/SmallCapDemo/README.md deleted file mode 100644 index b3f3700742d9ea16d20d7a6760a5bc66c00dd83a..0000000000000000000000000000000000000000 --- a/spaces/RitaParadaRamos/SmallCapDemo/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: SmallCapDemo -emoji: 🔥 -colorFrom: yellow -colorTo: purple -sdk: gradio -sdk_version: 3.34.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/apis/__init__.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/apis/__init__.py deleted file mode 100644 index 1d8035b74877fdeccaa41cbc10a9f1f9924eac85..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/apis/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -from .inference import (async_inference_detector, inference_detector, - init_detector, show_result_pyplot) -from .test import multi_gpu_test, single_gpu_test -from .train import get_root_logger, set_random_seed, train_detector - -__all__ = [ - 'get_root_logger', 'set_random_seed', 'train_detector', 'init_detector', - 'async_inference_detector', 'inference_detector', 'show_result_pyplot', - 'multi_gpu_test', 'single_gpu_test' -] diff --git 
a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/necks/nas_fpn.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/necks/nas_fpn.py deleted file mode 100644 index 8e333ce65d4d06c47c29af489526ba3142736ad7..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/necks/nas_fpn.py +++ /dev/null @@ -1,160 +0,0 @@ -import torch.nn as nn -from mmcv.cnn import ConvModule, caffe2_xavier_init -from mmcv.ops.merge_cells import GlobalPoolingCell, SumCell - -from ..builder import NECKS - - -@NECKS.register_module() -class NASFPN(nn.Module): - """NAS-FPN. - - Implementation of `NAS-FPN: Learning Scalable Feature Pyramid Architecture - for Object Detection `_ - - Args: - in_channels (List[int]): Number of input channels per scale. - out_channels (int): Number of output channels (used at each scale) - num_outs (int): Number of output scales. - stack_times (int): The number of times the pyramid architecture will - be stacked. - start_level (int): Index of the start input backbone level used to - build the feature pyramid. Default: 0. - end_level (int): Index of the end input backbone level (exclusive) to - build the feature pyramid. Default: -1, which means the last level. - add_extra_convs (bool): It decides whether to add conv - layers on top of the original feature maps. Default to False. - If True, its actual mode is specified by `extra_convs_on_inputs`. - """ - - def __init__(self, - in_channels, - out_channels, - num_outs, - stack_times, - start_level=0, - end_level=-1, - add_extra_convs=False, - norm_cfg=None): - super(NASFPN, self).__init__() - assert isinstance(in_channels, list) - self.in_channels = in_channels - self.out_channels = out_channels - self.num_ins = len(in_channels) # num of input feature levels - self.num_outs = num_outs # num of output feature levels - self.stack_times = stack_times - self.norm_cfg = norm_cfg - - if end_level == -1: - self.backbone_end_level = self.num_ins - assert num_outs >= self.num_ins - start_level - else: - # if end_level < inputs, no extra level is allowed - self.backbone_end_level = end_level - assert end_level <= len(in_channels) - assert num_outs == end_level - start_level - self.start_level = start_level - self.end_level = end_level - self.add_extra_convs = add_extra_convs - - # add lateral connections - self.lateral_convs = nn.ModuleList() - for i in range(self.start_level, self.backbone_end_level): - l_conv = ConvModule( - in_channels[i], - out_channels, - 1, - norm_cfg=norm_cfg, - act_cfg=None) - self.lateral_convs.append(l_conv) - - # add extra downsample layers (stride-2 pooling or conv) - extra_levels = num_outs - self.backbone_end_level + self.start_level - self.extra_downsamples = nn.ModuleList() - for i in range(extra_levels): - extra_conv = ConvModule( - out_channels, out_channels, 1, norm_cfg=norm_cfg, act_cfg=None) - self.extra_downsamples.append( - nn.Sequential(extra_conv, nn.MaxPool2d(2, 2))) - - # add NAS FPN connections - self.fpn_stages = nn.ModuleList() - for _ in range(self.stack_times): - stage = nn.ModuleDict() - # gp(p6, p4) -> p4_1 - stage['gp_64_4'] = GlobalPoolingCell( - in_channels=out_channels, - out_channels=out_channels, - out_norm_cfg=norm_cfg) - # sum(p4_1, p4) -> p4_2 - stage['sum_44_4'] = SumCell( - in_channels=out_channels, - out_channels=out_channels, - out_norm_cfg=norm_cfg) - # sum(p4_2, p3) -> p3_out - stage['sum_43_3'] = SumCell( - in_channels=out_channels, - out_channels=out_channels, - 
out_norm_cfg=norm_cfg) - # sum(p3_out, p4_2) -> p4_out - stage['sum_34_4'] = SumCell( - in_channels=out_channels, - out_channels=out_channels, - out_norm_cfg=norm_cfg) - # sum(p5, gp(p4_out, p3_out)) -> p5_out - stage['gp_43_5'] = GlobalPoolingCell(with_out_conv=False) - stage['sum_55_5'] = SumCell( - in_channels=out_channels, - out_channels=out_channels, - out_norm_cfg=norm_cfg) - # sum(p7, gp(p5_out, p4_2)) -> p7_out - stage['gp_54_7'] = GlobalPoolingCell(with_out_conv=False) - stage['sum_77_7'] = SumCell( - in_channels=out_channels, - out_channels=out_channels, - out_norm_cfg=norm_cfg) - # gp(p7_out, p5_out) -> p6_out - stage['gp_75_6'] = GlobalPoolingCell( - in_channels=out_channels, - out_channels=out_channels, - out_norm_cfg=norm_cfg) - self.fpn_stages.append(stage) - - def init_weights(self): - """Initialize the weights of module.""" - for m in self.modules(): - if isinstance(m, nn.Conv2d): - caffe2_xavier_init(m) - - def forward(self, inputs): - """Forward function.""" - # build P3-P5 - feats = [ - lateral_conv(inputs[i + self.start_level]) - for i, lateral_conv in enumerate(self.lateral_convs) - ] - # build P6-P7 on top of P5 - for downsample in self.extra_downsamples: - feats.append(downsample(feats[-1])) - - p3, p4, p5, p6, p7 = feats - - for stage in self.fpn_stages: - # gp(p6, p4) -> p4_1 - p4_1 = stage['gp_64_4'](p6, p4, out_size=p4.shape[-2:]) - # sum(p4_1, p4) -> p4_2 - p4_2 = stage['sum_44_4'](p4_1, p4, out_size=p4.shape[-2:]) - # sum(p4_2, p3) -> p3_out - p3 = stage['sum_43_3'](p4_2, p3, out_size=p3.shape[-2:]) - # sum(p3_out, p4_2) -> p4_out - p4 = stage['sum_34_4'](p3, p4_2, out_size=p4.shape[-2:]) - # sum(p5, gp(p4_out, p3_out)) -> p5_out - p5_tmp = stage['gp_43_5'](p4, p3, out_size=p5.shape[-2:]) - p5 = stage['sum_55_5'](p5, p5_tmp, out_size=p5.shape[-2:]) - # sum(p7, gp(p5_out, p4_2)) -> p7_out - p7_tmp = stage['gp_54_7'](p5, p4_2, out_size=p7.shape[-2:]) - p7 = stage['sum_77_7'](p7, p7_tmp, out_size=p7.shape[-2:]) - # gp(p7_out, p5_out) -> p6_out - p6 = stage['gp_75_6'](p7, p5, out_size=p6.shape[-2:]) - - return p3, p4, p5, p6, p7 diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/configs/_base_/models/psanet_r50-d8.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/configs/_base_/models/psanet_r50-d8.py deleted file mode 100644 index 689513fa9d2a40f14bf0ae4ae61f38f0dcc1b3da..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/configs/_base_/models/psanet_r50-d8.py +++ /dev/null @@ -1,49 +0,0 @@ -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='EncoderDecoder', - pretrained='open-mmlab://resnet50_v1c', - backbone=dict( - type='ResNetV1c', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - dilations=(1, 1, 2, 4), - strides=(1, 2, 1, 1), - norm_cfg=norm_cfg, - norm_eval=False, - style='pytorch', - contract_dilation=True), - decode_head=dict( - type='PSAHead', - in_channels=2048, - in_index=3, - channels=512, - mask_size=(97, 97), - psa_type='bi-direction', - compact=False, - shrink_factor=2, - normalization_factor=1.0, - psa_softmax=True, - dropout_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), - auxiliary_head=dict( - type='FCNHead', - in_channels=1024, - in_index=2, - channels=256, - num_convs=1, - concat_input=False, - dropout_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - 
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), - # model training and testing settings - train_cfg=dict(), - test_cfg=dict(mode='whole')) diff --git a/spaces/SDbiaseval/identities-knn/README.md b/spaces/SDbiaseval/identities-knn/README.md deleted file mode 100644 index 22f84047528f37788c5e2f6163e7b34ada5c9aaa..0000000000000000000000000000000000000000 --- a/spaces/SDbiaseval/identities-knn/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Identities Knn -emoji: 📉 -colorFrom: green -colorTo: red -sdk: gradio -sdk_version: 3.16.2 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/SERER/VITS-Umamusume-voice-synthesizer/text/japanese.py b/spaces/SERER/VITS-Umamusume-voice-synthesizer/text/japanese.py deleted file mode 100644 index 375e4d50872d5c68ee57ca17470a2ca425425eba..0000000000000000000000000000000000000000 --- a/spaces/SERER/VITS-Umamusume-voice-synthesizer/text/japanese.py +++ /dev/null @@ -1,153 +0,0 @@ -import re -from unidecode import unidecode -import pyopenjtalk - - -# Regular expression matching Japanese without punctuation marks: -_japanese_characters = re.compile( - r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# Regular expression matching non-Japanese characters or punctuation marks: -_japanese_marks = re.compile( - r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# List of (symbol, Japanese) pairs for marks: -_symbols_to_japanese = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('%', 'パーセント') -]] - -# List of (romaji, ipa) pairs for marks: -_romaji_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('ts', 'ʦ'), - ('u', 'ɯ'), - ('j', 'ʥ'), - ('y', 'j'), - ('ni', 'n^i'), - ('nj', 'n^'), - ('hi', 'çi'), - ('hj', 'ç'), - ('f', 'ɸ'), - ('I', 'i*'), - ('U', 'ɯ*'), - ('r', 'ɾ') -]] - -# List of (romaji, ipa2) pairs for marks: -_romaji_to_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('u', 'ɯ'), - ('ʧ', 'tʃ'), - ('j', 'dʑ'), - ('y', 'j'), - ('ni', 'n^i'), - ('nj', 'n^'), - ('hi', 'çi'), - ('hj', 'ç'), - ('f', 'ɸ'), - ('I', 'i*'), - ('U', 'ɯ*'), - ('r', 'ɾ') -]] - -# List of (consonant, sokuon) pairs: -_real_sokuon = [(re.compile('%s' % x[0]), x[1]) for x in [ - (r'Q([↑↓]*[kg])', r'k#\1'), - (r'Q([↑↓]*[tdjʧ])', r't#\1'), - (r'Q([↑↓]*[sʃ])', r's\1'), - (r'Q([↑↓]*[pb])', r'p#\1') -]] - -# List of (consonant, hatsuon) pairs: -_real_hatsuon = [(re.compile('%s' % x[0]), x[1]) for x in [ - (r'N([↑↓]*[pbm])', r'm\1'), - (r'N([↑↓]*[ʧʥj])', r'n^\1'), - (r'N([↑↓]*[tdn])', r'n\1'), - (r'N([↑↓]*[kg])', r'ŋ\1') -]] - - -def symbols_to_japanese(text): - for regex, replacement in _symbols_to_japanese: - text = re.sub(regex, replacement, text) - return text - - -def japanese_to_romaji_with_accent(text): - '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html''' - text = symbols_to_japanese(text) - sentences = re.split(_japanese_marks, text) - marks = re.findall(_japanese_marks, text) - text = '' - for i, sentence in enumerate(sentences): - if re.match(_japanese_characters, sentence): - if text != '': - text += ' ' - labels = pyopenjtalk.extract_fullcontext(sentence) - for n, label in enumerate(labels): - phoneme = re.search(r'\-([^\+]*)\+', label).group(1) - if phoneme not in ['sil', 'pau']: - text += phoneme.replace('ch', 'ʧ').replace('sh', - 'ʃ').replace('cl', 'Q') - else: - continue - # n_moras = 
int(re.search(r'/F:(\d+)_', label).group(1)) - a1 = int(re.search(r"/A:(\-?[0-9]+)\+", label).group(1)) - a2 = int(re.search(r"\+(\d+)\+", label).group(1)) - a3 = int(re.search(r"\+(\d+)/", label).group(1)) - if re.search(r'\-([^\+]*)\+', labels[n + 1]).group(1) in ['sil', 'pau']: - a2_next = -1 - else: - a2_next = int( - re.search(r"\+(\d+)\+", labels[n + 1]).group(1)) - # Accent phrase boundary - if a3 == 1 and a2_next == 1: - text += ' ' - # Falling - elif a1 == 0 and a2_next == a2 + 1: - text += '↓' - # Rising - elif a2 == 1 and a2_next == 2: - text += '↑' - if i < len(marks): - text += unidecode(marks[i]).replace(' ', '') - return text - - -def get_real_sokuon(text): - for regex, replacement in _real_sokuon: - text = re.sub(regex, replacement, text) - return text - - -def get_real_hatsuon(text): - for regex, replacement in _real_hatsuon: - text = re.sub(regex, replacement, text) - return text - - -def japanese_to_ipa(text): - text = japanese_to_romaji_with_accent(text).replace('...', '…') - text = re.sub( - r'([aiueo])\1+', lambda x: x.group(0)[0]+'ː'*(len(x.group(0))-1), text) - text = get_real_sokuon(text) - text = get_real_hatsuon(text) - for regex, replacement in _romaji_to_ipa: - text = re.sub(regex, replacement, text) - return text - - -def japanese_to_ipa2(text): - text = japanese_to_romaji_with_accent(text).replace('...', '…') - text = get_real_sokuon(text) - text = get_real_hatsuon(text) - for regex, replacement in _romaji_to_ipa2: - text = re.sub(regex, replacement, text) - return text - - -def japanese_to_ipa3(text): - text = japanese_to_ipa2(text).replace('n^', 'ȵ').replace( - 'ʃ', 'ɕ').replace('*', '\u0325').replace('#', '\u031a') - text = re.sub( - r'([aiɯeo])\1+', lambda x: x.group(0)[0]+'ː'*(len(x.group(0))-1), text) - text = re.sub(r'((?:^|\s)(?:ts|tɕ|[kpt]))', r'\1ʰ', text) - return text diff --git a/spaces/SIGGRAPH2022/sketch2pose/src/pose.py b/spaces/SIGGRAPH2022/sketch2pose/src/pose.py deleted file mode 100644 index 886b33b1fc47b5ab6a9df8b3c6fe1fabfa77fff1..0000000000000000000000000000000000000000 --- a/spaces/SIGGRAPH2022/sketch2pose/src/pose.py +++ /dev/null @@ -1,1486 +0,0 @@ -import argparse -import math -from pathlib import Path - -import cv2 -import numpy as np -import PIL.Image as Image -import selfcontact -import selfcontact.losses -import shapely.geometry -import torch -import torch.nn as nn -import torch.optim as optim -import torchgeometry -import tqdm -import trimesh -from skimage import measure - -import fist_pose -import hist_cub -import losses -import pose_estimation -import spin - -PE_KSP_TO_SPIN = { - "Head": "Head", - "Neck": "Neck", - "Right Shoulder": "Right ForeArm", - "Right Arm": "Right Arm", - "Right Hand": "Right Hand", - "Left Shoulder": "Left ForeArm", - "Left Arm": "Left Arm", - "Left Hand": "Left Hand", - "Spine": "Spine1", - "Hips": "Hips", - "Right Upper Leg": "Right Upper Leg", - "Right Leg": "Right Leg", - "Right Foot": "Right Foot", - "Left Upper Leg": "Left Upper Leg", - "Left Leg": "Left Leg", - "Left Foot": "Left Foot", - "Left Toe": "Left Toe", - "Right Toe": "Right Toe", -} -MODELS_DIR = "models" - - -def parse_args(): - parser = argparse.ArgumentParser() - - parser.add_argument( - "--pose-estimation-model-path", - type=str, - default=f"./{MODELS_DIR}/hrn_w48_384x288.onnx", - help="Pose Estimation model", - ) - - parser.add_argument( - "--contact-model-path", - type=str, - default=f"./{MODELS_DIR}/contact_hrn_w32_256x192.onnx", - help="Contact model", - ) - - parser.add_argument( - "--device", - type=str, - default="cuda", - 
choices=["cpu", "cuda"], - help="Torch device", - ) - - parser.add_argument( - "--spin-model-path", - type=str, - default=f"./{MODELS_DIR}/spin_model_smplx_eft_18.pt", - help="SPIN model path", - ) - - parser.add_argument( - "--smpl-type", - type=str, - default="smplx", - choices=["smplx"], - help="SMPL model type", - ) - parser.add_argument( - "--smpl-model-dir", - type=str, - default=f"./{MODELS_DIR}/models/smplx", - help="SMPL model dir", - ) - parser.add_argument( - "--smpl-mean-params-path", - type=str, - default=f"./{MODELS_DIR}/data/smpl_mean_params.npz", - help="SMPL mean params", - ) - parser.add_argument( - "--essentials-dir", - type=str, - default=f"./{MODELS_DIR}/smplify-xmc-essentials", - help="SMPL Essentials folder for contacts", - ) - - parser.add_argument( - "--parametrization-path", - type=str, - default=f"./{MODELS_DIR}/smplx_parametrization/parametrization.npy", - help="Parametrization path", - ) - parser.add_argument( - "--bone-parametrization-path", - type=str, - default=f"./{MODELS_DIR}/smplx_parametrization/bone_to_param2.npy", - help="Bone parametrization path", - ) - parser.add_argument( - "--foot-inds-path", - type=str, - default=f"./{MODELS_DIR}/smplx_parametrization/foot_inds.npy", - help="Foot indinces", - ) - - parser.add_argument( - "--save-path", - type=str, - required=True, - help="Path to save the results", - ) - - parser.add_argument( - "--img-path", - type=str, - required=True, - help="Path to img to test", - ) - - parser.add_argument( - "--use-contacts", - action="store_true", - help="Use contact model", - ) - parser.add_argument( - "--use-msc", - action="store_true", - help="Use MSC loss", - ) - parser.add_argument( - "--use-natural", - action="store_true", - help="Use regularity", - ) - parser.add_argument( - "--use-cos", - action="store_true", - help="Use cos model", - ) - parser.add_argument( - "--use-angle-transf", - action="store_true", - help="Use cube foreshortening transformation", - ) - - parser.add_argument( - "--c-mse", - type=float, - default=0, - help="MSE weight", - ) - parser.add_argument( - "--c-par", - type=float, - default=10, - help="Parallel weight", - ) - - parser.add_argument( - "--c-f", - type=float, - default=1000, - help="Cos coef", - ) - parser.add_argument( - "--c-parallel", - type=float, - default=100, - help="Parallel weight", - ) - parser.add_argument( - "--c-reg", - type=float, - default=1000, - help="Regularity weight", - ) - parser.add_argument( - "--c-cont2d", - type=float, - default=1, - help="Contact 2D weight", - ) - parser.add_argument( - "--c-msc", - type=float, - default=17_500, - help="MSC weight", - ) - - parser.add_argument( - "--fist", - nargs="+", - type=str, - choices=list(fist_pose.INT_TO_FIST), - ) - - args = parser.parse_args() - - return args - - -def freeze_layers(model): - for module in model.modules(): - if type(module) is False: - continue - - if isinstance(module, nn.modules.batchnorm._BatchNorm): - module.eval() - for m in module.parameters(): - m.requires_grad = False - - if isinstance(module, nn.Dropout): - module.eval() - for m in module.parameters(): - m.requires_grad = False - - -def project_and_normalize_to_spin(vertices_3d, camera): - vertices_2d = vertices_3d # [:, :2] - - scale, translate = camera[0], camera[1:] - translate = scale.new_zeros(3) - translate[:2] = camera[1:] - - vertices_2d = vertices_2d + translate - vertices_2d = scale * vertices_2d + 1 - vertices_2d = spin.constants.IMG_RES / 2 * vertices_2d - - return vertices_2d - - -def project_and_normalize_to_spin_legs(vertices_3d, 
A, camera): - A, J = A - A = A[0] - J = J[0] - L = vertices_3d.new_tensor( - [ - [0.98619063, 0.16560926, 0.00127302], - [-0.16560601, 0.98603675, 0.01749799], - [0.00164258, -0.01746717, 0.99984609], - ] - ) - R = vertices_3d.new_tensor( - [ - [0.9910211, -0.13368178, -0.0025208], - [0.13367888, 0.99027076, 0.03864949], - [-0.00267045, -0.03863944, 0.99924965], - ] - ) - scale = camera[0] - R = A[2, :3, :3] @ R # 2 - right - L = A[1, :3, :3] @ L # 1 - left - r = J[5] - J[2] - l = J[4] - J[1] - - rleg = scale * spin.constants.IMG_RES / 2 * R @ r - lleg = scale * spin.constants.IMG_RES / 2 * L @ l - - rleg = rleg[:2] - lleg = lleg[:2] - - return rleg, lleg - - -def rotation_matrix_to_angle_axis(rotmat): - bs, n_joints, *_ = rotmat.size() - rotmat = torch.cat( - [ - rotmat.view(-1, 3, 3), - rotmat.new_tensor([0, 0, 1], dtype=torch.float32) - .view(bs, 3, 1) - .expand(n_joints, -1, -1), - ], - dim=-1, - ) - aa = torchgeometry.rotation_matrix_to_angle_axis(rotmat) - aa = aa.reshape(bs, 3 * n_joints) - - return aa - - -def get_smpl_output(smpl, rotmat, betas, use_betas=True, zero_hands=False): - if smpl.name() == "SMPL": - smpl_output = smpl( - betas=betas if use_betas else None, - body_pose=rotmat[:, 1:], - global_orient=rotmat[:, 0].unsqueeze(1), - pose2rot=False, - ) - elif smpl.name() == "SMPL-X": - rotmat = rotation_matrix_to_angle_axis(rotmat) - if zero_hands: - for i in [20, 21]: - rotmat[:, 3 * i : 3 * (i + 1)] = 0 - - for i in [12, 15]: # neck, head - rotmat[:, 3 * i + 1] = 0 # y - smpl_output = smpl( - betas=betas if use_betas else None, - body_pose=rotmat[:, 3:], - global_orient=rotmat[:, :3], - pose2rot=True, - ) - else: - raise NotImplementedError - - return smpl_output, rotmat - - -def get_predictions(model_hmr, smpl, input_img, use_betas=True, zero_hands=False): - input_img = input_img.unsqueeze(0) - rotmat, betas, camera = model_hmr(input_img) - - smpl_output, rotmat = get_smpl_output( - smpl, rotmat, betas, use_betas=use_betas, zero_hands=zero_hands - ) - - rotmat = rotmat.squeeze(0) - betas = betas.squeeze(0) - camera = camera.squeeze(0) - z = smpl_output.joints - z = z.squeeze(0) - - return rotmat, betas, camera, smpl_output, z - - -def get_pred_and_data( - model_hmr, smpl, selector, input_img, use_betas=True, zero_hands=False -): - rotmat, betas, camera, smpl_output, zz = get_predictions( - model_hmr, smpl, input_img, use_betas=use_betas, zero_hands=zero_hands - ) - - joints = smpl_output.joints.squeeze(0) - joints_2d = project_and_normalize_to_spin(joints, camera) - rleg, lleg = project_and_normalize_to_spin_legs(joints, smpl_output.A, camera) - joints_2d_orig = joints_2d - joints_2d = joints_2d[selector] - - vertices = smpl_output.vertices.squeeze(0) - vertices_2d = project_and_normalize_to_spin(vertices, camera) - - zz = zz[selector] - - return ( - rotmat, - betas, - camera, - joints_2d, - zz, - vertices_2d, - smpl_output, - (rleg, lleg), - joints_2d_orig, - ) - - -def normalize_keypoints_to_spin(keypoints_2d, img_size): - h, w = img_size - if h > w: # vertically - ax1 = 1 - ax2 = 0 - else: # horizontal - ax1 = 0 - ax2 = 1 - - shift = (img_size[ax1] - img_size[ax2]) / 2 - scale = spin.constants.IMG_RES / img_size[ax2] - keypoints_2d_normalized = np.copy(keypoints_2d) - keypoints_2d_normalized[:, ax2] -= shift - keypoints_2d_normalized *= scale - - return keypoints_2d_normalized, shift, scale, ax2 - - -def unnormalize_keypoints_from_spin(keypoints_2d, shift, scale, ax2): - keypoints_2d_normalized = np.copy(keypoints_2d) - keypoints_2d_normalized /= scale - 
keypoints_2d_normalized[:, ax2] += shift - - return keypoints_2d_normalized - - -def get_vertices_in_heatmap(contact_heatmap): - contact_heatmap_size = contact_heatmap.shape[:2] - label = measure.label(contact_heatmap) - - y_data_conts = [] - for i in range(1, label.max() + 1): - predicted_kps_contact = np.vstack(np.nonzero(label == i)[::-1]).T.astype( - "float" - ) - predicted_kps_contact_scaled, *_ = normalize_keypoints_to_spin( - predicted_kps_contact, contact_heatmap_size - ) - y_data_cont = torch.from_numpy(predicted_kps_contact_scaled).int().tolist() - y_data_cont = shapely.geometry.MultiPoint(y_data_cont).convex_hull - y_data_conts.append(y_data_cont) - - return y_data_conts - - -def get_contact_heatmap(model_contact, img_path, thresh=0.5): - contact_heatmap = pose_estimation.infer_single_image( - model_contact, - img_path, - input_img_size=(192, 256), - return_kps=False, - ) - contact_heatmap = contact_heatmap.squeeze(0) - contact_heatmap_orig = contact_heatmap.copy() - - mi = contact_heatmap.min() - ma = contact_heatmap.max() - contact_heatmap = (contact_heatmap - mi) / (ma - mi) - contact_heatmap_ = ((contact_heatmap > thresh) * 255).astype("uint8") - - contact_heatmap = np.repeat(contact_heatmap[..., None], repeats=3, axis=-1) - contact_heatmap = (contact_heatmap * 255).astype("uint8") - - return contact_heatmap_, contact_heatmap, contact_heatmap_orig - - -def discretize(parametrization, n_bins=100): - bins = np.linspace(0, 1, n_bins + 1) - inds = np.digitize(parametrization, bins) - disc_parametrization = bins[inds - 1] - - return disc_parametrization - - -def get_mapping_from_params_to_verts(verts, params): - mapping = {} - for v, t in zip(verts, params): - mapping.setdefault(t, []).append(v) - - return mapping - - -def find_contacts(y_data_conts, keypoints_2d, bone_to_params, thresh=12, step=0.0072246375): - n_bins = int(math.ceil(1 / step)) - 1 # mean face's circumradius - contact = [] - contact_2d = [] - for_mask = [] - for y_data_cont in y_data_conts: - contact_loc = [] - contact_2d_loc = [] - buffer = y_data_cont.buffer(thresh) - mask_add = False - for i, j in pose_estimation.SKELETON: - verts, t3d = bone_to_params[(i, j)] - if len(verts) == 0: - continue - - t3d = discretize(t3d, n_bins=n_bins) - t3d_to_verts = get_mapping_from_params_to_verts(verts, t3d) - t3d_to_verts_sorted = sorted(t3d_to_verts.items(), key=lambda x: x[0]) - t3d_sorted_np = np.array([x for x, _ in t3d_to_verts_sorted]) - - line = shapely.geometry.LineString([keypoints_2d[i], keypoints_2d[j]]) - lint = buffer.intersection(line) - if len(lint.boundary.geoms) < 2: - continue - - t2d_start = line.project(lint.boundary.geoms[0], normalized=True) - t2d_end = line.project(lint.boundary.geoms[1], normalized=True) - assert t2d_start <= t2d_end - - t2ds = discretize( - np.linspace(t2d_start, t2d_end, n_bins + 1), n_bins=n_bins - ) - to_add = False - for t2d in t2ds: - if t2d < t3d_sorted_np[0] or t2d > t3d_sorted_np[-1]: - continue - - t2d_ind = np.searchsorted(t3d_sorted_np, t2d) - c = t3d_to_verts_sorted[t2d_ind][1] - - contact_loc.extend(c) - to_add = True - mask_add = True - - if t2d_ind + 1 < len(t3d_to_verts_sorted): - c = t3d_to_verts_sorted[t2d_ind + 1][1] - contact_loc.extend(c) - - if t2d_ind > 0: - c = t3d_to_verts_sorted[t2d_ind - 1][1] - contact_loc.extend(c) - - if to_add: - contact_2d_loc.append((i, j, t2d_start + 0.5 * (t2d_end - t2d_start))) - - if mask_add: - for_mask.append(buffer.exterior.coords.xy) - - contact_loc = sorted(set(contact_loc)) - contact_loc = np.array(contact_loc, 
dtype="int") - contact.append(contact_loc) - contact_2d.append(contact_2d_loc) - - for_mask = [np.stack((x, y), axis=0).T[:, None].astype("int") for x, y in for_mask] - - return contact, contact_2d, for_mask - - -def optimize( - model_hmr, - smpl, - selector, - input_img, - keypoints_2d, - optimizer, - args, - loss_mse=None, - loss_parallel=None, - c_mse=0.0, - c_new_mse=1.0, - c_beta=1e-3, - sc_crit=None, - msc_crit=None, - contact=None, - n_steps=60, - i_ini=0, -): - mean_zfoot_val = {} - with tqdm.trange(n_steps) as pbar: - for i in pbar: - global_step = i + i_ini - optimizer.zero_grad() - - ( - rotmat_pred, - betas_pred, - camera_pred, - keypoints_3d_pred, - z, - vertices_2d_pred, - smpl_output, - (rleg, lleg), - joints_2d_orig, - ) = get_pred_and_data( - model_hmr, - smpl, - selector, - input_img, - ) - keypoints_2d_pred = keypoints_3d_pred[:, :2] - - loss = l2 = 0.0 - if c_mse > 0 and loss_mse is not None: - l2 = loss_mse(keypoints_2d_pred, keypoints_2d) - loss = loss + c_mse * l2 - - vertices_pred = smpl_output.vertices - - lpar = z_loss = loss_sh = 0.0 - if c_new_mse > 0 and loss_parallel is not None: - Ltan, Lcos, Lpar, Lspine, Lgr, Lstraight3d, Lcon2d = loss_parallel( - keypoints_3d_pred, - keypoints_2d, - z, - (rleg, lleg), - global_step=global_step, - ) - lpar = ( - Ltan - + c_new_mse * (args.c_f * Lcos + args.c_parallel * Lpar) - + Lspine - + args.c_reg * Lgr - + args.c_reg * Lstraight3d - + args.c_cont2d * Lcon2d - ) - loss = loss + 300 * lpar - - for side in ["left", "right"]: - attr = f"{side}_foot_inds" - if hasattr(loss_parallel, attr): - foot_inds = getattr(loss_parallel, attr) - zind = 1 - if attr not in mean_zfoot_val: - with torch.no_grad(): - mean_zfoot_val[attr] = torch.median( - vertices_pred[0, foot_inds, zind], dim=0 - ).values - - loss_foot = ( - (vertices_pred[0, foot_inds, zind] - mean_zfoot_val[attr]) - ** 2 - ).sum() - loss = loss + args.c_reg * loss_foot - - if hasattr(loss_parallel, "silhuette_vertices_inds"): - inds = loss_parallel.silhuette_vertices_inds - loss_sh = ( - (vertices_pred[0, inds, 1] - loss_parallel.ground) ** 2 - ).sum() - loss = loss + args.c_reg * loss_sh - - lbeta = (betas_pred**2).mean() - lcam = ((torch.exp(-camera_pred[0] * 10)) ** 2).mean() - loss = loss + c_beta * lbeta + lcam - - lgsc_a = gsc_contact_loss = faces_angle_loss = 0.0 - if sc_crit is not None: - gsc_contact_loss, faces_angle_loss = sc_crit( - vertices_pred, - ) - lgsc_a = 1000 * gsc_contact_loss + 0.1 * faces_angle_loss - loss = loss + lgsc_a - - msc_loss = 0.0 - if contact is not None and len(contact) > 0 and msc_crit is not None: - if not isinstance(contact, list): - contact = [contact] - - for cntct in contact: - msc_loss = msc_crit( - cntct, - vertices_pred, - ) - loss = loss + args.c_msc * msc_loss - - loss.backward() - optimizer.step() - - epoch_loss = loss.item() - pbar.set_postfix( - **{ - "l": f"{epoch_loss:.3}", - "l2": f"{l2:.3}", - "par": f"{lpar:.3}", - "beta": f"{lbeta:.3}", - "cam": f"{lcam:.3}", - "z": f"{z_loss:.3}", - "gsc_contact": f"{float(gsc_contact_loss):.3}", - "faces_angle": f"{float(faces_angle_loss):.3}", - "msc": f"{float(msc_loss):.3}", - } - ) - - with torch.no_grad(): - ( - rotmat_pred, - betas_pred, - camera_pred, - keypoints_3d_pred, - z, - vertices_2d_pred, - smpl_output, - (rleg, lleg), - joints_2d_orig, - ) = get_pred_and_data( - model_hmr, - smpl, - selector, - input_img, - zero_hands=True, - ) - - return ( - rotmat_pred, - betas_pred, - camera_pred, - keypoints_3d_pred, - vertices_2d_pred, - smpl_output, - z, - joints_2d_orig, - ) - 
- -def optimize_ft( - theta, - camera, - smpl, - selector, - keypoints_2d, - args, - loss_mse=None, - loss_parallel=None, - c_mse=0.0, - c_new_mse=1.0, - sc_crit=None, - msc_crit=None, - contact=None, - n_steps=60, - i_ini=0, - zero_hands=False, - fist=None, -): - mean_zfoot_val = {} - - theta = theta.detach().clone() - camera = camera.detach().clone() - rotmat_pred = nn.Parameter(theta) - camera_pred = nn.Parameter(camera) - optimizer = torch.optim.Adam( - [ - rotmat_pred, - camera_pred, - ], - lr=1e-3, - ) - global_step = i_ini - - with tqdm.trange(n_steps) as pbar: - for i in pbar: - global_step = i + i_ini - optimizer.zero_grad() - - global_orient = rotmat_pred[:3] - body_pose = rotmat_pred[3:] - smpl_output = smpl( - global_orient=global_orient.unsqueeze(0), - body_pose=body_pose.unsqueeze(0), - pose2rot=True, - ) - - z = smpl_output.joints - z = z.squeeze(0) - - joints = smpl_output.joints.squeeze(0) - joints_2d = project_and_normalize_to_spin(joints, camera_pred) - rleg, lleg = project_and_normalize_to_spin_legs( - joints, smpl_output.A, camera_pred - ) - joints_2d = joints_2d[selector] - z = z[selector] - keypoints_3d_pred = joints_2d - - keypoints_2d_pred = keypoints_3d_pred[:, :2] - - lprior = ((rotmat_pred - theta) ** 2).sum() + ( - (camera_pred - camera) ** 2 - ).sum() - loss = lprior - - l2 = 0.0 - if c_mse > 0 and loss_mse is not None: - l2 = loss_mse(keypoints_2d_pred, keypoints_2d) - loss = loss + c_mse * l2 - - vertices_pred = smpl_output.vertices - - lpar = z_loss = loss_sh = 0.0 - if c_new_mse > 0 and loss_parallel is not None: - Ltan, Lcos, Lpar, Lspine, Lgr, Lstraight3d, Lcon2d = loss_parallel( - keypoints_3d_pred, - keypoints_2d, - z, - (rleg, lleg), - global_step=global_step, - ) - lpar = ( - Ltan - + c_new_mse * (args.c_f * Lcos + args.c_parallel * Lpar) - + Lspine - + args.c_reg * Lgr - + args.c_reg * Lstraight3d - + args.c_cont2d * Lcon2d - ) - loss = loss + 300 * lpar - - for side in ["left", "right"]: - attr = f"{side}_foot_inds" - if hasattr(loss_parallel, attr): - foot_inds = getattr(loss_parallel, attr) - zind = 1 - if attr not in mean_zfoot_val: - with torch.no_grad(): - mean_zfoot_val[attr] = torch.median( - vertices_pred[0, foot_inds, zind], dim=0 - ).values - - loss_foot = ( - (vertices_pred[0, foot_inds, zind] - mean_zfoot_val[attr]) - ** 2 - ).sum() - loss = loss + args.c_reg * loss_foot - - if hasattr(loss_parallel, "silhuette_vertices_inds"): - inds = loss_parallel.silhuette_vertices_inds - loss_sh = ( - (vertices_pred[0, inds, 1] - loss_parallel.ground) ** 2 - ).sum() - loss = loss + args.c_reg * loss_sh - - lgsc_a = gsc_contact_loss = faces_angle_loss = 0.0 - if sc_crit is not None: - gsc_contact_loss, faces_angle_loss = sc_crit(vertices_pred) - lgsc_a = 1000 * gsc_contact_loss + 0.1 * faces_angle_loss - loss = loss + lgsc_a - - msc_loss = 0.0 - if contact is not None and len(contact) > 0 and msc_crit is not None: - if not isinstance(contact, list): - contact = [contact] - - for cntct in contact: - msc_loss = msc_crit( - cntct, - vertices_pred, - ) - loss = loss + args.c_msc * msc_loss - - loss.backward() - optimizer.step() - - epoch_loss = loss.item() - pbar.set_postfix( - **{ - "l": f"{epoch_loss:.3}", - "l2": f"{l2:.3}", - "par": f"{lpar:.3}", - "z": f"{z_loss:.3}", - "gsc_contact": f"{float(gsc_contact_loss):.3}", - "faces_angle": f"{float(faces_angle_loss):.3}", - "msc": f"{float(msc_loss):.3}", - } - ) - - rotmat_pred = rotmat_pred.detach() - - if zero_hands: - for i in [20, 21]: - rotmat_pred[3 * i : 3 * (i + 1)] = 0 - - for i in [12, 15]: # 
neck, head - rotmat_pred[3 * i + 1] = 0 # y - - global_orient = rotmat_pred[:3] - body_pose = rotmat_pred[3:] - left_hand_pose = None - right_hand_pose = None - if fist is not None: - left_hand_pose = rotmat_pred.new_tensor(fist_pose.LEFT_RELAXED).unsqueeze(0) - right_hand_pose = rotmat_pred.new_tensor(fist_pose.RIGHT_RELAXED).unsqueeze(0) - for f in fist: - pp = fist_pose.INT_TO_FIST[f] - if pp is not None: - pp = rotmat_pred.new_tensor(pp).unsqueeze(0) - - if f.startswith("lf"): - left_hand_pose = pp - elif f.startswith("rf"): - right_hand_pose = pp - elif f.startswith("l"): - body_pose[19 * 3 : 19 * 3 + 3] = pp - left_hand_pose = None - elif f.startswith("r"): - body_pose[20 * 3 : 20 * 3 + 3] = pp - right_hand_pose = None - else: - raise RuntimeError(f"No such hand pose: {f}") - - with torch.no_grad(): - smpl_output = smpl( - global_orient=global_orient.unsqueeze(0), - body_pose=body_pose.unsqueeze(0), - left_hand_pose=left_hand_pose, - right_hand_pose=right_hand_pose, - pose2rot=True, - ) - - return rotmat_pred, smpl_output - - -def create_bone(i, j, keypoints_2d): - a = keypoints_2d[i] - b = keypoints_2d[j] - ab = b - a - ab = torch.nn.functional.normalize(ab, dim=0) - - return ab - - -def is_parallel_to_plane(bone, thresh=21): - return abs(bone[0]) > math.cos(math.radians(thresh)) - - -def is_close_to_plane(bone, plane, thresh): - dist = abs(bone[0] - plane) - - return dist < thresh - - -def get_selector(): - selector = [] - for kp in pose_estimation.KPS: - tmp = spin.JOINT_NAMES.index(PE_KSP_TO_SPIN[kp]) - selector.append(tmp) - - return selector - - -def calc_cos(joints_2d, joints_3d): - cos = [] - for i, j in pose_estimation.SKELETON: - a = joints_2d[i] - joints_2d[j] - a = nn.functional.normalize(a, dim=0) - - b = joints_3d[i] - joints_3d[j] - b = nn.functional.normalize(b, dim=0)[:2] - - c = (a * b).sum() - cos.append(c) - - cos = torch.stack(cos, dim=0) - - return cos - - -def get_natural(keypoints_2d, vertices, right_foot_inds, left_foot_inds, loss_parallel, smpl): - height_2d = ( - keypoints_2d.max(dim=0).values[0] - keypoints_2d.min(dim=0).values[0] - ).item() - plane_2d = keypoints_2d.max(dim=0).values[0].item() - - ground_parallel = [] - parallel_in_3d = [] - parallel3d_bones = set() - - # parallel chains - for i, j, k in [ - ("Right Upper Leg", "Right Leg", "Right Foot"), - ("Right Leg", "Right Foot", "Right Toe"), # to remove? - ("Left Upper Leg", "Left Leg", "Left Foot"), - ("Left Leg", "Left Foot", "Left Toe"), # to remove? 
- ("Right Shoulder", "Right Arm", "Right Hand"), - ("Left Shoulder", "Left Arm", "Left Hand"), - # ("Hips", "Spine", "Neck"), - # ("Spine", "Neck", "Head"), - ]: - i = pose_estimation.KPS.index(i) - j = pose_estimation.KPS.index(j) - k = pose_estimation.KPS.index(k) - upleg_leg = create_bone(i, j, keypoints_2d) - leg_foot = create_bone(j, k, keypoints_2d) - - if is_parallel_to_plane(upleg_leg) and is_parallel_to_plane(leg_foot): - if is_close_to_plane( - upleg_leg, plane_2d, thresh=0.1 * height_2d - ) or is_close_to_plane(leg_foot, plane_2d, thresh=0.1 * height_2d): - ground_parallel.append(((i, j), 1)) - ground_parallel.append(((j, k), 1)) - - if (upleg_leg * leg_foot).sum() > math.cos(math.radians(21)): - parallel_in_3d.append(((i, j), (j, k))) - parallel3d_bones.add((i, j)) - parallel3d_bones.add((j, k)) - - # parallel feets - for i, j in [ - ("Right Foot", "Right Toe"), - ("Left Foot", "Left Toe"), - ]: - i = pose_estimation.KPS.index(i) - j = pose_estimation.KPS.index(j) - if (i, j) in parallel3d_bones: - continue - - foot_toe = create_bone(i, j, keypoints_2d) - if is_parallel_to_plane(foot_toe, thresh=25): - if "Right" in pose_estimation.KPS[i]: - loss_parallel.right_foot_inds = right_foot_inds - else: - loss_parallel.left_foot_inds = left_foot_inds - - loss_parallel.ground_parallel = ground_parallel - loss_parallel.parallel_in_3d = parallel_in_3d - - vertices_np = vertices[0].cpu().numpy() - if len(ground_parallel) > 0: - # Silhuette veritices - mesh = trimesh.Trimesh(vertices=vertices_np, faces=smpl.faces, process=False) - silhuette_vertices_mask_1 = np.abs(mesh.vertex_normals[..., 2]) < 2e-1 - height_3d = vertices_np[:, 1].max() - vertices_np[:, 1].min() - plane_3d = vertices_np[:, 1].max() - silhuette_vertices_mask_2 = ( - np.abs(vertices_np[:, 1] - plane_3d) < 0.15 * height_3d - ) - silhuette_vertices_mask = np.logical_and( - silhuette_vertices_mask_1, silhuette_vertices_mask_2 - ) - (silhuette_vertices_inds,) = np.where(silhuette_vertices_mask) - if len(silhuette_vertices_inds) > 0: - loss_parallel.silhuette_vertices_inds = silhuette_vertices_inds - loss_parallel.ground = plane_3d - - -def get_cos(keypoints_3d_pred, use_angle_transf, loss_parallel): - keypoints_2d_pred = keypoints_3d_pred[:, :2] - with torch.no_grad(): - cos_r = calc_cos(keypoints_2d_pred, keypoints_3d_pred) - - alpha = torch.acos(cos_r) - if use_angle_transf: - leg_inds = [ - 5, - 6, # right leg - 7, - 8, # left leg - ] - foot_inds = [15, 16] - nleg_inds = sorted( - set(range(len(pose_estimation.SKELETON))) - set(leg_inds) - set(foot_inds) - ) - alpha[nleg_inds] = alpha[nleg_inds] - alpha[nleg_inds].min() - - amli = alpha[leg_inds].min() - leg_inds.extend(foot_inds) - alpha[leg_inds] = alpha[leg_inds] - amli - - angles = alpha.detach().cpu().numpy() - angles = hist_cub.cub( - angles / (math.pi / 2), - a=1.2121212121212122, - b=-1.105527638190953, - c=0.787878787878789, - ) * (math.pi / 2) - alpha = alpha.new_tensor(angles) - - loss_parallel.cos = torch.cos(alpha) - - return cos_r - - -def get_contacts( - args, - sc_module, - y_data_conts, - keypoints_2d, - vertices, - bone_to_params, - loss_parallel, -): - use_contacts = args.use_contacts - use_msc = args.use_msc - c_mse = args.c_mse - - if use_contacts: - assert c_mse == 0 - contact, contact_2d, _ = find_contacts( - y_data_conts, keypoints_2d, bone_to_params - ) - if len(contact_2d) > 0: - loss_parallel.contact_2d = contact_2d - - if len(contact) == 0: - _, contact = sc_module.verts_in_contact(vertices, return_idx=True) - contact = 
contact.cpu().numpy().ravel() - elif use_msc: - _, contact = sc_module.verts_in_contact(vertices, return_idx=True) - contact = contact.cpu().numpy().ravel() - else: - contact = np.array([]) - - return contact - - -def save_mesh( - smpl, - smpl_output, - save_path, - fname, -): - mesh = trimesh.Trimesh( - vertices=smpl_output.vertices[0].cpu().numpy(), - faces=smpl.faces, - process=False, - ) - rot = trimesh.transformations.rotation_matrix(np.pi, [1, 0, 0]) - mesh.apply_transform(rot) - rot = trimesh.transformations.rotation_matrix(np.pi, [0, 1, 0]) - mesh.apply_transform(rot) - mesh.export(save_path / f"{fname}.glb") - - -def eft_step( - model_hmr, - smpl, - selector, - input_img, - keypoints_2d, - optimizer, - args, - loss_mse, - loss_parallel, - c_beta, - sc_module, - y_data_conts, - bone_to_params, -): - ( - _, - _, - _, - keypoints_3d_pred, - _, - smpl_output, - _, - _, - ) = optimize( - model_hmr, - smpl, - selector, - input_img, - keypoints_2d, - optimizer, - args, - loss_mse=loss_mse, - loss_parallel=loss_parallel, - c_mse=1, - c_new_mse=0, - c_beta=c_beta, - sc_crit=None, - msc_crit=None, - contact=None, - n_steps=60 + 90, - ) - - # find contacts - vertices = smpl_output.vertices.detach() - contact = get_contacts( - args, - sc_module, - y_data_conts, - keypoints_2d, - vertices, - bone_to_params, - loss_parallel, - ) - - return vertices, keypoints_3d_pred, contact - - -def dc_step( - model_hmr, - smpl, - selector, - input_img, - keypoints_2d, - optimizer, - args, - loss_mse, - loss_parallel, - c_mse, - c_new_mse, - c_beta, - sc_crit, - msc_crit, - contact, - use_contacts, - use_msc, -): - rotmat_pred, *_ = optimize( - model_hmr, - smpl, - selector, - input_img, - keypoints_2d, - optimizer, - args, - loss_mse=loss_mse, - loss_parallel=loss_parallel, - c_mse=c_mse, - c_new_mse=c_new_mse, - c_beta=c_beta, - sc_crit=sc_crit, - msc_crit=msc_crit if use_contacts or use_msc else None, - contact=contact if use_contacts or use_msc else None, - n_steps=60 if c_new_mse > 0 or use_contacts or use_msc else 0, # + 60,, - i_ini=60 + 90, - ) - - return rotmat_pred - - -def us_step( - model_hmr, - smpl, - selector, - input_img, - rotmat_pred, - keypoints_2d, - args, - loss_mse, - loss_parallel, - c_mse, - c_new_mse, - sc_crit, - msc_crit, - contact, - use_contacts, - use_msc, - save_path, -): - (_, _, camera_pred_us, _, _, _, smpl_output_us, _, _,) = get_pred_and_data( - model_hmr, - smpl, - selector, - input_img, - use_betas=False, - zero_hands=True, - ) - - _, smpl_output_us = optimize_ft( - rotmat_pred, - camera_pred_us, - smpl, - selector, - keypoints_2d, - args, - loss_mse=loss_mse, - loss_parallel=loss_parallel, - c_mse=c_mse, - c_new_mse=c_new_mse, - sc_crit=sc_crit, - msc_crit=msc_crit if use_contacts or use_msc else None, - contact=contact if use_contacts or use_msc else None, - n_steps=60 if use_contacts or use_msc else 0, # + 60, - i_ini=60 + 90 + 60, - zero_hands=True, - fist=args.fist, - ) - - save_mesh( - smpl, - smpl_output_us, - save_path, - "us", - ) - - -def main(): - args = parse_args() - print(args) - - # models - model_pose = cv2.dnn.readNetFromONNX( - args.pose_estimation_model_path - ) # "hrn_w48_384x288.onnx" - model_contact = cv2.dnn.readNetFromONNX( - args.contact_model_path - ) # "contact_hrn_w32_256x192.onnx" - - device = ( - torch.device(args.device) if torch.cuda.is_available() else torch.device("cpu") - ) - model_hmr = spin.hmr(args.smpl_mean_params_path) # "smpl_mean_params.npz" - model_hmr.to(device) - checkpoint = torch.load( - args.spin_model_path, # 
"spin_model_smplx_eft_18.pt" - map_location="cpu" - ) - - smpl = spin.SMPLX( - args.smpl_model_dir, # "models/smplx" - batch_size=1, - create_transl=False, - use_pca=False, - flat_hand_mean=args.fist is not None, - ) - smpl.to(device) - - selector = get_selector() - - use_contacts = args.use_contacts - use_msc = args.use_msc - - bone_to_params = np.load(args.bone_parametrization_path, allow_pickle=True).item() - foot_inds = np.load(args.foot_inds_path, allow_pickle=True).item() - left_foot_inds = foot_inds["left_foot_inds"] - right_foot_inds = foot_inds["right_foot_inds"] - - if use_contacts: - model_type = args.smpl_type - sc_module = selfcontact.SelfContact( - essentials_folder=args.essentials_dir, # "smplify-xmc-essentials" - geothres=0.3, - euclthres=0.02, - test_segments=True, - compute_hd=True, - model_type=model_type, - device=device, - ) - sc_module.to(device) - - sc_crit = selfcontact.losses.SelfContactLoss( - contact_module=sc_module, - inside_loss_weight=0.5, - outside_loss_weight=0.0, - contact_loss_weight=0.5, - align_faces=True, - use_hd=True, - test_segments=True, - device=device, - model_type=model_type, - ) - sc_crit.to(device) - - msc_crit = losses.MimickedSelfContactLoss(geodesics_mask=sc_module.geomask) - msc_crit.to(device) - else: - sc_module = None - sc_crit = None - msc_crit = None - - loss_mse = losses.MSE([1, 10, 13]) # Neck + Right Upper Leg + Left Upper Leg - - ignore = ( - (1, 2), # Neck + Right Shoulder - (1, 5), # Neck + Left Shoulder - (9, 10), # Hips + Right Upper Leg - (9, 13), # Hips + Left Upper Leg - ) - loss_parallel = losses.Parallel( - skeleton=pose_estimation.SKELETON, - ignore=ignore, - ) - - c_mse = args.c_mse - c_new_mse = args.c_par - c_beta = 1e-3 - - if c_mse > 0: - assert c_new_mse == 0 - elif c_mse == 0: - assert c_new_mse > 0 - - root_path = Path(args.save_path) - root_path.mkdir(exist_ok=True, parents=True) - - path_to_imgs = Path(args.img_path) - if path_to_imgs.is_dir(): - path_to_imgs = path_to_imgs.iterdir() - else: - path_to_imgs = [path_to_imgs] - - for img_path in path_to_imgs: - if not any( - img_path.name.lower().endswith(ext) for ext in [".jpg", ".png", ".jpeg"] - ): - continue - - img_name = img_path.stem - - # use 2d keypoints detection - ( - img_original, - predicted_keypoints_2d, - _, - _, - ) = pose_estimation.infer_single_image( - model_pose, - img_path, - input_img_size=pose_estimation.IMG_SIZE, - return_kps=True, - ) - - save_path = root_path / img_name - save_path.mkdir(exist_ok=True, parents=True) - - img_original = cv2.cvtColor(img_original, cv2.COLOR_BGR2RGB) - img_size_original = img_original.shape[:2] - keypoints_2d, *_ = normalize_keypoints_to_spin( - predicted_keypoints_2d, img_size_original - ) - keypoints_2d = torch.from_numpy(keypoints_2d) - keypoints_2d = keypoints_2d.to(device) - - ( - predicted_contact_heatmap, - predicted_contact_heatmap_raw, - very_hm_raw, - ) = get_contact_heatmap(model_contact, img_path) - predicted_contact_heatmap_raw = Image.fromarray( - predicted_contact_heatmap_raw - ).resize(img_size_original[::-1]) - predicted_contact_heatmap_raw = cv2.resize(very_hm_raw, img_size_original[::-1]) - - if c_new_mse == 0: - predicted_contact_heatmap_raw = None - - y_data_conts = get_vertices_in_heatmap(predicted_contact_heatmap) - - model_hmr.load_state_dict(checkpoint["model"], strict=True) - model_hmr.train() - freeze_layers(model_hmr) - - _, input_img = spin.process_image(img_path, input_res=spin.constants.IMG_RES) - input_img = input_img.to(device) - - optimizer = optim.Adam( - filter(lambda p: 
p.requires_grad, model_hmr.parameters()), - lr=1e-6, - ) - - vertices, keypoints_3d_pred, contact = eft_step( - model_hmr, - smpl, - selector, - input_img, - keypoints_2d, - optimizer, - args, - loss_mse, - loss_parallel, - c_beta, - sc_module, - y_data_conts, - bone_to_params, - ) - - if args.use_natural: - get_natural( - keypoints_2d, vertices, right_foot_inds, left_foot_inds, loss_parallel, smpl, - ) - - if args.use_cos: - get_cos(keypoints_3d_pred, args.use_angle_transf, loss_parallel) - - rotmat_pred = dc_step( - model_hmr, - smpl, - selector, - input_img, - keypoints_2d, - optimizer, - args, - loss_mse, - loss_parallel, - c_mse, - c_new_mse, - c_beta, - sc_crit, - msc_crit, - contact, - use_contacts, - use_msc, - ) - - us_step( - model_hmr, - smpl, - selector, - input_img, - rotmat_pred, - keypoints_2d, - args, - loss_mse, - loss_parallel, - c_mse, - c_new_mse, - sc_crit, - msc_crit, - contact, - use_contacts, - use_msc, - save_path, - ) - - -if __name__ == "__main__": - main() diff --git a/spaces/SashaKerbel/HandwritingClassifier/app.py b/spaces/SashaKerbel/HandwritingClassifier/app.py deleted file mode 100644 index 9a3563fced193940a341d55299ccce2eb143547e..0000000000000000000000000000000000000000 --- a/spaces/SashaKerbel/HandwritingClassifier/app.py +++ /dev/null @@ -1,30 +0,0 @@ -import gradio as gr -from fastai.vision.all import * -import skimage - -def label_func(x): - return x.parent.name - -learn = load_learner('model.pkl') - -labels = learn.dls.vocab - -def predict(img): - img = PILImage.create(img) - pred,pred_idx,probs = learn.predict(img) - return {labels[i]: float(probs[i] for i in range(len(labels)))} - -title = "MNIST" -description = "Fast.ai Lesson 2" -interpretation = 'default' -enable_queue = True - -gr.Interface( - fn = predict, - inputs = gr.inputs.Image(shape= (512,512)), - outputs = gr.outputs.Label(num_top_classes = 10), - title = title, - description = description, - interpretation = interpretation, - enable_queue = enable_queue -).launch() \ No newline at end of file diff --git a/spaces/ShoaibMajidDar/PDF-chatbot/app.py b/spaces/ShoaibMajidDar/PDF-chatbot/app.py deleted file mode 100644 index 4bbbf207c50a01cc7632bb44cb8ea1e840c24bdf..0000000000000000000000000000000000000000 --- a/spaces/ShoaibMajidDar/PDF-chatbot/app.py +++ /dev/null @@ -1,105 +0,0 @@ -import streamlit as st -import os -from apikey import get_apikey -from PyPDF2 import PdfReader -from langchain.text_splitter import RecursiveCharacterTextSplitter -from langchain.vectorstores import FAISS -from langchain.embeddings.openai import OpenAIEmbeddings -from langchain.chains import LLMChain -from langchain.prompts import PromptTemplate -from langchain.chat_models import ChatOpenAI -from langchain.memory import ConversationBufferMemory - - - -def make_context(docs): - context = "" - for doc in docs: - doc = doc.page_content + "\n\nSource: " + doc.metadata - context = context + doc + "\n\n" - return context - - - - -OPENAI_API_KEY = get_apikey() - - -os.environ['OPENAI_API_KEY'] = OPENAI_API_KEY - -if OPENAI_API_KEY: - llm = ChatOpenAI(model='gpt-3.5-turbo',temperature = 0, openai_api_key=OPENAI_API_KEY, max_tokens=800) - gptturbo = ChatOpenAI(model='gpt-3.5-turbo',temperature = 0, openai_api_key=OPENAI_API_KEY, max_tokens=800) - - - if "generated" not in st.session_state: - st.session_state["generated"] = [] - if "past" not in st.session_state: - st.session_state["past"] = [] - if "input" not in st.session_state: - st.session_state["input"] = "" - if "stored_session" not in st.session_state: - 
st.session_state["stored_session"] = [] - if "memory" not in st.session_state: - st.session_state.memory = ConversationBufferMemory(memory_key="chat_history") - - - - uploaded_file = st.file_uploader('Choose your .pdf file', type="pdf") - - - if uploaded_file is not None: - pdf_reader = PdfReader(uploaded_file) - data = "" - for page in pdf_reader.pages: - data += page.extract_text() - - text_splitter = RecursiveCharacterTextSplitter( - chunk_size = 800, - chunk_overlap = 0 - ) - - - texts = text_splitter.split_text(data) - embeddings = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY) - docsearch = FAISS.from_texts(texts, embedding = embeddings) - - question = question = st.text_input('Ask any question', key="input") - - if question: - docs = docsearch.similarity_search(query=question) - for doc in docs: - doc.metadata = uploaded_file.name - template = """ -your job is to answer the questions asked by the users. Create a final answer with references ("SOURCES"). -If the answer is not in the context, then try to answer it using your own knowledge. -Source of the context is written at the end of the context. -At the end of your answer write the source of the context in the following way: \n\nSource: (source) -Chat history is also provided to you. -Context: {context} ---- -Chat History: {chat_history} -Question: {question} -Answer: Let's think step by step and give best answer possible. Use points when needed. -""" - - context = make_context(docs) - prompt = PromptTemplate(template=template, input_variables=["context", "question", "chat_history"]).partial(context=context) - - llm_chain = LLMChain(prompt=prompt, llm=gptturbo, verbose=False, memory=st.session_state.memory) - - - response = llm_chain.run(question) - st.session_state.past.append(question) - st.session_state.generated.append(response) - - with st.expander("Conversation", expanded=True): - for i in range(len(st.session_state['generated'])-1, -1, -1): - st.info(st.session_state["past"][i],icon="🧐") - st.success(st.session_state["generated"][i], icon="🤖") - - - - - - diff --git a/spaces/Soumen/transform_image/README.md b/spaces/Soumen/transform_image/README.md deleted file mode 100644 index fa3ba653200d5204fc739fa1a8227225736c1212..0000000000000000000000000000000000000000 --- a/spaces/Soumen/transform_image/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Transform Image -emoji: 🏃 -colorFrom: purple -colorTo: yellow -sdk: streamlit -sdk_version: 1.10.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/SuSung-boy/LoRA-DreamBooth-Training-UI/uploader.py b/spaces/SuSung-boy/LoRA-DreamBooth-Training-UI/uploader.py deleted file mode 100644 index 0ce697f0d47325a4d73f92c13304ae5f51df794a..0000000000000000000000000000000000000000 --- a/spaces/SuSung-boy/LoRA-DreamBooth-Training-UI/uploader.py +++ /dev/null @@ -1,42 +0,0 @@ -from __future__ import annotations - -from huggingface_hub import HfApi - - -class Uploader: - def __init__(self, hf_token: str | None): - self.api = HfApi(token=hf_token) - - def get_username(self) -> str: - return self.api.whoami()['name'] - - def upload(self, - folder_path: str, - repo_name: str, - organization: str = '', - repo_type: str = 'model', - private: bool = True, - delete_existing_repo: bool = False) -> str: - if not folder_path: - raise ValueError - if not repo_name: - raise ValueError - if not organization: - organization = self.get_username() - repo_id = 
f'{organization}/{repo_name}' - if delete_existing_repo: - try: - self.api.delete_repo(repo_id, repo_type=repo_type) - except Exception: - pass - try: - self.api.create_repo(repo_id, repo_type=repo_type, private=private) - self.api.upload_folder(repo_id=repo_id, - folder_path=folder_path, - path_in_repo='.', - repo_type=repo_type) - url = f'https://huggingface.co/{repo_id}' - message = f'Your model was successfully uploaded to {url}.' - except Exception as e: - message = str(e) - return message diff --git a/spaces/SuYuanS/AudioCraft_Plus/audiocraft/adversarial/discriminators/base.py b/spaces/SuYuanS/AudioCraft_Plus/audiocraft/adversarial/discriminators/base.py deleted file mode 100644 index a9d517e9f5bf0f4e18252c45c8db3a35a7255f69..0000000000000000000000000000000000000000 --- a/spaces/SuYuanS/AudioCraft_Plus/audiocraft/adversarial/discriminators/base.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -from abc import ABC, abstractmethod -import typing as tp - -import torch -import torch.nn as nn - - -FeatureMapType = tp.List[torch.Tensor] -LogitsType = torch.Tensor -MultiDiscriminatorOutputType = tp.Tuple[tp.List[LogitsType], tp.List[FeatureMapType]] - - -class MultiDiscriminator(ABC, nn.Module): - """Base implementation for discriminators composed of sub-discriminators acting at different scales. - """ - def __init__(self): - super().__init__() - - @abstractmethod - def forward(self, x: torch.Tensor) -> MultiDiscriminatorOutputType: - ... - - @property - @abstractmethod - def num_discriminators(self) -> int: - """Number of discriminators. - """ - ... diff --git a/spaces/Subbu-2004/MyNewAiAvatar/README.md b/spaces/Subbu-2004/MyNewAiAvatar/README.md deleted file mode 100644 index 8dbd6895fe73986708a70cebe8789686856ced6f..0000000000000000000000000000000000000000 --- a/spaces/Subbu-2004/MyNewAiAvatar/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: MyNewAiAvatar -emoji: 🐠 -colorFrom: yellow -colorTo: indigo -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Sultannn/YOLOX_DEMO-Webcam/app.py b/spaces/Sultannn/YOLOX_DEMO-Webcam/app.py deleted file mode 100644 index af1d9e58b9b40ba55e9026ee1043ffad7f1a4f6a..0000000000000000000000000000000000000000 --- a/spaces/Sultannn/YOLOX_DEMO-Webcam/app.py +++ /dev/null @@ -1,21 +0,0 @@ -# fork of my space -# add feature webcam(rear camera) -import gradio as gr - -choices = ["YOLOXNano", "YOLOXTiny", "YOLOXS", "YOLOXM", "YOLOXL", "YOLOXX"] - -description = "Demo for YOLOX(Object Detection). Models are YOLOXNano - YOLOXX,with rear camera. Find original repo at https://huggingface.co/spaces/Sultannn/YOLOX-Demo" - -article = "YOLOX is an anchor-free version of YOLO, with a simpler design but better performance!

    For an explanation, see my repo 😁

    " - -gr.Interface.load( - "spaces/Sultannn/YOLOX-Demo", - inputs=[ - gr.inputs.Image(label="Input Image", source="webcam"), - gr.inputs.Dropdown(choices= choices, type="value", default='YOLOXS', label="Model") - ], - article = article, - description = description, - theme = "default", -).launch(), - \ No newline at end of file diff --git a/spaces/Sumit7864/Image-Enhancer/tests/test_discriminator_arch.py b/spaces/Sumit7864/Image-Enhancer/tests/test_discriminator_arch.py deleted file mode 100644 index c56a40c7743630aa63b3e99bca8dc1a85949c4c5..0000000000000000000000000000000000000000 --- a/spaces/Sumit7864/Image-Enhancer/tests/test_discriminator_arch.py +++ /dev/null @@ -1,19 +0,0 @@ -import torch - -from realesrgan.archs.discriminator_arch import UNetDiscriminatorSN - - -def test_unetdiscriminatorsn(): - """Test arch: UNetDiscriminatorSN.""" - - # model init and forward (cpu) - net = UNetDiscriminatorSN(num_in_ch=3, num_feat=4, skip_connection=True) - img = torch.rand((1, 3, 32, 32), dtype=torch.float32) - output = net(img) - assert output.shape == (1, 1, 32, 32) - - # model init and forward (gpu) - if torch.cuda.is_available(): - net.cuda() - output = net(img.cuda()) - assert output.shape == (1, 1, 32, 32) diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/utils/tokenutil.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/utils/tokenutil.py deleted file mode 100644 index 697d2b504a1b44cd5deee4a95733af4fd76ccd62..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/utils/tokenutil.py +++ /dev/null @@ -1,127 +0,0 @@ -"""Token-related utilities""" - -# Copyright (c) IPython Development Team. -# Distributed under the terms of the Modified BSD License. - -from collections import namedtuple -from io import StringIO -from keyword import iskeyword - -import tokenize - - -Token = namedtuple('Token', ['token', 'text', 'start', 'end', 'line']) - -def generate_tokens(readline): - """wrap generate_tokens to catch EOF errors""" - try: - for token in tokenize.generate_tokens(readline): - yield token - except tokenize.TokenError: - # catch EOF error - return - -def line_at_cursor(cell, cursor_pos=0): - """Return the line in a cell at a given cursor position - - Used for calling line-based APIs that don't support multi-line input, yet. - - Parameters - ---------- - cell : str - multiline block of text - cursor_pos : integer - the cursor position - - Returns - ------- - (line, offset): (string, integer) - The line with the current cursor, and the character offset of the start of the line. - """ - offset = 0 - lines = cell.splitlines(True) - for line in lines: - next_offset = offset + len(line) - if not line.endswith('\n'): - # If the last line doesn't have a trailing newline, treat it as if - # it does so that the cursor at the end of the line still counts - # as being on that line. - next_offset += 1 - if next_offset > cursor_pos: - break - offset = next_offset - else: - line = "" - return (line, offset) - -def token_at_cursor(cell, cursor_pos=0): - """Get the token at a given cursor - - Used for introspection. - - Function calls are prioritized, so the token for the callable will be returned - if the cursor is anywhere inside the call. 
- - Parameters - ---------- - cell : unicode - A block of Python code - cursor_pos : int - The location of the cursor in the block where the token should be found - """ - names = [] - tokens = [] - call_names = [] - - offsets = {1: 0} # lines start at 1 - for tup in generate_tokens(StringIO(cell).readline): - - tok = Token(*tup) - - # token, text, start, end, line = tup - start_line, start_col = tok.start - end_line, end_col = tok.end - if end_line + 1 not in offsets: - # keep track of offsets for each line - lines = tok.line.splitlines(True) - for lineno, line in enumerate(lines, start_line + 1): - if lineno not in offsets: - offsets[lineno] = offsets[lineno-1] + len(line) - - offset = offsets[start_line] - # allow '|foo' to find 'foo' at the beginning of a line - boundary = cursor_pos + 1 if start_col == 0 else cursor_pos - if offset + start_col >= boundary: - # current token starts after the cursor, - # don't consume it - break - - if tok.token == tokenize.NAME and not iskeyword(tok.text): - if names and tokens and tokens[-1].token == tokenize.OP and tokens[-1].text == '.': - names[-1] = "%s.%s" % (names[-1], tok.text) - else: - names.append(tok.text) - elif tok.token == tokenize.OP: - if tok.text == '=' and names: - # don't inspect the lhs of an assignment - names.pop(-1) - if tok.text == '(' and names: - # if we are inside a function call, inspect the function - call_names.append(names[-1]) - elif tok.text == ')' and call_names: - call_names.pop(-1) - - tokens.append(tok) - - if offsets[end_line] + end_col > cursor_pos: - # we found the cursor, stop reading - break - - if call_names: - return call_names[-1] - elif names: - return names[-1] - else: - return '' - - diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/_yaml/__init__.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/_yaml/__init__.py deleted file mode 100644 index 7baa8c4b68127d5cdf0be9a799429e61347c2694..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/_yaml/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# This is a stub package designed to roughly emulate the _yaml -# extension module, which previously existed as a standalone module -# and has been moved into the `yaml` package namespace. -# It does not perfectly mimic its old counterpart, but should get -# close enough for anyone who's relying on it even when they shouldn't. -import yaml - -# in some circumstances, the yaml module we imoprted may be from a different version, so we need -# to tread carefully when poking at it here (it may not have the attributes we expect) -if not getattr(yaml, '__with_libyaml__', False): - from sys import version_info - - exc = ModuleNotFoundError if version_info >= (3, 6) else ImportError - raise exc("No module named '_yaml'") -else: - from yaml._yaml import * - import warnings - warnings.warn( - 'The _yaml extension module is now located at yaml._yaml' - ' and its location is subject to change. To use the' - ' LibYAML-based parser and emitter, import from `yaml`:' - ' `from yaml import CLoader as Loader, CDumper as Dumper`.', - DeprecationWarning - ) - del warnings - # Don't `del yaml` here because yaml is actually an existing - # namespace member of _yaml. - -__name__ = '_yaml' -# If the module is top-level (i.e. not a part of any specific package) -# then the attribute should be set to ''. 
-# https://docs.python.org/3.8/library/types.html -__package__ = '' diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/aiohttp/streams.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/aiohttp/streams.py deleted file mode 100644 index 726b02326f66d37b9de1947cb78470479a7bc82b..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/aiohttp/streams.py +++ /dev/null @@ -1,660 +0,0 @@ -import asyncio -import collections -import warnings -from typing import Awaitable, Callable, Deque, Generic, List, Optional, Tuple, TypeVar - -from .base_protocol import BaseProtocol -from .helpers import BaseTimerContext, set_exception, set_result -from .log import internal_logger -from .typedefs import Final - -__all__ = ( - "EMPTY_PAYLOAD", - "EofStream", - "StreamReader", - "DataQueue", - "FlowControlDataQueue", -) - -_T = TypeVar("_T") - - -class EofStream(Exception): - """eof stream indication.""" - - -class AsyncStreamIterator(Generic[_T]): - def __init__(self, read_func: Callable[[], Awaitable[_T]]) -> None: - self.read_func = read_func - - def __aiter__(self) -> "AsyncStreamIterator[_T]": - return self - - async def __anext__(self) -> _T: - try: - rv = await self.read_func() - except EofStream: - raise StopAsyncIteration - if rv == b"": - raise StopAsyncIteration - return rv - - -class ChunkTupleAsyncStreamIterator: - def __init__(self, stream: "StreamReader") -> None: - self._stream = stream - - def __aiter__(self) -> "ChunkTupleAsyncStreamIterator": - return self - - async def __anext__(self) -> Tuple[bytes, bool]: - rv = await self._stream.readchunk() - if rv == (b"", False): - raise StopAsyncIteration - return rv - - -class AsyncStreamReaderMixin: - def __aiter__(self) -> AsyncStreamIterator[bytes]: - return AsyncStreamIterator(self.readline) # type: ignore[attr-defined] - - def iter_chunked(self, n: int) -> AsyncStreamIterator[bytes]: - """Returns an asynchronous iterator that yields chunks of size n. - - Python-3.5 available for Python 3.5+ only - """ - return AsyncStreamIterator( - lambda: self.read(n) # type: ignore[attr-defined,no-any-return] - ) - - def iter_any(self) -> AsyncStreamIterator[bytes]: - """Yield all available data as soon as it is received. - - Python-3.5 available for Python 3.5+ only - """ - return AsyncStreamIterator(self.readany) # type: ignore[attr-defined] - - def iter_chunks(self) -> ChunkTupleAsyncStreamIterator: - """Yield chunks of data as they are received by the server. - - The yielded objects are tuples - of (bytes, bool) as returned by the StreamReader.readchunk method. - - Python-3.5 available for Python 3.5+ only - """ - return ChunkTupleAsyncStreamIterator(self) # type: ignore[arg-type] - - -class StreamReader(AsyncStreamReaderMixin): - """An enhancement of asyncio.StreamReader. - - Supports asynchronous iteration by line, chunk or as available:: - - async for line in reader: - ... - async for chunk in reader.iter_chunked(1024): - ... - async for slice in reader.iter_any(): - ... 
- - """ - - total_bytes = 0 - - def __init__( - self, - protocol: BaseProtocol, - limit: int, - *, - timer: Optional[BaseTimerContext] = None, - loop: Optional[asyncio.AbstractEventLoop] = None, - ) -> None: - self._protocol = protocol - self._low_water = limit - self._high_water = limit * 2 - if loop is None: - loop = asyncio.get_event_loop() - self._loop = loop - self._size = 0 - self._cursor = 0 - self._http_chunk_splits: Optional[List[int]] = None - self._buffer: Deque[bytes] = collections.deque() - self._buffer_offset = 0 - self._eof = False - self._waiter: Optional[asyncio.Future[None]] = None - self._eof_waiter: Optional[asyncio.Future[None]] = None - self._exception: Optional[BaseException] = None - self._timer = timer - self._eof_callbacks: List[Callable[[], None]] = [] - - def __repr__(self) -> str: - info = [self.__class__.__name__] - if self._size: - info.append("%d bytes" % self._size) - if self._eof: - info.append("eof") - if self._low_water != 2**16: # default limit - info.append("low=%d high=%d" % (self._low_water, self._high_water)) - if self._waiter: - info.append("w=%r" % self._waiter) - if self._exception: - info.append("e=%r" % self._exception) - return "<%s>" % " ".join(info) - - def get_read_buffer_limits(self) -> Tuple[int, int]: - return (self._low_water, self._high_water) - - def exception(self) -> Optional[BaseException]: - return self._exception - - def set_exception(self, exc: BaseException) -> None: - self._exception = exc - self._eof_callbacks.clear() - - waiter = self._waiter - if waiter is not None: - self._waiter = None - set_exception(waiter, exc) - - waiter = self._eof_waiter - if waiter is not None: - self._eof_waiter = None - set_exception(waiter, exc) - - def on_eof(self, callback: Callable[[], None]) -> None: - if self._eof: - try: - callback() - except Exception: - internal_logger.exception("Exception in eof callback") - else: - self._eof_callbacks.append(callback) - - def feed_eof(self) -> None: - self._eof = True - - waiter = self._waiter - if waiter is not None: - self._waiter = None - set_result(waiter, None) - - waiter = self._eof_waiter - if waiter is not None: - self._eof_waiter = None - set_result(waiter, None) - - for cb in self._eof_callbacks: - try: - cb() - except Exception: - internal_logger.exception("Exception in eof callback") - - self._eof_callbacks.clear() - - def is_eof(self) -> bool: - """Return True if 'feed_eof' was called.""" - return self._eof - - def at_eof(self) -> bool: - """Return True if the buffer is empty and 'feed_eof' was called.""" - return self._eof and not self._buffer - - async def wait_eof(self) -> None: - if self._eof: - return - - assert self._eof_waiter is None - self._eof_waiter = self._loop.create_future() - try: - await self._eof_waiter - finally: - self._eof_waiter = None - - def unread_data(self, data: bytes) -> None: - """rollback reading some data from stream, inserting it to buffer head.""" - warnings.warn( - "unread_data() is deprecated " - "and will be removed in future releases (#3260)", - DeprecationWarning, - stacklevel=2, - ) - if not data: - return - - if self._buffer_offset: - self._buffer[0] = self._buffer[0][self._buffer_offset :] - self._buffer_offset = 0 - self._size += len(data) - self._cursor -= len(data) - self._buffer.appendleft(data) - self._eof_counter = 0 - - # TODO: size is ignored, remove the param later - def feed_data(self, data: bytes, size: int = 0) -> None: - assert not self._eof, "feed_data after feed_eof" - - if not data: - return - - self._size += len(data) - 
self._buffer.append(data) - self.total_bytes += len(data) - - waiter = self._waiter - if waiter is not None: - self._waiter = None - set_result(waiter, None) - - if self._size > self._high_water and not self._protocol._reading_paused: - self._protocol.pause_reading() - - def begin_http_chunk_receiving(self) -> None: - if self._http_chunk_splits is None: - if self.total_bytes: - raise RuntimeError( - "Called begin_http_chunk_receiving when" "some data was already fed" - ) - self._http_chunk_splits = [] - - def end_http_chunk_receiving(self) -> None: - if self._http_chunk_splits is None: - raise RuntimeError( - "Called end_chunk_receiving without calling " - "begin_chunk_receiving first" - ) - - # self._http_chunk_splits contains logical byte offsets from start of - # the body transfer. Each offset is the offset of the end of a chunk. - # "Logical" means bytes, accessible for a user. - # If no chunks containig logical data were received, current position - # is difinitely zero. - pos = self._http_chunk_splits[-1] if self._http_chunk_splits else 0 - - if self.total_bytes == pos: - # We should not add empty chunks here. So we check for that. - # Note, when chunked + gzip is used, we can receive a chunk - # of compressed data, but that data may not be enough for gzip FSM - # to yield any uncompressed data. That's why current position may - # not change after receiving a chunk. - return - - self._http_chunk_splits.append(self.total_bytes) - - # wake up readchunk when end of http chunk received - waiter = self._waiter - if waiter is not None: - self._waiter = None - set_result(waiter, None) - - async def _wait(self, func_name: str) -> None: - # StreamReader uses a future to link the protocol feed_data() method - # to a read coroutine. Running two read coroutines at the same time - # would have an unexpected behaviour. It would not possible to know - # which coroutine would get the next data. - if self._waiter is not None: - raise RuntimeError( - "%s() called while another coroutine is " - "already waiting for incoming data" % func_name - ) - - waiter = self._waiter = self._loop.create_future() - try: - if self._timer: - with self._timer: - await waiter - else: - await waiter - finally: - self._waiter = None - - async def readline(self) -> bytes: - return await self.readuntil() - - async def readuntil(self, separator: bytes = b"\n") -> bytes: - seplen = len(separator) - if seplen == 0: - raise ValueError("Separator should be at least one-byte string") - - if self._exception is not None: - raise self._exception - - chunk = b"" - chunk_size = 0 - not_enough = True - - while not_enough: - while self._buffer and not_enough: - offset = self._buffer_offset - ichar = self._buffer[0].find(separator, offset) + 1 - # Read from current offset to found separator or to the end. - data = self._read_nowait_chunk(ichar - offset if ichar else -1) - chunk += data - chunk_size += len(data) - if ichar: - not_enough = False - - if chunk_size > self._high_water: - raise ValueError("Chunk too big") - - if self._eof: - break - - if not_enough: - await self._wait("readuntil") - - return chunk - - async def read(self, n: int = -1) -> bytes: - if self._exception is not None: - raise self._exception - - # migration problem; with DataQueue you have to catch - # EofStream exception, so common way is to run payload.read() inside - # infinite loop. what can cause real infinite loop with StreamReader - # lets keep this code one major release. 
- if __debug__: - if self._eof and not self._buffer: - self._eof_counter = getattr(self, "_eof_counter", 0) + 1 - if self._eof_counter > 5: - internal_logger.warning( - "Multiple access to StreamReader in eof state, " - "might be infinite loop.", - stack_info=True, - ) - - if not n: - return b"" - - if n < 0: - # This used to just loop creating a new waiter hoping to - # collect everything in self._buffer, but that would - # deadlock if the subprocess sends more than self.limit - # bytes. So just call self.readany() until EOF. - blocks = [] - while True: - block = await self.readany() - if not block: - break - blocks.append(block) - return b"".join(blocks) - - # TODO: should be `if` instead of `while` - # because waiter maybe triggered on chunk end, - # without feeding any data - while not self._buffer and not self._eof: - await self._wait("read") - - return self._read_nowait(n) - - async def readany(self) -> bytes: - if self._exception is not None: - raise self._exception - - # TODO: should be `if` instead of `while` - # because waiter maybe triggered on chunk end, - # without feeding any data - while not self._buffer and not self._eof: - await self._wait("readany") - - return self._read_nowait(-1) - - async def readchunk(self) -> Tuple[bytes, bool]: - """Returns a tuple of (data, end_of_http_chunk). - - When chunked transfer - encoding is used, end_of_http_chunk is a boolean indicating if the end - of the data corresponds to the end of a HTTP chunk , otherwise it is - always False. - """ - while True: - if self._exception is not None: - raise self._exception - - while self._http_chunk_splits: - pos = self._http_chunk_splits.pop(0) - if pos == self._cursor: - return (b"", True) - if pos > self._cursor: - return (self._read_nowait(pos - self._cursor), True) - internal_logger.warning( - "Skipping HTTP chunk end due to data " - "consumption beyond chunk boundary" - ) - - if self._buffer: - return (self._read_nowait_chunk(-1), False) - # return (self._read_nowait(-1), False) - - if self._eof: - # Special case for signifying EOF. - # (b'', True) is not a final return value actually. - return (b"", False) - - await self._wait("readchunk") - - async def readexactly(self, n: int) -> bytes: - if self._exception is not None: - raise self._exception - - blocks: List[bytes] = [] - while n > 0: - block = await self.read(n) - if not block: - partial = b"".join(blocks) - raise asyncio.IncompleteReadError(partial, len(partial) + n) - blocks.append(block) - n -= len(block) - - return b"".join(blocks) - - def read_nowait(self, n: int = -1) -> bytes: - # default was changed to be consistent with .read(-1) - # - # I believe the most users don't know about the method and - # they are not affected. - if self._exception is not None: - raise self._exception - - if self._waiter and not self._waiter.done(): - raise RuntimeError( - "Called while some coroutine is waiting for incoming data." 
- ) - - return self._read_nowait(n) - - def _read_nowait_chunk(self, n: int) -> bytes: - first_buffer = self._buffer[0] - offset = self._buffer_offset - if n != -1 and len(first_buffer) - offset > n: - data = first_buffer[offset : offset + n] - self._buffer_offset += n - - elif offset: - self._buffer.popleft() - data = first_buffer[offset:] - self._buffer_offset = 0 - - else: - data = self._buffer.popleft() - - self._size -= len(data) - self._cursor += len(data) - - chunk_splits = self._http_chunk_splits - # Prevent memory leak: drop useless chunk splits - while chunk_splits and chunk_splits[0] < self._cursor: - chunk_splits.pop(0) - - if self._size < self._low_water and self._protocol._reading_paused: - self._protocol.resume_reading() - return data - - def _read_nowait(self, n: int) -> bytes: - """Read not more than n bytes, or whole buffer if n == -1""" - chunks = [] - - while self._buffer: - chunk = self._read_nowait_chunk(n) - chunks.append(chunk) - if n != -1: - n -= len(chunk) - if n == 0: - break - - return b"".join(chunks) if chunks else b"" - - -class EmptyStreamReader(StreamReader): # lgtm [py/missing-call-to-init] - def __init__(self) -> None: - pass - - def exception(self) -> Optional[BaseException]: - return None - - def set_exception(self, exc: BaseException) -> None: - pass - - def on_eof(self, callback: Callable[[], None]) -> None: - try: - callback() - except Exception: - internal_logger.exception("Exception in eof callback") - - def feed_eof(self) -> None: - pass - - def is_eof(self) -> bool: - return True - - def at_eof(self) -> bool: - return True - - async def wait_eof(self) -> None: - return - - def feed_data(self, data: bytes, n: int = 0) -> None: - pass - - async def readline(self) -> bytes: - return b"" - - async def read(self, n: int = -1) -> bytes: - return b"" - - # TODO add async def readuntil - - async def readany(self) -> bytes: - return b"" - - async def readchunk(self) -> Tuple[bytes, bool]: - return (b"", True) - - async def readexactly(self, n: int) -> bytes: - raise asyncio.IncompleteReadError(b"", n) - - def read_nowait(self, n: int = -1) -> bytes: - return b"" - - -EMPTY_PAYLOAD: Final[StreamReader] = EmptyStreamReader() - - -class DataQueue(Generic[_T]): - """DataQueue is a general-purpose blocking queue with one reader.""" - - def __init__(self, loop: asyncio.AbstractEventLoop) -> None: - self._loop = loop - self._eof = False - self._waiter: Optional[asyncio.Future[None]] = None - self._exception: Optional[BaseException] = None - self._size = 0 - self._buffer: Deque[Tuple[_T, int]] = collections.deque() - - def __len__(self) -> int: - return len(self._buffer) - - def is_eof(self) -> bool: - return self._eof - - def at_eof(self) -> bool: - return self._eof and not self._buffer - - def exception(self) -> Optional[BaseException]: - return self._exception - - def set_exception(self, exc: BaseException) -> None: - self._eof = True - self._exception = exc - - waiter = self._waiter - if waiter is not None: - self._waiter = None - set_exception(waiter, exc) - - def feed_data(self, data: _T, size: int = 0) -> None: - self._size += size - self._buffer.append((data, size)) - - waiter = self._waiter - if waiter is not None: - self._waiter = None - set_result(waiter, None) - - def feed_eof(self) -> None: - self._eof = True - - waiter = self._waiter - if waiter is not None: - self._waiter = None - set_result(waiter, None) - - async def read(self) -> _T: - if not self._buffer and not self._eof: - assert not self._waiter - self._waiter = self._loop.create_future() - 
try: - await self._waiter - except (asyncio.CancelledError, asyncio.TimeoutError): - self._waiter = None - raise - - if self._buffer: - data, size = self._buffer.popleft() - self._size -= size - return data - else: - if self._exception is not None: - raise self._exception - else: - raise EofStream - - def __aiter__(self) -> AsyncStreamIterator[_T]: - return AsyncStreamIterator(self.read) - - -class FlowControlDataQueue(DataQueue[_T]): - """FlowControlDataQueue resumes and pauses an underlying stream. - - It is a destination for parsed data. - """ - - def __init__( - self, protocol: BaseProtocol, limit: int, *, loop: asyncio.AbstractEventLoop - ) -> None: - super().__init__(loop=loop) - - self._protocol = protocol - self._limit = limit * 2 - - def feed_data(self, data: _T, size: int = 0) -> None: - super().feed_data(data, size) - - if self._size > self._limit and not self._protocol._reading_paused: - self._protocol.pause_reading() - - async def read(self) -> _T: - try: - return await super().read() - finally: - if self._size < self._limit and self._protocol._reading_paused: - self._protocol.resume_reading() diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/click/shell_completion.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/click/shell_completion.py deleted file mode 100644 index c17a8e643c6e3bf281819b0116017a97bdf6c997..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/click/shell_completion.py +++ /dev/null @@ -1,580 +0,0 @@ -import os -import re -import typing as t -from gettext import gettext as _ - -from .core import Argument -from .core import BaseCommand -from .core import Context -from .core import MultiCommand -from .core import Option -from .core import Parameter -from .core import ParameterSource -from .parser import split_arg_string -from .utils import echo - - -def shell_complete( - cli: BaseCommand, - ctx_args: t.Dict[str, t.Any], - prog_name: str, - complete_var: str, - instruction: str, -) -> int: - """Perform shell completion for the given CLI program. - - :param cli: Command being called. - :param ctx_args: Extra arguments to pass to - ``cli.make_context``. - :param prog_name: Name of the executable in the shell. - :param complete_var: Name of the environment variable that holds - the completion instruction. - :param instruction: Value of ``complete_var`` with the completion - instruction and shell, in the form ``instruction_shell``. - :return: Status code to exit with. - """ - shell, _, instruction = instruction.partition("_") - comp_cls = get_completion_class(shell) - - if comp_cls is None: - return 1 - - comp = comp_cls(cli, ctx_args, prog_name, complete_var) - - if instruction == "source": - echo(comp.source()) - return 0 - - if instruction == "complete": - echo(comp.complete()) - return 0 - - return 1 - - -class CompletionItem: - """Represents a completion value and metadata about the value. The - default metadata is ``type`` to indicate special shell handling, - and ``help`` if a shell supports showing a help string next to the - value. - - Arbitrary parameters can be passed when creating the object, and - accessed using ``item.attr``. If an attribute wasn't passed, - accessing it returns ``None``. - - :param value: The completion suggestion. - :param type: Tells the shell script to provide special completion - support for the type. Click uses ``"dir"`` and ``"file"``. - :param help: String shown next to the value if supported. - :param kwargs: Arbitrary metadata. 
The built-in implementations - don't use this, but custom type completions paired with custom - shell support could use it. - """ - - __slots__ = ("value", "type", "help", "_info") - - def __init__( - self, - value: t.Any, - type: str = "plain", - help: t.Optional[str] = None, - **kwargs: t.Any, - ) -> None: - self.value = value - self.type = type - self.help = help - self._info = kwargs - - def __getattr__(self, name: str) -> t.Any: - return self._info.get(name) - - -# Only Bash >= 4.4 has the nosort option. -_SOURCE_BASH = """\ -%(complete_func)s() { - local IFS=$'\\n' - local response - - response=$(env COMP_WORDS="${COMP_WORDS[*]}" COMP_CWORD=$COMP_CWORD \ -%(complete_var)s=bash_complete $1) - - for completion in $response; do - IFS=',' read type value <<< "$completion" - - if [[ $type == 'dir' ]]; then - COMPREPLY=() - compopt -o dirnames - elif [[ $type == 'file' ]]; then - COMPREPLY=() - compopt -o default - elif [[ $type == 'plain' ]]; then - COMPREPLY+=($value) - fi - done - - return 0 -} - -%(complete_func)s_setup() { - complete -o nosort -F %(complete_func)s %(prog_name)s -} - -%(complete_func)s_setup; -""" - -_SOURCE_ZSH = """\ -#compdef %(prog_name)s - -%(complete_func)s() { - local -a completions - local -a completions_with_descriptions - local -a response - (( ! $+commands[%(prog_name)s] )) && return 1 - - response=("${(@f)$(env COMP_WORDS="${words[*]}" COMP_CWORD=$((CURRENT-1)) \ -%(complete_var)s=zsh_complete %(prog_name)s)}") - - for type key descr in ${response}; do - if [[ "$type" == "plain" ]]; then - if [[ "$descr" == "_" ]]; then - completions+=("$key") - else - completions_with_descriptions+=("$key":"$descr") - fi - elif [[ "$type" == "dir" ]]; then - _path_files -/ - elif [[ "$type" == "file" ]]; then - _path_files -f - fi - done - - if [ -n "$completions_with_descriptions" ]; then - _describe -V unsorted completions_with_descriptions -U - fi - - if [ -n "$completions" ]; then - compadd -U -V unsorted -a completions - fi -} - -compdef %(complete_func)s %(prog_name)s; -""" - -_SOURCE_FISH = """\ -function %(complete_func)s; - set -l response; - - for value in (env %(complete_var)s=fish_complete COMP_WORDS=(commandline -cp) \ -COMP_CWORD=(commandline -t) %(prog_name)s); - set response $response $value; - end; - - for completion in $response; - set -l metadata (string split "," $completion); - - if test $metadata[1] = "dir"; - __fish_complete_directories $metadata[2]; - else if test $metadata[1] = "file"; - __fish_complete_path $metadata[2]; - else if test $metadata[1] = "plain"; - echo $metadata[2]; - end; - end; -end; - -complete --no-files --command %(prog_name)s --arguments \ -"(%(complete_func)s)"; -""" - - -class ShellComplete: - """Base class for providing shell completion support. A subclass for - a given shell will override attributes and methods to implement the - completion instructions (``source`` and ``complete``). - - :param cli: Command being called. - :param prog_name: Name of the executable in the shell. - :param complete_var: Name of the environment variable that holds - the completion instruction. - - .. versionadded:: 8.0 - """ - - name: t.ClassVar[str] - """Name to register the shell as with :func:`add_completion_class`. - This is used in completion instructions (``{name}_source`` and - ``{name}_complete``). - """ - - source_template: t.ClassVar[str] - """Completion script template formatted by :meth:`source`. This must - be provided by subclasses. 
- """ - - def __init__( - self, - cli: BaseCommand, - ctx_args: t.Dict[str, t.Any], - prog_name: str, - complete_var: str, - ) -> None: - self.cli = cli - self.ctx_args = ctx_args - self.prog_name = prog_name - self.complete_var = complete_var - - @property - def func_name(self) -> str: - """The name of the shell function defined by the completion - script. - """ - safe_name = re.sub(r"\W*", "", self.prog_name.replace("-", "_"), re.ASCII) - return f"_{safe_name}_completion" - - def source_vars(self) -> t.Dict[str, t.Any]: - """Vars for formatting :attr:`source_template`. - - By default this provides ``complete_func``, ``complete_var``, - and ``prog_name``. - """ - return { - "complete_func": self.func_name, - "complete_var": self.complete_var, - "prog_name": self.prog_name, - } - - def source(self) -> str: - """Produce the shell script that defines the completion - function. By default this ``%``-style formats - :attr:`source_template` with the dict returned by - :meth:`source_vars`. - """ - return self.source_template % self.source_vars() - - def get_completion_args(self) -> t.Tuple[t.List[str], str]: - """Use the env vars defined by the shell script to return a - tuple of ``args, incomplete``. This must be implemented by - subclasses. - """ - raise NotImplementedError - - def get_completions( - self, args: t.List[str], incomplete: str - ) -> t.List[CompletionItem]: - """Determine the context and last complete command or parameter - from the complete args. Call that object's ``shell_complete`` - method to get the completions for the incomplete value. - - :param args: List of complete args before the incomplete value. - :param incomplete: Value being completed. May be empty. - """ - ctx = _resolve_context(self.cli, self.ctx_args, self.prog_name, args) - obj, incomplete = _resolve_incomplete(ctx, args, incomplete) - return obj.shell_complete(ctx, incomplete) - - def format_completion(self, item: CompletionItem) -> str: - """Format a completion item into the form recognized by the - shell script. This must be implemented by subclasses. - - :param item: Completion item to format. - """ - raise NotImplementedError - - def complete(self) -> str: - """Produce the completion data to send back to the shell. - - By default this calls :meth:`get_completion_args`, gets the - completions, then calls :meth:`format_completion` for each - completion. - """ - args, incomplete = self.get_completion_args() - completions = self.get_completions(args, incomplete) - out = [self.format_completion(item) for item in completions] - return "\n".join(out) - - -class BashComplete(ShellComplete): - """Shell completion for Bash.""" - - name = "bash" - source_template = _SOURCE_BASH - - def _check_version(self) -> None: - import subprocess - - output = subprocess.run( - ["bash", "-c", "echo ${BASH_VERSION}"], stdout=subprocess.PIPE - ) - match = re.search(r"^(\d+)\.(\d+)\.\d+", output.stdout.decode()) - - if match is not None: - major, minor = match.groups() - - if major < "4" or major == "4" and minor < "4": - raise RuntimeError( - _( - "Shell completion is not supported for Bash" - " versions older than 4.4." 
- ) - ) - else: - raise RuntimeError( - _("Couldn't detect Bash version, shell completion is not supported.") - ) - - def source(self) -> str: - self._check_version() - return super().source() - - def get_completion_args(self) -> t.Tuple[t.List[str], str]: - cwords = split_arg_string(os.environ["COMP_WORDS"]) - cword = int(os.environ["COMP_CWORD"]) - args = cwords[1:cword] - - try: - incomplete = cwords[cword] - except IndexError: - incomplete = "" - - return args, incomplete - - def format_completion(self, item: CompletionItem) -> str: - return f"{item.type},{item.value}" - - -class ZshComplete(ShellComplete): - """Shell completion for Zsh.""" - - name = "zsh" - source_template = _SOURCE_ZSH - - def get_completion_args(self) -> t.Tuple[t.List[str], str]: - cwords = split_arg_string(os.environ["COMP_WORDS"]) - cword = int(os.environ["COMP_CWORD"]) - args = cwords[1:cword] - - try: - incomplete = cwords[cword] - except IndexError: - incomplete = "" - - return args, incomplete - - def format_completion(self, item: CompletionItem) -> str: - return f"{item.type}\n{item.value}\n{item.help if item.help else '_'}" - - -class FishComplete(ShellComplete): - """Shell completion for Fish.""" - - name = "fish" - source_template = _SOURCE_FISH - - def get_completion_args(self) -> t.Tuple[t.List[str], str]: - cwords = split_arg_string(os.environ["COMP_WORDS"]) - incomplete = os.environ["COMP_CWORD"] - args = cwords[1:] - - # Fish stores the partial word in both COMP_WORDS and - # COMP_CWORD, remove it from complete args. - if incomplete and args and args[-1] == incomplete: - args.pop() - - return args, incomplete - - def format_completion(self, item: CompletionItem) -> str: - if item.help: - return f"{item.type},{item.value}\t{item.help}" - - return f"{item.type},{item.value}" - - -_available_shells: t.Dict[str, t.Type[ShellComplete]] = { - "bash": BashComplete, - "fish": FishComplete, - "zsh": ZshComplete, -} - - -def add_completion_class( - cls: t.Type[ShellComplete], name: t.Optional[str] = None -) -> None: - """Register a :class:`ShellComplete` subclass under the given name. - The name will be provided by the completion instruction environment - variable during completion. - - :param cls: The completion class that will handle completion for the - shell. - :param name: Name to register the class under. Defaults to the - class's ``name`` attribute. - """ - if name is None: - name = cls.name - - _available_shells[name] = cls - - -def get_completion_class(shell: str) -> t.Optional[t.Type[ShellComplete]]: - """Look up a registered :class:`ShellComplete` subclass by the name - provided by the completion instruction environment variable. If the - name isn't registered, returns ``None``. - - :param shell: Name the class is registered under. - """ - return _available_shells.get(shell) - - -def _is_incomplete_argument(ctx: Context, param: Parameter) -> bool: - """Determine if the given parameter is an argument that can still - accept values. - - :param ctx: Invocation context for the command represented by the - parsed complete args. - :param param: Argument object being checked. 
- """ - if not isinstance(param, Argument): - return False - - assert param.name is not None - value = ctx.params[param.name] - return ( - param.nargs == -1 - or ctx.get_parameter_source(param.name) is not ParameterSource.COMMANDLINE - or ( - param.nargs > 1 - and isinstance(value, (tuple, list)) - and len(value) < param.nargs - ) - ) - - -def _start_of_option(ctx: Context, value: str) -> bool: - """Check if the value looks like the start of an option.""" - if not value: - return False - - c = value[0] - return c in ctx._opt_prefixes - - -def _is_incomplete_option(ctx: Context, args: t.List[str], param: Parameter) -> bool: - """Determine if the given parameter is an option that needs a value. - - :param args: List of complete args before the incomplete value. - :param param: Option object being checked. - """ - if not isinstance(param, Option): - return False - - if param.is_flag or param.count: - return False - - last_option = None - - for index, arg in enumerate(reversed(args)): - if index + 1 > param.nargs: - break - - if _start_of_option(ctx, arg): - last_option = arg - - return last_option is not None and last_option in param.opts - - -def _resolve_context( - cli: BaseCommand, ctx_args: t.Dict[str, t.Any], prog_name: str, args: t.List[str] -) -> Context: - """Produce the context hierarchy starting with the command and - traversing the complete arguments. This only follows the commands, - it doesn't trigger input prompts or callbacks. - - :param cli: Command being called. - :param prog_name: Name of the executable in the shell. - :param args: List of complete args before the incomplete value. - """ - ctx_args["resilient_parsing"] = True - ctx = cli.make_context(prog_name, args.copy(), **ctx_args) - args = ctx.protected_args + ctx.args - - while args: - command = ctx.command - - if isinstance(command, MultiCommand): - if not command.chain: - name, cmd, args = command.resolve_command(ctx, args) - - if cmd is None: - return ctx - - ctx = cmd.make_context(name, args, parent=ctx, resilient_parsing=True) - args = ctx.protected_args + ctx.args - else: - while args: - name, cmd, args = command.resolve_command(ctx, args) - - if cmd is None: - return ctx - - sub_ctx = cmd.make_context( - name, - args, - parent=ctx, - allow_extra_args=True, - allow_interspersed_args=False, - resilient_parsing=True, - ) - args = sub_ctx.args - - ctx = sub_ctx - args = [*sub_ctx.protected_args, *sub_ctx.args] - else: - break - - return ctx - - -def _resolve_incomplete( - ctx: Context, args: t.List[str], incomplete: str -) -> t.Tuple[t.Union[BaseCommand, Parameter], str]: - """Find the Click object that will handle the completion of the - incomplete value. Return the object and the incomplete value. - - :param ctx: Invocation context for the command represented by - the parsed complete args. - :param args: List of complete args before the incomplete value. - :param incomplete: Value being completed. May be empty. - """ - # Different shells treat an "=" between a long option name and - # value differently. Might keep the value joined, return the "=" - # as a separate item, or return the split name and value. Always - # split and discard the "=" to make completion easier. - if incomplete == "=": - incomplete = "" - elif "=" in incomplete and _start_of_option(ctx, incomplete): - name, _, incomplete = incomplete.partition("=") - args.append(name) - - # The "--" marker tells Click to stop treating values as options - # even if they start with the option character. 
If it hasn't been - # given and the incomplete arg looks like an option, the current - # command will provide option name completions. - if "--" not in args and _start_of_option(ctx, incomplete): - return ctx.command, incomplete - - params = ctx.command.get_params(ctx) - - # If the last complete arg is an option name with an incomplete - # value, the option will provide value completions. - for param in params: - if _is_incomplete_option(ctx, args, param): - return param, incomplete - - # It's not an option name or value. The first argument without a - # parsed value will provide value completions. - for param in params: - if _is_incomplete_argument(ctx, param): - return param, incomplete - - # There were no unparsed arguments, the command may be a group that - # will provide command name completions. - return ctx.command, incomplete diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/docarray/array/doc_vec/__init__.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/docarray/array/doc_vec/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/docarray/documents/mesh/vertices_and_faces.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/docarray/documents/mesh/vertices_and_faces.py deleted file mode 100644 index 758f0acc6b0978d61695131e6611117c9ce0993c..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/docarray/documents/mesh/vertices_and_faces.py +++ /dev/null @@ -1,50 +0,0 @@ -from typing import TYPE_CHECKING, Any, Type, TypeVar, Union - -from docarray.base_doc import BaseDoc -from docarray.typing.tensor.tensor import AnyTensor -from docarray.utils._internal.misc import import_library - -T = TypeVar('T', bound='VerticesAndFaces') - - -class VerticesAndFaces(BaseDoc): - """ - Document for handling the tensor data of a [`Mesh3D`][docarray.documents.mesh.Mesh3D] object. - - A VerticesAndFaces Document can contain: - - - an [`AnyTensor`](../../../../api_references/typing/tensor/tensor) - containing the vertices information (`VerticesAndFaces.vertices`) - - an [`AnyTensor`](../../../../api_references/typing/tensor/tensor) - containing the faces information (`VerticesAndFaces.faces`) - """ - - vertices: AnyTensor - faces: AnyTensor - - @classmethod - def validate( - cls: Type[T], - value: Union[str, Any], - ) -> T: - return super().validate(value) - - def display(self) -> None: - """ - Plot mesh consisting of vertices and faces. - """ - if TYPE_CHECKING: - import trimesh - else: - trimesh = import_library('trimesh', raise_error=True) - - from IPython.display import display - - if self.vertices is None or self.faces is None: - raise ValueError( - 'Can\'t display mesh from tensors when the vertices and/or faces ' - 'are None.' 
- ) - - mesh = trimesh.Trimesh(vertices=self.vertices, faces=self.faces) - display(mesh.show()) diff --git a/spaces/Superlang/ImageProcessor/annotator/oneformer/oneformer/modeling/pixel_decoder/ops/modules/ms_deform_attn.py b/spaces/Superlang/ImageProcessor/annotator/oneformer/oneformer/modeling/pixel_decoder/ops/modules/ms_deform_attn.py deleted file mode 100644 index 5bc471d2da550c839a3446a6041e40d338425129..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/oneformer/oneformer/modeling/pixel_decoder/ops/modules/ms_deform_attn.py +++ /dev/null @@ -1,120 +0,0 @@ -# ------------------------------------------------------------------------------------------------ -# Deformable DETR -# Copyright (c) 2020 SenseTime. All Rights Reserved. -# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------------------------------ -# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 -# ------------------------------------------------------------------------------------------------ - -# Copyright (c) Facebook, Inc. and its affiliates. -# Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR - -from __future__ import absolute_import -from __future__ import print_function -from __future__ import division - -import warnings -import math - -import torch -from torch import nn -import torch.nn.functional as F -from torch.nn.init import xavier_uniform_, constant_ - -MSDeformAttnFunction = None -from ..functions.ms_deform_attn_func import ms_deform_attn_core_pytorch - - -def _is_power_of_2(n): - if (not isinstance(n, int)) or (n < 0): - raise ValueError("invalid input for _is_power_of_2: {} (type: {})".format(n, type(n))) - return (n & (n-1) == 0) and n != 0 - - -class MSDeformAttn(nn.Module): - def __init__(self, d_model=256, n_levels=4, n_heads=8, n_points=4): - """ - Multi-Scale Deformable Attention Module - :param d_model hidden dimension - :param n_levels number of feature levels - :param n_heads number of attention heads - :param n_points number of sampling points per attention head per feature level - """ - super().__init__() - if d_model % n_heads != 0: - raise ValueError('d_model must be divisible by n_heads, but got {} and {}'.format(d_model, n_heads)) - _d_per_head = d_model // n_heads - # you'd better set _d_per_head to a power of 2 which is more efficient in our CUDA implementation - if not _is_power_of_2(_d_per_head): - warnings.warn("You'd better set d_model in MSDeformAttn to make the dimension of each attention head a power of 2 " - "which is more efficient in our CUDA implementation.") - - self.im2col_step = 128 - - self.d_model = d_model - self.n_levels = n_levels - self.n_heads = n_heads - self.n_points = n_points - - self.sampling_offsets = nn.Linear(d_model, n_heads * n_levels * n_points * 2) - self.attention_weights = nn.Linear(d_model, n_heads * n_levels * n_points) - self.value_proj = nn.Linear(d_model, d_model) - self.output_proj = nn.Linear(d_model, d_model) - - self._reset_parameters() - - def _reset_parameters(self): - constant_(self.sampling_offsets.weight.data, 0.) 
- thetas = torch.arange(self.n_heads, dtype=torch.float32) * (2.0 * math.pi / self.n_heads) - grid_init = torch.stack([thetas.cos(), thetas.sin()], -1) - grid_init = (grid_init / grid_init.abs().max(-1, keepdim=True)[0]).view(self.n_heads, 1, 1, 2).repeat(1, self.n_levels, self.n_points, 1) - for i in range(self.n_points): - grid_init[:, :, i, :] *= i + 1 - with torch.no_grad(): - self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1)) - constant_(self.attention_weights.weight.data, 0.) - constant_(self.attention_weights.bias.data, 0.) - xavier_uniform_(self.value_proj.weight.data) - constant_(self.value_proj.bias.data, 0.) - xavier_uniform_(self.output_proj.weight.data) - constant_(self.output_proj.bias.data, 0.) - - def forward(self, query, reference_points, input_flatten, input_spatial_shapes, input_level_start_index, input_padding_mask=None): - """ - :param query (N, Length_{query}, C) - :param reference_points (N, Length_{query}, n_levels, 2), range in [0, 1], top-left (0,0), bottom-right (1, 1), including padding area - or (N, Length_{query}, n_levels, 4), add additional (w, h) to form reference boxes - :param input_flatten (N, \sum_{l=0}^{L-1} H_l \cdot W_l, C) - :param input_spatial_shapes (n_levels, 2), [(H_0, W_0), (H_1, W_1), ..., (H_{L-1}, W_{L-1})] - :param input_level_start_index (n_levels, ), [0, H_0*W_0, H_0*W_0+H_1*W_1, H_0*W_0+H_1*W_1+H_2*W_2, ..., H_0*W_0+H_1*W_1+...+H_{L-1}*W_{L-1}] - :param input_padding_mask (N, \sum_{l=0}^{L-1} H_l \cdot W_l), True for padding elements, False for non-padding elements - :return output (N, Length_{query}, C) - """ - N, Len_q, _ = query.shape - N, Len_in, _ = input_flatten.shape - assert (input_spatial_shapes[:, 0] * input_spatial_shapes[:, 1]).sum() == Len_in - - value = self.value_proj(input_flatten) - if input_padding_mask is not None: - value = value.masked_fill(input_padding_mask[..., None], float(0)) - value = value.view(N, Len_in, self.n_heads, self.d_model // self.n_heads) - sampling_offsets = self.sampling_offsets(query).view(N, Len_q, self.n_heads, self.n_levels, self.n_points, 2) - attention_weights = self.attention_weights(query).view(N, Len_q, self.n_heads, self.n_levels * self.n_points) - attention_weights = F.softmax(attention_weights, -1).view(N, Len_q, self.n_heads, self.n_levels, self.n_points) - # N, Len_q, n_heads, n_levels, n_points, 2 - if reference_points.shape[-1] == 2: - offset_normalizer = torch.stack([input_spatial_shapes[..., 1], input_spatial_shapes[..., 0]], -1) - sampling_locations = reference_points[:, :, None, :, None, :] \ - + sampling_offsets / offset_normalizer[None, None, None, :, None, :] - elif reference_points.shape[-1] == 4: - sampling_locations = reference_points[:, :, None, :, None, :2] \ - + sampling_offsets / self.n_points * reference_points[:, :, None, :, None, 2:] * 0.5 - else: - raise ValueError( - 'Last dim of reference_points must be 2 or 4, but get {} instead.'.format(reference_points.shape[-1])) - # try: - output = ms_deform_attn_core_pytorch(value, input_spatial_shapes, sampling_locations, attention_weights) - # # For FLOPs calculation only - # output = ms_deform_attn_core_pytorch(value, input_spatial_shapes, sampling_locations, attention_weights) - output = self.output_proj(output) - return output \ No newline at end of file diff --git a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/runner/hooks/hook.py b/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/runner/hooks/hook.py deleted file mode 100644 index 
b8855c107727ecf85b917c890fc8b7f6359238a4..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/runner/hooks/hook.py +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from annotator.uniformer.mmcv.utils import Registry, is_method_overridden - -HOOKS = Registry('hook') - - -class Hook: - stages = ('before_run', 'before_train_epoch', 'before_train_iter', - 'after_train_iter', 'after_train_epoch', 'before_val_epoch', - 'before_val_iter', 'after_val_iter', 'after_val_epoch', - 'after_run') - - def before_run(self, runner): - pass - - def after_run(self, runner): - pass - - def before_epoch(self, runner): - pass - - def after_epoch(self, runner): - pass - - def before_iter(self, runner): - pass - - def after_iter(self, runner): - pass - - def before_train_epoch(self, runner): - self.before_epoch(runner) - - def before_val_epoch(self, runner): - self.before_epoch(runner) - - def after_train_epoch(self, runner): - self.after_epoch(runner) - - def after_val_epoch(self, runner): - self.after_epoch(runner) - - def before_train_iter(self, runner): - self.before_iter(runner) - - def before_val_iter(self, runner): - self.before_iter(runner) - - def after_train_iter(self, runner): - self.after_iter(runner) - - def after_val_iter(self, runner): - self.after_iter(runner) - - def every_n_epochs(self, runner, n): - return (runner.epoch + 1) % n == 0 if n > 0 else False - - def every_n_inner_iters(self, runner, n): - return (runner.inner_iter + 1) % n == 0 if n > 0 else False - - def every_n_iters(self, runner, n): - return (runner.iter + 1) % n == 0 if n > 0 else False - - def end_of_epoch(self, runner): - return runner.inner_iter + 1 == len(runner.data_loader) - - def is_last_epoch(self, runner): - return runner.epoch + 1 == runner._max_epochs - - def is_last_iter(self, runner): - return runner.iter + 1 == runner._max_iters - - def get_triggered_stages(self): - trigger_stages = set() - for stage in Hook.stages: - if is_method_overridden(stage, Hook, self): - trigger_stages.add(stage) - - # some methods will be triggered in multi stages - # use this dict to map method to stages. 
- method_stages_map = { - 'before_epoch': ['before_train_epoch', 'before_val_epoch'], - 'after_epoch': ['after_train_epoch', 'after_val_epoch'], - 'before_iter': ['before_train_iter', 'before_val_iter'], - 'after_iter': ['after_train_iter', 'after_val_iter'], - } - - for method, map_stages in method_stages_map.items(): - if is_method_overridden(method, Hook, self): - trigger_stages.update(map_stages) - - return [stage for stage in Hook.stages if stage in trigger_stages] diff --git a/spaces/SusiePHaltmann/GPT-DALL-X/app.py b/spaces/SusiePHaltmann/GPT-DALL-X/app.py deleted file mode 100644 index 2566cf4bb19a68f2b6e47173ed7d061a8978223c..0000000000000000000000000000000000000000 --- a/spaces/SusiePHaltmann/GPT-DALL-X/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("huggingface/osanseviero/dalle-mini-fork").launch() diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_vendor/tomli/_re.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_vendor/tomli/_re.py deleted file mode 100644 index 994bb7493fd92865e6ab87c277ba5741b44c31a9..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_vendor/tomli/_re.py +++ /dev/null @@ -1,107 +0,0 @@ -# SPDX-License-Identifier: MIT -# SPDX-FileCopyrightText: 2021 Taneli Hukkinen -# Licensed to PSF under a Contributor Agreement. - -from __future__ import annotations - -from datetime import date, datetime, time, timedelta, timezone, tzinfo -from functools import lru_cache -import re -from typing import Any - -from ._types import ParseFloat - -# E.g. -# - 00:32:00.999999 -# - 00:32:00 -_TIME_RE_STR = r"([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])(?:\.([0-9]{1,6})[0-9]*)?" - -RE_NUMBER = re.compile( - r""" -0 -(?: - x[0-9A-Fa-f](?:_?[0-9A-Fa-f])* # hex - | - b[01](?:_?[01])* # bin - | - o[0-7](?:_?[0-7])* # oct -) -| -[+-]?(?:0|[1-9](?:_?[0-9])*) # dec, integer part -(?P - (?:\.[0-9](?:_?[0-9])*)? # optional fractional part - (?:[eE][+-]?[0-9](?:_?[0-9])*)? # optional exponent part -) -""", - flags=re.VERBOSE, -) -RE_LOCALTIME = re.compile(_TIME_RE_STR) -RE_DATETIME = re.compile( - rf""" -([0-9]{{4}})-(0[1-9]|1[0-2])-(0[1-9]|[12][0-9]|3[01]) # date, e.g. 1988-10-27 -(?: - [Tt ] - {_TIME_RE_STR} - (?:([Zz])|([+-])([01][0-9]|2[0-3]):([0-5][0-9]))? # optional time offset -)? -""", - flags=re.VERBOSE, -) - - -def match_to_datetime(match: re.Match) -> datetime | date: - """Convert a `RE_DATETIME` match to `datetime.datetime` or `datetime.date`. - - Raises ValueError if the match does not correspond to a valid date - or datetime. 
- """ - ( - year_str, - month_str, - day_str, - hour_str, - minute_str, - sec_str, - micros_str, - zulu_time, - offset_sign_str, - offset_hour_str, - offset_minute_str, - ) = match.groups() - year, month, day = int(year_str), int(month_str), int(day_str) - if hour_str is None: - return date(year, month, day) - hour, minute, sec = int(hour_str), int(minute_str), int(sec_str) - micros = int(micros_str.ljust(6, "0")) if micros_str else 0 - if offset_sign_str: - tz: tzinfo | None = cached_tz( - offset_hour_str, offset_minute_str, offset_sign_str - ) - elif zulu_time: - tz = timezone.utc - else: # local date-time - tz = None - return datetime(year, month, day, hour, minute, sec, micros, tzinfo=tz) - - -@lru_cache(maxsize=None) -def cached_tz(hour_str: str, minute_str: str, sign_str: str) -> timezone: - sign = 1 if sign_str == "+" else -1 - return timezone( - timedelta( - hours=sign * int(hour_str), - minutes=sign * int(minute_str), - ) - ) - - -def match_to_localtime(match: re.Match) -> time: - hour_str, minute_str, sec_str, micros_str = match.groups() - micros = int(micros_str.ljust(6, "0")) if micros_str else 0 - return time(int(hour_str), int(minute_str), int(sec_str), micros) - - -def match_to_number(match: re.Match, parse_float: ParseFloat) -> Any: - if match.group("floatpart"): - return parse_float(match.group()) - return int(match.group(), 0) diff --git a/spaces/ThirdEyeData/Rogue_Component_Prediction/app.py b/spaces/ThirdEyeData/Rogue_Component_Prediction/app.py deleted file mode 100644 index 1b114fe381735293d38cdd1914c635a043f9c8f5..0000000000000000000000000000000000000000 --- a/spaces/ThirdEyeData/Rogue_Component_Prediction/app.py +++ /dev/null @@ -1,133 +0,0 @@ -import tensorflow as tf -from tensorflow import keras -import numpy as np -import matplotlib.pyplot as plt -import pandas as pd -from sklearn.model_selection import train_test_split -from sklearn import preprocessing -import seaborn as sns -from sklearn.preprocessing import LabelEncoder -import streamlit as st - -st.title("Rouge Component Prediction") -#Reading Dataset -df = pd.read_csv('identify_rogue_50K_ALL.csv') -print("Dataset Size:",df.shape) -st.sidebar.header('Enter the Components Details here') -# Dropping the SRU serial number -df.drop(['SRU serial number','Date of Manufacture','Last Maintenance Date','date of last failure'], axis = 1, inplace=True) - -# DATA from user -def user_report(): - manufacturer = st.sidebar.selectbox("Manufacturer", - ("JKL Company", "GHI Company","AGS Company","ABC Company","XYZ Company" )) - if manufacturer=='JKL Company': - manufacturer=3 - elif manufacturer=="GHI Company": - manufacturer=2 - elif manufacturer=="AGS Company": - manufacturer=1 - elif manufacturer=="ABC Company": - manufacturer =0 - else: - manufacturer=4 - component_age = st.sidebar.slider('Component Age (in hours)', 500,2000, 600 ) - total_operating_hours = st.sidebar.slider('Total Operating Hours)', 50,2000, 500 ) - usage_intensity = st.sidebar.slider('Usage Intensity hours/day', 0,9, 5 ) - last_maintance_type = st.sidebar.selectbox('Last Mantainence Type', ("Preventive","Corrective") ) - if last_maintance_type=="Preventive": - last_maintance_type=1 - else: - last_maintance_type=0 - previous_number_of_repairs = st.sidebar.number_input('Enter the Previous Number of Repairs Undergone 0 to 5 )',min_value=0,max_value=5,step=1) - operating_temperature = st.sidebar.slider('Operating Temperature', 10,25, 15 ) - humidity = st.sidebar.slider('Humidity', 20,105, 25 ) - Vibration_Level = st.sidebar.slider('Vibration Level', 2,7, 2 
) - Pressure = st.sidebar.slider('Pressure', 200,550, 250 ) - Power_Input_Voltage= st.sidebar.slider('Power Input Voltage (V)',100,133,115) - repair_type = st.sidebar.selectbox('Repair Type', ("Hardware","Software") ) - if repair_type=='Hardware': - repair_type=0 - else: - repair_type=1 - number_of_inspection = st.sidebar.selectbox('Number of Inspections',('1','2')) - if number_of_inspection=='1': - number_of_inspection=1 - else: - number_of_inspection=2 - number_of_inspection_6months = st.sidebar.selectbox('Number of Inspections in last 6 Months',('0','1')) - if number_of_inspection_6months=='0': - number_of_inspection_6months=0 - else: - number_of_inspection_6months=1 - prior_maintainence = st.sidebar.selectbox('Prior Maintainence',("Regular","Irregular")) - if prior_maintainence =='Regular': - prior_maintainence=1 - else: - prior_maintainence=0 - - user_report_data = { - 'Manufacturer':manufacturer, - 'Component_Age':component_age, - 'Total Operating Hours':total_operating_hours, - 'Usage Intensity (hours/day)':usage_intensity, - 'Last Maintenance Type': last_maintance_type, - 'Previous number of repairs':previous_number_of_repairs, - 'Operating Temperature':operating_temperature, - 'Humidity': humidity, - 'Vibration Level':Vibration_Level, - 'Pressure':Pressure, - 'Power Input Voltage (V)':Power_Input_Voltage, - 'repair type':repair_type , - 'total number of inspection':number_of_inspection, - 'No. of Inspections in Last 6 Months':number_of_inspection_6months, - 'Prior Maintenance':prior_maintainence - - } - report_data = pd.DataFrame(user_report_data, index=[0]) - return report_data - -#Customer Data -user_data = user_report() -st.header("Component Details") -st.write(user_data) - -def label_encoder(df): - le = LabelEncoder() - cat = df.select_dtypes(include='O').keys() - categ = list(cat) - df[categ] = df[categ].apply(le.fit_transform) - return df - -def preprocess_dataset(X): - x = X.values #returns a numpy array - min_max_scaler = preprocessing.MinMaxScaler() - x_scaled = min_max_scaler.fit_transform(x) - X_df = pd.DataFrame(x_scaled) - return X_df - -def prediction(df): - #X = df.loc[:,df.columns!= "Rogue LRU/SRU (Target)"] - #y = df["Rogue LRU/SRU (Target)"] - #X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) - #print(X_train.shape) - #print(X_test.shape) - X_test_encoded = label_encoder(df) - X_test_df = preprocess_dataset(X_test_encoded) - x_model = loaded_model = tf.keras.models.load_model('my_model') - y_pred = x_model.predict(X_test_df) - #predicition = [] - #for i in list(y_pred): - if y_pred ==0: - return 'Component is Good' - else: - return 'Component is not Good' - #X_test['Actual_time_to_repair'] = y_test - #X_test['Predicted_time_to_repair'] = predicition - # X_test.to_csv(r'/content/drive/MyDrive/Colab Notebooks/HAL/rogue_test_data.csv') - #print(X_test.head()) - -y_pred = prediction(user_data) - -if st.button("Predict"): - st.subheader(y_pred) \ No newline at end of file diff --git a/spaces/Tiju1996/resume-parser/main.py b/spaces/Tiju1996/resume-parser/main.py deleted file mode 100644 index 7cd6ab7cc98c09323482927829d169b4f77a83cb..0000000000000000000000000000000000000000 --- a/spaces/Tiju1996/resume-parser/main.py +++ /dev/null @@ -1,23 +0,0 @@ -from ResumeReader import ResumeReader -from ResumeParser import ResumeParser -from Models import Models -import json -import os - - -class Main: - def __init__(self): - models = Models() - ner, ner_dates, zero_shot_classifier, tagger = models.load_trained_models() - self.reader = 
ResumeReader() - self.parser = ResumeParser(ner, ner_dates, zero_shot_classifier, tagger) - - def parse_cv(self, file_path): - resume_lines = self.reader.read_file(file_path) - output = self.parser.parse(resume_lines) - return output - - def save_parse_as_json(self, dict, file_name): - print("Saving the parse...") - with open(file_name, 'w', encoding="utf-8") as f: - json.dump(dict, f, indent=4, default=str, ensure_ascii=False) \ No newline at end of file diff --git a/spaces/TushDeMort/yolo/models/yolo.py b/spaces/TushDeMort/yolo/models/yolo.py deleted file mode 100644 index 95a019c6aeec8c3f1d582907d5fe7ff3ed6b9369..0000000000000000000000000000000000000000 --- a/spaces/TushDeMort/yolo/models/yolo.py +++ /dev/null @@ -1,843 +0,0 @@ -import argparse -import logging -import sys -from copy import deepcopy - -sys.path.append('./') # to run '$ python *.py' files in subdirectories -logger = logging.getLogger(__name__) -import torch -from models.common import * -from models.experimental import * -from utils.autoanchor import check_anchor_order -from utils.general import make_divisible, check_file, set_logging -from utils.torch_utils import time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \ - select_device, copy_attr -from utils.loss import SigmoidBin - -try: - import thop # for FLOPS computation -except ImportError: - thop = None - - -class Detect(nn.Module): - stride = None # strides computed during build - export = False # onnx export - end2end = False - include_nms = False - concat = False - - def __init__(self, nc=80, anchors=(), ch=()): # detection layer - super(Detect, self).__init__() - self.nc = nc # number of classes - self.no = nc + 5 # number of outputs per anchor - self.nl = len(anchors) # number of detection layers - self.na = len(anchors[0]) // 2 # number of anchors - self.grid = [torch.zeros(1)] * self.nl # init grid - a = torch.tensor(anchors).float().view(self.nl, -1, 2) - self.register_buffer('anchors', a) # shape(nl,na,2) - self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2) - self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv - - def forward(self, x): - # x = x.copy() # for profiling - z = [] # inference output - self.training |= self.export - for i in range(self.nl): - x[i] = self.m[i](x[i]) # conv - bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) - x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() - - if not self.training: # inference - if self.grid[i].shape[2:4] != x[i].shape[2:4]: - self.grid[i] = self._make_grid(nx, ny).to(x[i].device) - y = x[i].sigmoid() - if not torch.onnx.is_in_onnx_export(): - y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy - y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh - else: - xy, wh, conf = y.split((2, 2, self.nc + 1), 4) # y.tensor_split((2, 4, 5), 4) # torch 1.8.0 - xy = xy * (2. 
* self.stride[i]) + (self.stride[i] * (self.grid[i] - 0.5)) # new xy - wh = wh ** 2 * (4 * self.anchor_grid[i].data) # new wh - y = torch.cat((xy, wh, conf), 4) - z.append(y.view(bs, -1, self.no)) - - if self.training: - out = x - elif self.end2end: - out = torch.cat(z, 1) - elif self.include_nms: - z = self.convert(z) - out = (z, ) - elif self.concat: - out = torch.cat(z, 1) - else: - out = (torch.cat(z, 1), x) - - return out - - @staticmethod - def _make_grid(nx=20, ny=20): - yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)]) - return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float() - - def convert(self, z): - z = torch.cat(z, 1) - box = z[:, :, :4] - conf = z[:, :, 4:5] - score = z[:, :, 5:] - score *= conf - convert_matrix = torch.tensor([[1, 0, 1, 0], [0, 1, 0, 1], [-0.5, 0, 0.5, 0], [0, -0.5, 0, 0.5]], - dtype=torch.float32, - device=z.device) - box @= convert_matrix - return (box, score) - - -class IDetect(nn.Module): - stride = None # strides computed during build - export = False # onnx export - end2end = False - include_nms = False - concat = False - - def __init__(self, nc=80, anchors=(), ch=()): # detection layer - super(IDetect, self).__init__() - self.nc = nc # number of classes - self.no = nc + 5 # number of outputs per anchor - self.nl = len(anchors) # number of detection layers - self.na = len(anchors[0]) // 2 # number of anchors - self.grid = [torch.zeros(1)] * self.nl # init grid - a = torch.tensor(anchors).float().view(self.nl, -1, 2) - self.register_buffer('anchors', a) # shape(nl,na,2) - self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2) - self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv - - self.ia = nn.ModuleList(ImplicitA(x) for x in ch) - self.im = nn.ModuleList(ImplicitM(self.no * self.na) for _ in ch) - - def forward(self, x): - # x = x.copy() # for profiling - z = [] # inference output - self.training |= self.export - for i in range(self.nl): - x[i] = self.m[i](self.ia[i](x[i])) # conv - x[i] = self.im[i](x[i]) - bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) - x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() - - if not self.training: # inference - if self.grid[i].shape[2:4] != x[i].shape[2:4]: - self.grid[i] = self._make_grid(nx, ny).to(x[i].device) - - y = x[i].sigmoid() - y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy - y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh - z.append(y.view(bs, -1, self.no)) - - return x if self.training else (torch.cat(z, 1), x) - - def fuseforward(self, x): - # x = x.copy() # for profiling - z = [] # inference output - self.training |= self.export - for i in range(self.nl): - x[i] = self.m[i](x[i]) # conv - bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) - x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() - - if not self.training: # inference - if self.grid[i].shape[2:4] != x[i].shape[2:4]: - self.grid[i] = self._make_grid(nx, ny).to(x[i].device) - - y = x[i].sigmoid() - if not torch.onnx.is_in_onnx_export(): - y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy - y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh - else: - xy, wh, conf = y.split((2, 2, self.nc + 1), 4) # y.tensor_split((2, 4, 5), 4) # torch 1.8.0 - xy = xy * (2. 
* self.stride[i]) + (self.stride[i] * (self.grid[i] - 0.5)) # new xy - wh = wh ** 2 * (4 * self.anchor_grid[i].data) # new wh - y = torch.cat((xy, wh, conf), 4) - z.append(y.view(bs, -1, self.no)) - - if self.training: - out = x - elif self.end2end: - out = torch.cat(z, 1) - elif self.include_nms: - z = self.convert(z) - out = (z, ) - elif self.concat: - out = torch.cat(z, 1) - else: - out = (torch.cat(z, 1), x) - - return out - - def fuse(self): - print("IDetect.fuse") - # fuse ImplicitA and Convolution - for i in range(len(self.m)): - c1,c2,_,_ = self.m[i].weight.shape - c1_,c2_, _,_ = self.ia[i].implicit.shape - self.m[i].bias += torch.matmul(self.m[i].weight.reshape(c1,c2),self.ia[i].implicit.reshape(c2_,c1_)).squeeze(1) - - # fuse ImplicitM and Convolution - for i in range(len(self.m)): - c1,c2, _,_ = self.im[i].implicit.shape - self.m[i].bias *= self.im[i].implicit.reshape(c2) - self.m[i].weight *= self.im[i].implicit.transpose(0,1) - - @staticmethod - def _make_grid(nx=20, ny=20): - yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)]) - return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float() - - def convert(self, z): - z = torch.cat(z, 1) - box = z[:, :, :4] - conf = z[:, :, 4:5] - score = z[:, :, 5:] - score *= conf - convert_matrix = torch.tensor([[1, 0, 1, 0], [0, 1, 0, 1], [-0.5, 0, 0.5, 0], [0, -0.5, 0, 0.5]], - dtype=torch.float32, - device=z.device) - box @= convert_matrix - return (box, score) - - -class IKeypoint(nn.Module): - stride = None # strides computed during build - export = False # onnx export - - def __init__(self, nc=80, anchors=(), nkpt=17, ch=(), inplace=True, dw_conv_kpt=False): # detection layer - super(IKeypoint, self).__init__() - self.nc = nc # number of classes - self.nkpt = nkpt - self.dw_conv_kpt = dw_conv_kpt - self.no_det=(nc + 5) # number of outputs per anchor for box and class - self.no_kpt = 3*self.nkpt ## number of outputs per anchor for keypoints - self.no = self.no_det+self.no_kpt - self.nl = len(anchors) # number of detection layers - self.na = len(anchors[0]) // 2 # number of anchors - self.grid = [torch.zeros(1)] * self.nl # init grid - self.flip_test = False - a = torch.tensor(anchors).float().view(self.nl, -1, 2) - self.register_buffer('anchors', a) # shape(nl,na,2) - self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2) - self.m = nn.ModuleList(nn.Conv2d(x, self.no_det * self.na, 1) for x in ch) # output conv - - self.ia = nn.ModuleList(ImplicitA(x) for x in ch) - self.im = nn.ModuleList(ImplicitM(self.no_det * self.na) for _ in ch) - - if self.nkpt is not None: - if self.dw_conv_kpt: #keypoint head is slightly more complex - self.m_kpt = nn.ModuleList( - nn.Sequential(DWConv(x, x, k=3), Conv(x,x), - DWConv(x, x, k=3), Conv(x, x), - DWConv(x, x, k=3), Conv(x,x), - DWConv(x, x, k=3), Conv(x, x), - DWConv(x, x, k=3), Conv(x, x), - DWConv(x, x, k=3), nn.Conv2d(x, self.no_kpt * self.na, 1)) for x in ch) - else: #keypoint head is a single convolution - self.m_kpt = nn.ModuleList(nn.Conv2d(x, self.no_kpt * self.na, 1) for x in ch) - - self.inplace = inplace # use in-place ops (e.g. 
slice assignment) - - def forward(self, x): - # x = x.copy() # for profiling - z = [] # inference output - self.training |= self.export - for i in range(self.nl): - if self.nkpt is None or self.nkpt==0: - x[i] = self.im[i](self.m[i](self.ia[i](x[i]))) # conv - else : - x[i] = torch.cat((self.im[i](self.m[i](self.ia[i](x[i]))), self.m_kpt[i](x[i])), axis=1) - - bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) - x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() - x_det = x[i][..., :6] - x_kpt = x[i][..., 6:] - - if not self.training: # inference - if self.grid[i].shape[2:4] != x[i].shape[2:4]: - self.grid[i] = self._make_grid(nx, ny).to(x[i].device) - kpt_grid_x = self.grid[i][..., 0:1] - kpt_grid_y = self.grid[i][..., 1:2] - - if self.nkpt == 0: - y = x[i].sigmoid() - else: - y = x_det.sigmoid() - - if self.inplace: - xy = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy - wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i].view(1, self.na, 1, 1, 2) # wh - if self.nkpt != 0: - x_kpt[..., 0::3] = (x_kpt[..., ::3] * 2. - 0.5 + kpt_grid_x.repeat(1,1,1,1,17)) * self.stride[i] # xy - x_kpt[..., 1::3] = (x_kpt[..., 1::3] * 2. - 0.5 + kpt_grid_y.repeat(1,1,1,1,17)) * self.stride[i] # xy - #x_kpt[..., 0::3] = (x_kpt[..., ::3] + kpt_grid_x.repeat(1,1,1,1,17)) * self.stride[i] # xy - #x_kpt[..., 1::3] = (x_kpt[..., 1::3] + kpt_grid_y.repeat(1,1,1,1,17)) * self.stride[i] # xy - #print('=============') - #print(self.anchor_grid[i].shape) - #print(self.anchor_grid[i][...,0].unsqueeze(4).shape) - #print(x_kpt[..., 0::3].shape) - #x_kpt[..., 0::3] = ((x_kpt[..., 0::3].tanh() * 2.) ** 3 * self.anchor_grid[i][...,0].unsqueeze(4).repeat(1,1,1,1,self.nkpt)) + kpt_grid_x.repeat(1,1,1,1,17) * self.stride[i] # xy - #x_kpt[..., 1::3] = ((x_kpt[..., 1::3].tanh() * 2.) ** 3 * self.anchor_grid[i][...,1].unsqueeze(4).repeat(1,1,1,1,self.nkpt)) + kpt_grid_y.repeat(1,1,1,1,17) * self.stride[i] # xy - #x_kpt[..., 0::3] = (((x_kpt[..., 0::3].sigmoid() * 4.) ** 2 - 8.) * self.anchor_grid[i][...,0].unsqueeze(4).repeat(1,1,1,1,self.nkpt)) + kpt_grid_x.repeat(1,1,1,1,17) * self.stride[i] # xy - #x_kpt[..., 1::3] = (((x_kpt[..., 1::3].sigmoid() * 4.) ** 2 - 8.) * self.anchor_grid[i][...,1].unsqueeze(4).repeat(1,1,1,1,self.nkpt)) + kpt_grid_y.repeat(1,1,1,1,17) * self.stride[i] # xy - x_kpt[..., 2::3] = x_kpt[..., 2::3].sigmoid() - - y = torch.cat((xy, wh, y[..., 4:], x_kpt), dim = -1) - - else: # for YOLOv5 on AWS Inferentia https://github.com/ultralytics/yolov5/pull/2953 - xy = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy - wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh - if self.nkpt != 0: - y[..., 6:] = (y[..., 6:] * 2. 
- 0.5 + self.grid[i].repeat((1,1,1,1,self.nkpt))) * self.stride[i] # xy - y = torch.cat((xy, wh, y[..., 4:]), -1) - - z.append(y.view(bs, -1, self.no)) - - return x if self.training else (torch.cat(z, 1), x) - - @staticmethod - def _make_grid(nx=20, ny=20): - yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)]) - return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float() - - -class IAuxDetect(nn.Module): - stride = None # strides computed during build - export = False # onnx export - end2end = False - include_nms = False - concat = False - - def __init__(self, nc=80, anchors=(), ch=()): # detection layer - super(IAuxDetect, self).__init__() - self.nc = nc # number of classes - self.no = nc + 5 # number of outputs per anchor - self.nl = len(anchors) # number of detection layers - self.na = len(anchors[0]) // 2 # number of anchors - self.grid = [torch.zeros(1)] * self.nl # init grid - a = torch.tensor(anchors).float().view(self.nl, -1, 2) - self.register_buffer('anchors', a) # shape(nl,na,2) - self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2) - self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch[:self.nl]) # output conv - self.m2 = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch[self.nl:]) # output conv - - self.ia = nn.ModuleList(ImplicitA(x) for x in ch[:self.nl]) - self.im = nn.ModuleList(ImplicitM(self.no * self.na) for _ in ch[:self.nl]) - - def forward(self, x): - # x = x.copy() # for profiling - z = [] # inference output - self.training |= self.export - for i in range(self.nl): - x[i] = self.m[i](self.ia[i](x[i])) # conv - x[i] = self.im[i](x[i]) - bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) - x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() - - x[i+self.nl] = self.m2[i](x[i+self.nl]) - x[i+self.nl] = x[i+self.nl].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() - - if not self.training: # inference - if self.grid[i].shape[2:4] != x[i].shape[2:4]: - self.grid[i] = self._make_grid(nx, ny).to(x[i].device) - - y = x[i].sigmoid() - if not torch.onnx.is_in_onnx_export(): - y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy - y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh - else: - xy, wh, conf = y.split((2, 2, self.nc + 1), 4) # y.tensor_split((2, 4, 5), 4) # torch 1.8.0 - xy = xy * (2. * self.stride[i]) + (self.stride[i] * (self.grid[i] - 0.5)) # new xy - wh = wh ** 2 * (4 * self.anchor_grid[i].data) # new wh - y = torch.cat((xy, wh, conf), 4) - z.append(y.view(bs, -1, self.no)) - - return x if self.training else (torch.cat(z, 1), x[:self.nl]) - - def fuseforward(self, x): - # x = x.copy() # for profiling - z = [] # inference output - self.training |= self.export - for i in range(self.nl): - x[i] = self.m[i](x[i]) # conv - bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) - x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() - - if not self.training: # inference - if self.grid[i].shape[2:4] != x[i].shape[2:4]: - self.grid[i] = self._make_grid(nx, ny).to(x[i].device) - - y = x[i].sigmoid() - if not torch.onnx.is_in_onnx_export(): - y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy - y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh - else: - xy = (y[..., 0:2] * 2. 
- 0.5 + self.grid[i]) * self.stride[i] # xy - wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i].data # wh - y = torch.cat((xy, wh, y[..., 4:]), -1) - z.append(y.view(bs, -1, self.no)) - - if self.training: - out = x - elif self.end2end: - out = torch.cat(z, 1) - elif self.include_nms: - z = self.convert(z) - out = (z, ) - elif self.concat: - out = torch.cat(z, 1) - else: - out = (torch.cat(z, 1), x) - - return out - - def fuse(self): - print("IAuxDetect.fuse") - # fuse ImplicitA and Convolution - for i in range(len(self.m)): - c1,c2,_,_ = self.m[i].weight.shape - c1_,c2_, _,_ = self.ia[i].implicit.shape - self.m[i].bias += torch.matmul(self.m[i].weight.reshape(c1,c2),self.ia[i].implicit.reshape(c2_,c1_)).squeeze(1) - - # fuse ImplicitM and Convolution - for i in range(len(self.m)): - c1,c2, _,_ = self.im[i].implicit.shape - self.m[i].bias *= self.im[i].implicit.reshape(c2) - self.m[i].weight *= self.im[i].implicit.transpose(0,1) - - @staticmethod - def _make_grid(nx=20, ny=20): - yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)]) - return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float() - - def convert(self, z): - z = torch.cat(z, 1) - box = z[:, :, :4] - conf = z[:, :, 4:5] - score = z[:, :, 5:] - score *= conf - convert_matrix = torch.tensor([[1, 0, 1, 0], [0, 1, 0, 1], [-0.5, 0, 0.5, 0], [0, -0.5, 0, 0.5]], - dtype=torch.float32, - device=z.device) - box @= convert_matrix - return (box, score) - - -class IBin(nn.Module): - stride = None # strides computed during build - export = False # onnx export - - def __init__(self, nc=80, anchors=(), ch=(), bin_count=21): # detection layer - super(IBin, self).__init__() - self.nc = nc # number of classes - self.bin_count = bin_count - - self.w_bin_sigmoid = SigmoidBin(bin_count=self.bin_count, min=0.0, max=4.0) - self.h_bin_sigmoid = SigmoidBin(bin_count=self.bin_count, min=0.0, max=4.0) - # classes, x,y,obj - self.no = nc + 3 + \ - self.w_bin_sigmoid.get_length() + self.h_bin_sigmoid.get_length() # w-bce, h-bce - # + self.x_bin_sigmoid.get_length() + self.y_bin_sigmoid.get_length() - - self.nl = len(anchors) # number of detection layers - self.na = len(anchors[0]) // 2 # number of anchors - self.grid = [torch.zeros(1)] * self.nl # init grid - a = torch.tensor(anchors).float().view(self.nl, -1, 2) - self.register_buffer('anchors', a) # shape(nl,na,2) - self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2) - self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv - - self.ia = nn.ModuleList(ImplicitA(x) for x in ch) - self.im = nn.ModuleList(ImplicitM(self.no * self.na) for _ in ch) - - def forward(self, x): - - #self.x_bin_sigmoid.use_fw_regression = True - #self.y_bin_sigmoid.use_fw_regression = True - self.w_bin_sigmoid.use_fw_regression = True - self.h_bin_sigmoid.use_fw_regression = True - - # x = x.copy() # for profiling - z = [] # inference output - self.training |= self.export - for i in range(self.nl): - x[i] = self.m[i](self.ia[i](x[i])) # conv - x[i] = self.im[i](x[i]) - bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) - x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() - - if not self.training: # inference - if self.grid[i].shape[2:4] != x[i].shape[2:4]: - self.grid[i] = self._make_grid(nx, ny).to(x[i].device) - - y = x[i].sigmoid() - y[..., 0:2] = (y[..., 0:2] * 2. 
- 0.5 + self.grid[i]) * self.stride[i] # xy - #y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh - - - #px = (self.x_bin_sigmoid.forward(y[..., 0:12]) + self.grid[i][..., 0]) * self.stride[i] - #py = (self.y_bin_sigmoid.forward(y[..., 12:24]) + self.grid[i][..., 1]) * self.stride[i] - - pw = self.w_bin_sigmoid.forward(y[..., 2:24]) * self.anchor_grid[i][..., 0] - ph = self.h_bin_sigmoid.forward(y[..., 24:46]) * self.anchor_grid[i][..., 1] - - #y[..., 0] = px - #y[..., 1] = py - y[..., 2] = pw - y[..., 3] = ph - - y = torch.cat((y[..., 0:4], y[..., 46:]), dim=-1) - - z.append(y.view(bs, -1, y.shape[-1])) - - return x if self.training else (torch.cat(z, 1), x) - - @staticmethod - def _make_grid(nx=20, ny=20): - yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)]) - return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float() - - -class Model(nn.Module): - def __init__(self, cfg='yolor-csp-c.yaml', ch=3, nc=None, anchors=None): # model, input channels, number of classes - super(Model, self).__init__() - self.traced = False - if isinstance(cfg, dict): - self.yaml = cfg # model dict - else: # is *.yaml - import yaml # for torch hub - self.yaml_file = Path(cfg).name - with open(cfg) as f: - self.yaml = yaml.load(f, Loader=yaml.SafeLoader) # model dict - - # Define model - ch = self.yaml['ch'] = self.yaml.get('ch', ch) # input channels - if nc and nc != self.yaml['nc']: - logger.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}") - self.yaml['nc'] = nc # override yaml value - if anchors: - logger.info(f'Overriding model.yaml anchors with anchors={anchors}') - self.yaml['anchors'] = round(anchors) # override yaml value - self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist - self.names = [str(i) for i in range(self.yaml['nc'])] # default names - # print([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))]) - - # Build strides, anchors - m = self.model[-1] # Detect() - if isinstance(m, Detect): - s = 256 # 2x min stride - m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward - check_anchor_order(m) - m.anchors /= m.stride.view(-1, 1, 1) - self.stride = m.stride - self._initialize_biases() # only run once - # print('Strides: %s' % m.stride.tolist()) - if isinstance(m, IDetect): - s = 256 # 2x min stride - m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward - check_anchor_order(m) - m.anchors /= m.stride.view(-1, 1, 1) - self.stride = m.stride - self._initialize_biases() # only run once - # print('Strides: %s' % m.stride.tolist()) - if isinstance(m, IAuxDetect): - s = 256 # 2x min stride - m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))[:4]]) # forward - #print(m.stride) - check_anchor_order(m) - m.anchors /= m.stride.view(-1, 1, 1) - self.stride = m.stride - self._initialize_aux_biases() # only run once - # print('Strides: %s' % m.stride.tolist()) - if isinstance(m, IBin): - s = 256 # 2x min stride - m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward - check_anchor_order(m) - m.anchors /= m.stride.view(-1, 1, 1) - self.stride = m.stride - self._initialize_biases_bin() # only run once - # print('Strides: %s' % m.stride.tolist()) - if isinstance(m, IKeypoint): - s = 256 # 2x min stride - m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward - check_anchor_order(m) - m.anchors /= m.stride.view(-1, 
1, 1) - self.stride = m.stride - self._initialize_biases_kpt() # only run once - # print('Strides: %s' % m.stride.tolist()) - - # Init weights, biases - initialize_weights(self) - self.info() - logger.info('') - - def forward(self, x, augment=False, profile=False): - if augment: - img_size = x.shape[-2:] # height, width - s = [1, 0.83, 0.67] # scales - f = [None, 3, None] # flips (2-ud, 3-lr) - y = [] # outputs - for si, fi in zip(s, f): - xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max())) - yi = self.forward_once(xi)[0] # forward - # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1]) # save - yi[..., :4] /= si # de-scale - if fi == 2: - yi[..., 1] = img_size[0] - yi[..., 1] # de-flip ud - elif fi == 3: - yi[..., 0] = img_size[1] - yi[..., 0] # de-flip lr - y.append(yi) - return torch.cat(y, 1), None # augmented inference, train - else: - return self.forward_once(x, profile) # single-scale inference, train - - def forward_once(self, x, profile=False): - y, dt = [], [] # outputs - for m in self.model: - if m.f != -1: # if not from previous layer - x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers - - if not hasattr(self, 'traced'): - self.traced=False - - if self.traced: - if isinstance(m, Detect) or isinstance(m, IDetect) or isinstance(m, IAuxDetect) or isinstance(m, IKeypoint): - break - - if profile: - c = isinstance(m, (Detect, IDetect, IAuxDetect, IBin)) - o = thop.profile(m, inputs=(x.copy() if c else x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPS - for _ in range(10): - m(x.copy() if c else x) - t = time_synchronized() - for _ in range(10): - m(x.copy() if c else x) - dt.append((time_synchronized() - t) * 100) - print('%10.1f%10.0f%10.1fms %-40s' % (o, m.np, dt[-1], m.type)) - - x = m(x) # run - - y.append(x if m.i in self.save else None) # save output - - if profile: - print('%.1fms total' % sum(dt)) - return x - - def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency - # https://arxiv.org/abs/1708.02002 section 3.3 - # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1. - m = self.model[-1] # Detect() module - for mi, s in zip(m.m, m.stride): # from - b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85) - b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) - b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls - mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) - - def _initialize_aux_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency - # https://arxiv.org/abs/1708.02002 section 3.3 - # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1. 
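# Illustrative aside: a standalone sketch of the prior-probability bias initialisation that
# the comment above cites (https://arxiv.org/abs/1708.02002, section 3.3). It only mirrors
# the expressions used in _initialize_biases(); the strides, nc=80 and the "8 objects per
# 640 image" figure are example assumptions, not values read from a real model.
import math

def detection_bias_priors(strides=(8, 16, 32), nc=80, img_size=640, objects_per_img=8):
    """Return (stride, objectness_bias, class_bias) so that sigmoid(bias) starts near the prior."""
    priors = []
    for s in strides:
        cells = (img_size / s) ** 2                   # number of grid cells at this stride
        obj_bias = math.log(objects_per_img / cells)  # P(object) is roughly 8 / cells at init
        cls_bias = math.log(0.6 / (nc - 0.99))        # near-uniform class prior, as in the code above
        priors.append((s, obj_bias, cls_bias))
    return priors

for s, obj_b, cls_b in detection_bias_priors():
    print(f"stride {s:>2}: obj bias {obj_b:6.2f}, cls bias {cls_b:6.2f}")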
- m = self.model[-1] # Detect() module - for mi, mi2, s in zip(m.m, m.m2, m.stride): # from - b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85) - b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) - b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls - mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) - b2 = mi2.bias.view(m.na, -1) # conv.bias(255) to (3,85) - b2.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) - b2.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls - mi2.bias = torch.nn.Parameter(b2.view(-1), requires_grad=True) - - def _initialize_biases_bin(self, cf=None): # initialize biases into Detect(), cf is class frequency - # https://arxiv.org/abs/1708.02002 section 3.3 - # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1. - m = self.model[-1] # Bin() module - bc = m.bin_count - for mi, s in zip(m.m, m.stride): # from - b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85) - old = b[:, (0,1,2,bc+3)].data - obj_idx = 2*bc+4 - b[:, :obj_idx].data += math.log(0.6 / (bc + 1 - 0.99)) - b[:, obj_idx].data += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) - b[:, (obj_idx+1):].data += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls - b[:, (0,1,2,bc+3)].data = old - mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) - - def _initialize_biases_kpt(self, cf=None): # initialize biases into Detect(), cf is class frequency - # https://arxiv.org/abs/1708.02002 section 3.3 - # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1. - m = self.model[-1] # Detect() module - for mi, s in zip(m.m, m.stride): # from - b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85) - b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) - b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls - mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) - - def _print_biases(self): - m = self.model[-1] # Detect() module - for mi in m.m: # from - b = mi.bias.detach().view(m.na, -1).T # conv.bias(255) to (3,85) - print(('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean())) - - # def _print_weights(self): - # for m in self.model.modules(): - # if type(m) is Bottleneck: - # print('%10.3g' % (m.w.detach().sigmoid() * 2)) # shortcut weights - - def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers - print('Fusing layers... ') - for m in self.model.modules(): - if isinstance(m, RepConv): - #print(f" fuse_repvgg_block") - m.fuse_repvgg_block() - elif isinstance(m, RepConv_OREPA): - #print(f" switch_to_deploy") - m.switch_to_deploy() - elif type(m) is Conv and hasattr(m, 'bn'): - m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv - delattr(m, 'bn') # remove batchnorm - m.forward = m.fuseforward # update forward - elif isinstance(m, (IDetect, IAuxDetect)): - m.fuse() - m.forward = m.fuseforward - self.info() - return self - - def nms(self, mode=True): # add or remove NMS module - present = type(self.model[-1]) is NMS # last layer is NMS - if mode and not present: - print('Adding NMS... ') - m = NMS() # module - m.f = -1 # from - m.i = self.model[-1].i + 1 # index - self.model.add_module(name='%s' % m.i, module=m) # add - self.eval() - elif not mode and present: - print('Removing NMS... 
') - self.model = self.model[:-1] # remove - return self - - def autoshape(self): # add autoShape module - print('Adding autoShape... ') - m = autoShape(self) # wrap model - copy_attr(m, self, include=('yaml', 'nc', 'hyp', 'names', 'stride'), exclude=()) # copy attributes - return m - - def info(self, verbose=False, img_size=640): # print model information - model_info(self, verbose, img_size) - - -def parse_model(d, ch): # model_dict, input_channels(3) - logger.info('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments')) - anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'] - na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors - no = na * (nc + 5) # number of outputs = anchors * (classes + 5) - - layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out - for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args - m = eval(m) if isinstance(m, str) else m # eval strings - for j, a in enumerate(args): - try: - args[j] = eval(a) if isinstance(a, str) else a # eval strings - except: - pass - - n = max(round(n * gd), 1) if n > 1 else n # depth gain - if m in [nn.Conv2d, Conv, RobustConv, RobustConv2, DWConv, GhostConv, RepConv, RepConv_OREPA, DownC, - SPP, SPPF, SPPCSPC, GhostSPPCSPC, MixConv2d, Focus, Stem, GhostStem, CrossConv, - Bottleneck, BottleneckCSPA, BottleneckCSPB, BottleneckCSPC, - RepBottleneck, RepBottleneckCSPA, RepBottleneckCSPB, RepBottleneckCSPC, - Res, ResCSPA, ResCSPB, ResCSPC, - RepRes, RepResCSPA, RepResCSPB, RepResCSPC, - ResX, ResXCSPA, ResXCSPB, ResXCSPC, - RepResX, RepResXCSPA, RepResXCSPB, RepResXCSPC, - Ghost, GhostCSPA, GhostCSPB, GhostCSPC, - SwinTransformerBlock, STCSPA, STCSPB, STCSPC, - SwinTransformer2Block, ST2CSPA, ST2CSPB, ST2CSPC]: - c1, c2 = ch[f], args[0] - if c2 != no: # if not output - c2 = make_divisible(c2 * gw, 8) - - args = [c1, c2, *args[1:]] - if m in [DownC, SPPCSPC, GhostSPPCSPC, - BottleneckCSPA, BottleneckCSPB, BottleneckCSPC, - RepBottleneckCSPA, RepBottleneckCSPB, RepBottleneckCSPC, - ResCSPA, ResCSPB, ResCSPC, - RepResCSPA, RepResCSPB, RepResCSPC, - ResXCSPA, ResXCSPB, ResXCSPC, - RepResXCSPA, RepResXCSPB, RepResXCSPC, - GhostCSPA, GhostCSPB, GhostCSPC, - STCSPA, STCSPB, STCSPC, - ST2CSPA, ST2CSPB, ST2CSPC]: - args.insert(2, n) # number of repeats - n = 1 - elif m is nn.BatchNorm2d: - args = [ch[f]] - elif m is Concat: - c2 = sum([ch[x] for x in f]) - elif m is Chuncat: - c2 = sum([ch[x] for x in f]) - elif m is Shortcut: - c2 = ch[f[0]] - elif m is Foldcut: - c2 = ch[f] // 2 - elif m in [Detect, IDetect, IAuxDetect, IBin, IKeypoint]: - args.append([ch[x] for x in f]) - if isinstance(args[1], int): # number of anchors - args[1] = [list(range(args[1] * 2))] * len(f) - elif m is ReOrg: - c2 = ch[f] * 4 - elif m is Contract: - c2 = ch[f] * args[0] ** 2 - elif m is Expand: - c2 = ch[f] // args[0] ** 2 - else: - c2 = ch[f] - - m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args) # module - t = str(m)[8:-2].replace('__main__.', '') # module type - np = sum([x.numel() for x in m_.parameters()]) # number params - m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params - logger.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args)) # print - save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist - layers.append(m_) - if i == 0: - ch = [] - ch.append(c2) - return nn.Sequential(*layers), sorted(save) - - -if __name__ 
== '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--cfg', type=str, default='yolor-csp-c.yaml', help='model.yaml') - parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') - parser.add_argument('--profile', action='store_true', help='profile model speed') - opt = parser.parse_args() - opt.cfg = check_file(opt.cfg) # check file - set_logging() - device = select_device(opt.device) - - # Create model - model = Model(opt.cfg).to(device) - model.train() - - if opt.profile: - img = torch.rand(1, 3, 640, 640).to(device) - y = model(img, profile=True) - - # Profile - # img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 640, 640).to(device) - # y = model(img, profile=True) - - # Tensorboard - # from torch.utils.tensorboard import SummaryWriter - # tb_writer = SummaryWriter() - # print("Run 'tensorboard --logdir=models/runs' to view tensorboard at http://localhost:6006/") - # tb_writer.add_graph(model.model, img) # add model to tensorboard - # tb_writer.add_image('test', img[0], dataformats='CWH') # add model to tensorboard diff --git a/spaces/Um124/Global_Warming_Analysis/pages/Automobile Data Analysis.py b/spaces/Um124/Global_Warming_Analysis/pages/Automobile Data Analysis.py deleted file mode 100644 index 53a4cc11301f35cf322ec12ee0e8e5d0b926ef47..0000000000000000000000000000000000000000 --- a/spaces/Um124/Global_Warming_Analysis/pages/Automobile Data Analysis.py +++ /dev/null @@ -1,69 +0,0 @@ -import pandas as pd -import numpy as np -import plotly.express as px -import streamlit as st - -st.set_page_config( - page_icon='📈', - page_title="Automobile Data Analysis", - layout='wide' -) - -years = ['2002','2003','2004','2005','2006','2007'] -@st.cache_data -def load_data(): - df=pd.read_csv('data/cars_trucks_and_buses_per_1000_persons.csv') - df.rename(columns={'geo':'Country'},inplace=True) - df.set_index('Country',inplace=True) - df['Total'] = df[years].sum(axis=1) - df['Avgrage']=df.mean(axis=1) - df['Maximum']=df.max(axis=1) - df.sort_index(inplace=True) - return df - -st.title('Cars Trucks and Buses Per 1000 Persons') -df = load_data() -st.dataframe(df,use_container_width=True) - -countries= df.index.unique().tolist() -Graphs = ['bar','pie','line','area','histogram'] -c1,c2 = st.columns(2) -country = c1.selectbox("Select a Country", countries) -Graph = c2.selectbox("Select a Graph type", Graphs) - -st.header("Country wise visualization") -cdf = df.loc[country,years].reset_index() -cdf.rename({'index':'Years'},axis=1, inplace=True) -if Graph == Graphs[0]: - fig = px.bar(cdf, 'Years',country, title=f'{country} Cars trucks and buses per 1000 persons') -if Graph == Graphs[1]: - fig = px.pie(cdf, 'Years',country, title=f'{country} Cars trucks and buses per 1000 persons') -if Graph == Graphs[2]: - fig = px.line(cdf, 'Years',country, title=f'{country} Cars trucks and buses per 1000 persons') -if Graph == Graphs[3]: - fig = px.area(cdf, 'Years',country, title=f'{country} Cars trucks and buses per 1000 persons') -if Graph == Graphs[4]: - fig = px.histogram(cdf, 'Years',country, title=f'{country} Cars trucks and buses per 1000 persons') -st.plotly_chart(fig, use_container_width=True) - -st.header("Comparison of Countries") -clist = st.multiselect("Select countries to compare", countries, default='India') -cdf = df.loc[clist, years].T # T to rotate the data in 90deg -st.write(cdf) -figc = px.line(cdf,cdf.index, clist, title=f'Comparing {", ".join(clist)}') - -st.plotly_chart(figc, use_container_width=True) - -df.sort_values(by='Total', 
ascending=False, inplace=True) -fig1=px.bar(df, x=df.index, y='Total',title='Total number of cars, trucks and buese per 1000 person') -st.plotly_chart(fig1, use_container_width=True) - -dfavg = df.sort_values(by='Avgrage').reset_index() -dfavg.rename({'index':'Country'},axis=1,inplace=True) -fig2=px.bar(dfavg, 'Country', 'Avgrage', title="Avgrage Use of vehicle per 1000 person") -st.plotly_chart(fig2, use_container_width=True) - -dfmax=df.sort_values(by='Maximum').reset_index() -dfmax.rename({'index':'Country'},axis=1,inplace=True) -fig3=px.bar(dfmax,'Country','Maximum',title='Maximum cars, bus and truck per 1000 person') -st.plotly_chart(fig3, use_container_width=True) \ No newline at end of file diff --git a/spaces/Vageesh1/Voice_Cloner/app.py b/spaces/Vageesh1/Voice_Cloner/app.py deleted file mode 100644 index 0dc82e63e5e8b79fdf396c6a47cd55256d6ad205..0000000000000000000000000000000000000000 --- a/spaces/Vageesh1/Voice_Cloner/app.py +++ /dev/null @@ -1,70 +0,0 @@ -from transformers import AutoTokenizer, AutoModelForSeq2SeqLM -from transformers import MarianMTModel, MarianTokenizer -from elevenlabs import set_api_key -from elevenlabs import clone, generate, play -from transformers import AutoModelForSeq2SeqLM, AutoTokenizer -from pydub import AudioSegment -from huggingface_hub.hf_api import HfFolder -HfFolder.save_token('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') -access_token = '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' - -import streamlit as st - - -#language translation function -def translate_english_to_hindi(input_text): - # Load the pre-trained English to Hindi translation model and tokenizer - model_name = "Helsinki-NLP/opus-mt-en-hi" - tokenizer = MarianTokenizer.from_pretrained(model_name) - model = MarianMTModel.from_pretrained(model_name) - - # Tokenize the input text and generate translation - inputs = tokenizer(input_text, return_tensors="pt", padding=True) - translated_ids = model.generate(inputs.input_ids) - - # Decode the translated output - translated_text = tokenizer.decode(translated_ids[0], skip_special_tokens=True) - - return translated_text - - -def save_uploaded_file_as_mp3(uploaded_file, output_file_path): - audio = AudioSegment.from_file(uploaded_file) - audio.export(output_file_path, format="mp3") - -def ui(): - st.title('Multi Lingual Voice Cloner') - st.markdown("Made by vageesh") - - #audio input box - audio_file = st.file_uploader("Upload an audio file that needs to be cloned", type=[ "wav,Mp4","Mp3"]) - if audio_file is not None: - output_file_path = "./output_audio.mp3" - save_uploaded_file_as_mp3(audio_file, output_file_path) - st.success(f"Audio file saved as {output_file_path}") - user_api_key = st.sidebar.text_input( - label="#### Your Eleven Labs API key here 👇", - placeholder="Paste your Eleven Labs API key API key, sk-", - type="password") - - if user_api_key is not None and user_api_key.strip() != "": - set_api_key(user_api_key) - #making an voice - voice = clone( - name="Alex", - description="An middle aged American male voice with a slight hoarseness in his throat. 
Perfect for npodcast", # Optional - files=["./output_audio.mp3"], - model='eleven_multilingual_v1', - language='Hindi' - ) - - in_text=st.text_input("Paste the text you want to hear from english to hindi") - if in_text is not None and in_text.strip() != "": - hin_text=translate_english_to_hindi(in_text) - audio = generate(text=hin_text, voice=voice) - - st.audio(audio) - -if __name__=="__main__": - ui() - diff --git a/spaces/YUANAI/DiffspeechResearch/modules/commons/rel_transformer.py b/spaces/YUANAI/DiffspeechResearch/modules/commons/rel_transformer.py deleted file mode 100644 index 7e5b68b682be7ef0d1049015f0cd03d4e74f77d2..0000000000000000000000000000000000000000 --- a/spaces/YUANAI/DiffspeechResearch/modules/commons/rel_transformer.py +++ /dev/null @@ -1,439 +0,0 @@ -import math -import torch -from torch import nn -from torch.nn import functional as F - -from modules.commons.layers import Embedding - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def shift_1d(x): - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] - return x - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -class Encoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., - window_size=None, block_length=None, pre_ln=False, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - self.block_length = block_length - self.pre_ln = pre_ln - - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append( - MultiHeadAttention(hidden_channels, hidden_channels, n_heads, window_size=window_size, - p_dropout=p_dropout, block_length=block_length)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append( - FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - if pre_ln: - self.last_ln = LayerNorm(hidden_channels) - - def forward(self, x, x_mask): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - for i in range(self.n_layers): - x = x * x_mask - x_ = x - if self.pre_ln: - x = self.norm_layers_1[i](x) - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = x_ + y - if not self.pre_ln: - x = self.norm_layers_1[i](x) - - x_ = x - if self.pre_ln: - x = self.norm_layers_2[i](x) - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = x_ + y - if not self.pre_ln: - x = self.norm_layers_2[i](x) - if self.pre_ln: - x = self.last_ln(x) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__(self, channels, out_channels, n_heads, window_size=None, heads_share=True, p_dropout=0., - block_length=None, proximal_bias=False, proximal_init=False): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.window_size = window_size - self.heads_share = heads_share 
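# Illustrative aside: a toy-sized example (shapes assumed here, not taken from a real run) of
# how the sequence_mask() helper defined above turns per-sample lengths into the padding mask
# that Encoder.forward() then expands into a pairwise attention mask.
import torch

lengths = torch.tensor([3, 5, 2])                         # valid timesteps per batch element
max_len = int(lengths.max())
x_mask = (torch.arange(max_len).unsqueeze(0) < lengths.unsqueeze(1)).float()   # [B, T]
print(x_mask)
# tensor([[1., 1., 1., 0., 0.],
#         [1., 1., 1., 1., 1.],
#         [1., 1., 0., 0., 0.]])

# Encoder.forward() keeps x_mask as [B, 1, T]; the pairwise mask is 1 only where both the
# query position and the key position are real (non-padded) tokens.
x_mask = x_mask.unsqueeze(1)                              # [B, 1, T]
attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)    # [B, 1, T, T]
print(attn_mask.shape)                                    # torch.Size([3, 1, 5, 5])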
- self.block_length = block_length - self.proximal_bias = proximal_bias - self.p_dropout = p_dropout - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels ** -0.5 - self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - if proximal_init: - self.conv_k.weight.data.copy_(self.conv_q.weight.data) - self.conv_k.bias.data.copy_(self.conv_q.bias.data) - nn.init.xavier_uniform_(self.conv_v.weight) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self.k_channels) - if self.window_size is not None: - assert t_s == t_t, "Relative attention is only available for self-attention." - key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys(query, key_relative_embeddings) - rel_logits = self._relative_position_to_absolute_position(rel_logits) - scores_local = rel_logits / math.sqrt(self.k_channels) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." 
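# Illustrative aside: a naive, loop-based re-computation (single head, toy sizes assumed) of
# the windowed relative-position term that attention() above obtains with the pad/reshape
# "skew" trick: each query i gets an extra score q_i . E[j - i] against key j, where E holds
# 2 * window_size + 1 learned embeddings and offsets outside the window contribute nothing.
import math
import torch

torch.manual_seed(0)
t, d, w = 6, 4, 2                       # sequence length, per-head dim, window size
q = torch.randn(t, d)                   # queries for one head of one batch element
E = torch.randn(2 * w + 1, d)           # relative embeddings for offsets -w .. +w

rel_scores = torch.zeros(t, t)
for i in range(t):
    for j in range(t):
        offset = j - i
        if abs(offset) <= w:            # out-of-window offsets are effectively zero-padded
            rel_scores[i, j] = q[i] @ E[offset + w]
rel_scores = rel_scores / math.sqrt(d)  # same 1/sqrt(d_k) scaling as the content scores
print(rel_scores)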
- scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length) - scores = scores * block_mask + -1e4 * (1 - block_mask) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) - output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings) - output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. - pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[:, slice_start_position:slice_end_position] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). - x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [0, length - 1]])) - - # Reshape and slice out the padded elements. - x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:, :, :length, length - 1:] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # padd along column - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]])) - x_flat = x.view([batch, heads, length ** 2 + length * (length - 1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. 
- Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(x * x_mask) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(x * x_mask) - return x * x_mask - - -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-4): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - n_dims = len(x.shape) - mean = torch.mean(x, 1, keepdim=True) - variance = torch.mean((x - mean) ** 2, 1, keepdim=True) - - x = (x - mean) * torch.rsqrt(variance + self.eps) - - shape = [1, -1] + [1] * (n_dims - 2) - x = x * self.gamma.view(*shape) + self.beta.view(*shape) - return x - - -class ConvReluNorm(nn.Module): - def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout): - super().__init__() - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.out_channels = out_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - assert n_layers > 1, "Number of layers should be larger than 0." 
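# Illustrative aside: a quick numerical check (length 4 chosen arbitrarily) of the proximal
# attention bias implemented by _attention_bias_proximal() above. Each pair of positions is
# penalised by -log(1 + |i - j|), so the diagonal gets 0 and distant pairs grow more negative;
# the module then unsqueezes the result to [1, 1, length, length] before adding it to scores.
import torch

r = torch.arange(4, dtype=torch.float32)
diff = r.unsqueeze(0) - r.unsqueeze(1)    # pairwise signed distances
bias = -torch.log1p(diff.abs())           # [4, 4]
print(bias)
# tensor([[ 0.0000, -0.6931, -1.0986, -1.3863],
#         [-0.6931,  0.0000, -0.6931, -1.0986],
#         [-1.0986, -0.6931,  0.0000, -0.6931],
#         [-1.3863, -1.0986, -0.6931,  0.0000]])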
- - self.conv_layers = nn.ModuleList() - self.norm_layers = nn.ModuleList() - self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size // 2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential( - nn.ReLU(), - nn.Dropout(p_dropout)) - for _ in range(n_layers - 1): - self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size // 2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask): - x_org = x - for i in range(self.n_layers): - x = self.conv_layers[i](x * x_mask) - x = self.norm_layers[i](x) - x = self.relu_drop(x) - x = x_org + self.proj(x) - return x * x_mask - - -class RelTransformerEncoder(nn.Module): - def __init__(self, - n_vocab, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - window_size=4, - block_length=None, - prenet=True, - pre_ln=True, - ): - - super().__init__() - - self.n_vocab = n_vocab - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - self.block_length = block_length - self.prenet = prenet - self.emb = Embedding(n_vocab, hidden_channels, padding_idx=0) - - if prenet: - self.pre = ConvReluNorm(hidden_channels, hidden_channels, hidden_channels, - kernel_size=5, n_layers=3, p_dropout=0) - self.encoder = Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - window_size=window_size, - block_length=block_length, - pre_ln=pre_ln, - ) - - def forward(self, x, x_mask=None): - if self.n_vocab > 0: - x_lengths = (x > 0).long().sum(-1) - x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] - else: - x_lengths = (x.abs().sum(-1) > 0).long().sum(-1) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - - if self.prenet: - x = self.pre(x, x_mask) - x = self.encoder(x, x_mask) - return x.transpose(1, 2) - - -class RelTransformerEncoder(nn.Module): - def __init__(self, - n_vocab, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout=0.0, - window_size=4, - block_length=None, - prenet=True, - pre_ln=True, - ): - - super().__init__() - - self.n_vocab = n_vocab - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - self.block_length = block_length - self.prenet = prenet - if n_vocab > 0: - self.emb = Embedding(n_vocab, hidden_channels, padding_idx=0) - - if prenet: - self.pre = ConvReluNorm(hidden_channels, hidden_channels, hidden_channels, - kernel_size=5, n_layers=3, p_dropout=0) - self.encoder = Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - window_size=window_size, - block_length=block_length, - pre_ln=pre_ln, - ) - - def forward(self, x, x_mask=None): - if self.n_vocab > 0: - x_lengths = (x > 0).long().sum(-1) - x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] - else: - x_lengths = (x.abs().sum(-1) > 
0).long().sum(-1) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - - if self.prenet: - x = self.pre(x, x_mask) - x = self.encoder(x, x_mask) - return x.transpose(1, 2) diff --git a/spaces/Yabo/ControlVideo/app.py b/spaces/Yabo/ControlVideo/app.py deleted file mode 100644 index a62b1a1abdeca99203c9c06a82df4971c38a09bd..0000000000000000000000000000000000000000 --- a/spaces/Yabo/ControlVideo/app.py +++ /dev/null @@ -1,351 +0,0 @@ -import gradio as gr -import os -import shutil -import subprocess -import cv2 -import numpy as np -import math - -from huggingface_hub import snapshot_download - -os.environ['CUDA_LAUNCH_BLOCKING'] = '1' - - -model_ids = [ - 'runwayml/stable-diffusion-v1-5', - 'lllyasviel/sd-controlnet-depth', - 'lllyasviel/sd-controlnet-canny', - 'lllyasviel/sd-controlnet-openpose', - # "lllyasviel/control_v11p_sd15_softedge", - # "lllyasviel/control_v11p_sd15_scribble", - # "lllyasviel/control_v11p_sd15_lineart_anime", - # "lllyasviel/control_v11p_sd15_lineart", - # "lllyasviel/control_v11f1p_sd15_depth", - # "lllyasviel/control_v11p_sd15_canny", - # "lllyasviel/control_v11p_sd15_openpose", - # "lllyasviel/control_v11p_sd15_normalbae" -] - - -for model_id in model_ids: - model_name = model_id.split('/')[-1] - snapshot_download(model_id, cache_dir=f'checkpoints/{model_name}') - -def load_model(model_id): - local_dir = f'checkpoints/stable-diffusion-v1-5' - # Check if the directory exists - if os.path.exists(local_dir): - # Delete the directory if it exists - shutil.rmtree(local_dir) - - model_name = model_id.split('/')[-1] - snapshot_download(model_id, local_dir=f'checkpoints/{model_name}') - os.rename(f'checkpoints/{model_name}', f'checkpoints/stable-diffusion-v1-5') - return "model loaded" - -def get_frame_count(filepath): - if filepath is not None: - video = cv2.VideoCapture(filepath) - frame_count = int(video.get(cv2.CAP_PROP_FRAME_COUNT)) - - video.release() - - # LIMITS - if frame_count > 100 : - frame_count = 100 # limit to 100 frames to avoid cuDNN errors - - return gr.update(maximum=frame_count) - - else: - return gr.update(value=1, maximum=100 ) - -def get_video_dimension(filepath): - video = cv2.VideoCapture(filepath) - width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH)) - height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT)) - fps = int(video.get(cv2.CAP_PROP_FPS)) - frame_count = int(video.get(cv2.CAP_PROP_FRAME_COUNT)) - video.release() - return width, height, fps, frame_count - -def resize_video(input_vid, output_vid, width, height, fps): - print(f"RESIZING ...") - # Open the input video file - video = cv2.VideoCapture(input_vid) - - # Create a VideoWriter object to write the resized video - fourcc = cv2.VideoWriter_fourcc(*'mp4v') # Codec for the output video - output_video = cv2.VideoWriter(output_vid, fourcc, fps, (width, height)) - - while True: - # Read a frame from the input video - ret, frame = video.read() - if not ret: - break - - # Resize the frame to the desired dimensions - resized_frame = cv2.resize(frame, (width, height)) - - # Write the resized frame to the output video file - output_video.write(resized_frame) - - # Release the video objects - video.release() - output_video.release() - print(f"RESIZE VIDEO DONE!") - return output_vid - -def make_nearest_multiple_of_32(number): - remainder = number % 32 - if remainder <= 16: - number -= remainder - else: - number += 32 - remainder - return number - -def change_video_fps(input_path): - print(f"CHANGING FIANL OUTPUT FPS") - cap = 
cv2.VideoCapture(input_path) - # Check if the final file already exists - if os.path.exists('output_video.mp4'): - # Delete the existing file - os.remove('output_video.mp4') - output_path = 'output_video.mp4' - fourcc = cv2.VideoWriter_fourcc(*'mp4v') - output_fps = 12 - output_size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))) - out = cv2.VideoWriter(output_path, fourcc, output_fps, output_size) - - frame_count = 0 - while cap.isOpened(): - ret, frame = cap.read() - if not ret: - break - - # Write the current frame to the output video multiple times to increase the frame rate - for _ in range(output_fps // 8): - out.write(frame) - - frame_count += 1 - print(f'Processed frame {frame_count}') - - cap.release() - out.release() - cv2.destroyAllWindows() - - return 'output_video.mp4' - -def run_inference(prompt, video_path, version_condition, video_length, seed): - - seed = math.floor(seed) - o_width = get_video_dimension(video_path)[0] - o_height = get_video_dimension(video_path)[1] - version, condition = version_condition.split("+") - - # Prepare dimensions - if o_width > 512 : - # Calculate the new height while maintaining the aspect ratio - n_height = int(o_height / o_width * 512) - n_width = 512 - else: - n_height = o_height - n_width = o_width - - # Make sure new dimensions are multipe of 32 - r_width = make_nearest_multiple_of_32(n_width) - r_height = make_nearest_multiple_of_32(n_height) - print(f"multiple of 32 sizes : {r_width}x{r_height}") - - # Get FPS of original video input - original_fps = get_video_dimension(video_path)[2] - if original_fps > 12 : - print(f"FPS is too high: {original_fps}") - target_fps = 12 - else : - target_fps = original_fps - print(f"NEW INPUT FPS: {target_fps}, NEW LENGTH: {video_length}") - - # Check if the resized file already exists - if os.path.exists('resized.mp4'): - # Delete the existing file - os.remove('resized.mp4') - - resized = resize_video(video_path, 'resized.mp4', r_width, r_height, target_fps) - resized_video_fcount = get_video_dimension(resized)[3] - print(f"RESIZED VIDEO FRAME COUNT: {resized_video_fcount}") - - # Make sure new total frame count is enough to handle chosen video length - if video_length > resized_video_fcount : - video_length = resized_video_fcount - # video_length = int((target_fps * video_length) / original_fps) - - output_path = 'output/' - os.makedirs(output_path, exist_ok=True) - - # Check if the file already exists - if os.path.exists(os.path.join(output_path, f"result.mp4")): - # Delete the existing file - os.remove(os.path.join(output_path, f"result.mp4")) - - print(f"RUNNING INFERENCE ...") - if video_length > 16: - command = f"python inference.py --prompt '{prompt}' --condition '{condition}' --video_path '{resized}' --output_path '{output_path}' --temp_video_name 'result' --width {r_width} --height {r_height} --seed {seed} --video_length {video_length} --smoother_steps 19 20 --version {version} --is_long_video" - else: - command = f"python inference.py --prompt '{prompt}' --condition '{condition}' --video_path '{resized}' --output_path '{output_path}' --temp_video_name 'result' --width {r_width} --height {r_height} --seed {seed} --video_length {video_length} --smoother_steps 19 20 --version {version} " - - try: - subprocess.run(command, shell=True) - except cuda.Error as e: - return f"CUDA Error: {e}", None - except RuntimeError as e: - return f"Runtime Error: {e}", None - - # Construct the video path - video_path_output = os.path.join(output_path, f"result.mp4") - - # Resize 
to original video input size - #o_width = get_video_dimension(video_path)[0] - #o_height = get_video_dimension(video_path)[1] - #resize_video(video_path_output, 'resized_final.mp4', o_width, o_height, target_fps) - - # Check generated video FPS - gen_fps = get_video_dimension(video_path_output)[2] - print(f"GEN VIDEO FPS: {gen_fps}") - final = change_video_fps(video_path_output) - print(f"FINISHED !") - - return final - # return final, gr.Group.update(visible=True) - - -css=""" -#col-container {max-width: 810px; margin-left: auto; margin-right: auto;} -.animate-spin { - animation: spin 1s linear infinite; -} -@keyframes spin { - from { - transform: rotate(0deg); - } - to { - transform: rotate(360deg); - } -} -#share-btn-container { - display: flex; - padding-left: 0.5rem !important; - padding-right: 0.5rem !important; - background-color: #000000; - justify-content: center; - align-items: center; - border-radius: 9999px !important; - max-width: 13rem; -} -#share-btn-container:hover { - background-color: #060606; -} -#share-btn { - all: initial; - color: #ffffff; - font-weight: 600; - cursor:pointer; - font-family: 'IBM Plex Sans', sans-serif; - margin-left: 0.5rem !important; - padding-top: 0.5rem !important; - padding-bottom: 0.5rem !important; - right:0; -} -#share-btn * { - all: unset; -} -#share-btn-container div:nth-child(-n+2){ - width: auto !important; - min-height: 0px !important; -} -#share-btn-container .wrap { - display: none !important; -} -#share-btn-container.hidden { - display: none!important; -} -img[src*='#center'] { - display: block; - margin: auto; -} -""" -with gr.Blocks(css=css) as demo: - with gr.Column(elem_id="col-container"): - gr.Markdown(""" -

    ControlVideo: Training-free Controllable Text-to-Video Generation

    [arXiv] - [GitHub]

    ControlVideo adapts ControlNet to video generation without any finetuning, aiming to directly inherit ControlNet's high-quality and consistent generation.

    - """) - - with gr.Column(): - with gr.Row(): - video_path = gr.Video(label="Input video", source="upload", type="filepath", visible=True, elem_id="video-in") - video_res = gr.Video(label="result", elem_id="video-out") - - # with gr.Column(): - # video_res = gr.Video(label="result", elem_id="video-out") - # with gr.Group(elem_id="share-btn-container", visible=False) as share_group: - # community_icon = gr.HTML(community_icon_html) - # loading_icon = gr.HTML(loading_icon_html) - # share_button = gr.Button("Share to community", elem_id="share-btn") - with gr.Row(): - chosen_model = gr.Dropdown(label="Diffusion model (*1.5)", choices=['runwayml/stable-diffusion-v1-5','nitrosocke/Ghibli-Diffusion'], value="runwayml/stable-diffusion-v1-5", allow_custom_value=True) - model_status = gr.Textbox(label="status") - load_model_btn = gr.Button("load model (optional)") - prompt = gr.Textbox(label="prompt", info="If you loaded a custom model, do not forget to include Prompt trigger", elem_id="prompt-in") - with gr.Column(): - video_length = gr.Slider(label="Video length", info="How many frames do you want to process ? For demo purpose, max is set to 24", minimum=1, maximum=12, step=1, value=2) - with gr.Row(): - # version = gr.Dropdown(label="ControlNet version", choices=["v10", "v11"], value="v10") - version_condition = gr.Dropdown(label="ControlNet version + Condition", - choices=["v10+depth_midas", "v10+canny", "v10+openpose"], - value="v10+depth_midas") - - # "v11+softedge_pidinet", "v11+softedge_pidsafe", - # "v11+softedge_hed", "v11+softedge_hedsafe", "v11+scribble_hed", "v11+scribble_pidinet", "v11+lineart_anime", - # "v11+lineart_coarse", "v11+lineart_realistic", "v11+depth_midas", "v11+depth_leres", "v11+depth_leres++", - # "v11+depth_zoe", "v11+canny", "v11+openpose", "v11+openpose_face", "v11+openpose_faceonly", "v11+openpose_full", - # "v11+openpose_hand", "v11+normal_bae"], - seed = gr.Number(label="seed", value=42) - submit_btn = gr.Button("Submit") - - - gr.Examples( - examples=[["James bond moonwalks on the beach.", "./moonwalk.mp4", 'v10+openpose', 15, 42], - ["A striking mallard floats effortlessly on the sparkling pond.", "./mallard-water.mp4", "v10+depth_midas", 15, 42]], - fn=run_inference, - inputs=[prompt, - video_path, - version_condition, - video_length, - seed, - ], - # outputs=[video_res, share_group], - outputs=video_res, - cache_examples=False - ) - - # share_button.click(None, [], [], _js=share_js) - load_model_btn.click(fn=load_model, inputs=[chosen_model], outputs=[model_status], queue=False) - video_path.change(fn=get_frame_count, - inputs=[video_path], - outputs=[video_length], - queue=False - ) - submit_btn.click(fn=run_inference, - inputs=[prompt, - video_path, - version_condition, - video_length, - seed, - ], - outputs=video_res) - -demo.queue(max_size=12).launch() \ No newline at end of file diff --git a/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/experimental/README.md b/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/experimental/README.md deleted file mode 100644 index 81a9de81c73728ea41eb6e8617a5429c3c9645ff..0000000000000000000000000000000000000000 --- a/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/experimental/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# 🧨 Diffusers Experimental - -We are adding experimental code to support novel applications and usages of the Diffusers library. 
-Currently, the following experiments are supported: -* Reinforcement learning via an implementation of the [Diffuser](https://arxiv.org/abs/2205.09991) model. \ No newline at end of file diff --git a/spaces/Yeshwant123/mcc/mcc.py b/spaces/Yeshwant123/mcc/mcc.py deleted file mode 100644 index ba4a196282aa044c9e0d5a7349655b664e840779..0000000000000000000000000000000000000000 --- a/spaces/Yeshwant123/mcc/mcc.py +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""TODO: MCC is a correlation coefficient between the observed and predicted binary classifications, and takes into account true and false positives and negatives.""" - -import evaluate -import datasets -from sklearn.metrics import matthews_corrcoef - - - -# TODO: Add BibTeX citation -_CITATION = """\ -@InProceedings{huggingface:module, -title = {MCC Metric}, -authors={huggingface, Inc.}, -year={2020} -} -""" - -# TODO: Add description of the module here -_DESCRIPTION = """\ -MCC (Matthews Correlation Coefficient) is a correlation coefficient between the observed and predicted binary classifications, and takes into account true and false positives and negatives. It can be computed with the equation: -MCC = (TP * TN - FP * FN) / sqrt((TP+FP) * (TP+FN) * (TN+FP) * (TN+FN)) -Where TP is the true positives, TN is the true negatives, FP is the false positives, and FN is the false negatives. -""" - - -# TODO: Add description of the arguments of the module here -_KWARGS_DESCRIPTION = """ -Calculates how good are predictions given some references, using certain scores -Args: - - **predictions** (`list` of `int`): The predicted labels. - - **references** (`list` of `int`): The ground truth labels. -Returns: - - **mcc** (`float`): The MCC score. Minimum possible value is -1. Maximum possible value is 1. A higher MCC means that the predicted and observed binary classifications agree better, while a negative MCC means that they agree worse than chance. 
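As a concrete check of the formula above, using the same data as Example 1 below: references=[0, 0, 1, 1, 1] and predictions=[0, 1, 0, 1, 1] give TP=2, TN=1, FP=1, FN=1, so
MCC = (2*1 - 1*1) / sqrt((2+1)*(2+1)*(1+1)*(1+1)) = 1 / sqrt(36) = 1/6 ≈ 0.1667,
which matches the 0.16666666666666666 returned in Example 1.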
-Examples: - Example 1-A simple example with some errors - >>> mcc_metric = evaluate.load('mcc') - >>> results = mcc_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1]) - >>> print(results) - {'mcc': 0.16666666666666666} - Example 2-The same example as Example 1, but with some different labels - >>> mcc_metric = evaluate.load('mcc') - >>> results = mcc_metric.compute(references=[0, 1, 2, 2, 2], predictions=[0, 2, 2, 1, 2]) - >>> print(results) - {'mcc': 0.2041241452319315} -""" - -# TODO: Define external resources urls if needed -BAD_WORDS_URL = "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html" - - -@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION) -class MCC(evaluate.Metric): - """Compute MCC Scores""" - - def _info(self): - return evaluate.MetricInfo( - module_type="metric", - description=_DESCRIPTION, - citation=_CITATION, - inputs_description=_KWARGS_DESCRIPTION, - features=datasets.Features({ - 'predictions': datasets.Value('int64'), - 'references': datasets.Value('int64'), - }), - # Homepage of the module for documentation - homepage="https://huggingface.co/evaluate-metric?message=Request%20sent", - # Additional links to the codebase or references - codebase_urls=[], - reference_urls=[] - ) - - def _compute(self, predictions, references): - """Returns the mcc scores""" - # Computes the MCC score using matthews_corrcoef from sklearn - - return {"mcc": matthews_corrcoef(references, predictions)} \ No newline at end of file diff --git a/spaces/Yukki-Yui/moe-tts/text/japanese.py b/spaces/Yukki-Yui/moe-tts/text/japanese.py deleted file mode 100644 index 65480534b452efabe87b40033316e2c1577ff3ea..0000000000000000000000000000000000000000 --- a/spaces/Yukki-Yui/moe-tts/text/japanese.py +++ /dev/null @@ -1,132 +0,0 @@ -import re -from unidecode import unidecode -import pyopenjtalk - - -# Regular expression matching Japanese without punctuation marks: -_japanese_characters = re.compile( - r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# Regular expression matching non-Japanese characters or punctuation marks: -_japanese_marks = re.compile( - r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# List of (symbol, Japanese) pairs for marks: -_symbols_to_japanese = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('%', 'パーセント') -]] - -# List of (romaji, ipa) pairs for marks: -_romaji_to_ipa = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('ts', 'ʦ'), - ('u', 'ɯ'), - ('...', '…'), - ('j', 'ʥ'), - ('y', 'j'), - ('ni', 'n^i'), - ('nj', 'n^'), - ('hi', 'çi'), - ('hj', 'ç'), - ('f', 'ɸ'), - ('I', 'i*'), - ('U', 'ɯ*'), - ('r', 'ɾ') -]] - -# Dictinary of (consonant, sokuon) pairs: -_real_sokuon = { - 'k': 'k#', - 'g': 'k#', - 't': 't#', - 'd': 't#', - 'ʦ': 't#', - 'ʧ': 't#', - 'ʥ': 't#', - 'j': 't#', - 's': 's', - 'ʃ': 's', - 'p': 'p#', - 'b': 'p#' -} - -# Dictinary of (consonant, hatsuon) pairs: -_real_hatsuon = { - 'p': 'm', - 'b': 'm', - 'm': 'm', - 't': 'n', - 'd': 'n', - 'n': 'n', - 'ʧ': 'n^', - 'ʥ': 'n^', - 'k': 'ŋ', - 'g': 'ŋ' -} - - -def symbols_to_japanese(text): - for regex, replacement in _symbols_to_japanese: - text = re.sub(regex, replacement, text) - return text - - -def japanese_to_romaji_with_accent(text): - '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html''' - text = symbols_to_japanese(text) - sentences = re.split(_japanese_marks, text) - 
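    # Example: for text = 'こんにちは、世界!', this split gives ['こんにちは', '世界', '']
    # (runs of Japanese characters), while re.findall(_japanese_marks, text) below
    # collects the punctuation marks ['、', '!'] that are re-attached after conversion.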
marks = re.findall(_japanese_marks, text) - text = '' - for i, sentence in enumerate(sentences): - if re.match(_japanese_characters, sentence): - if text != '': - text += ' ' - labels = pyopenjtalk.extract_fullcontext(sentence) - for n, label in enumerate(labels): - phoneme = re.search(r'\-([^\+]*)\+', label).group(1) - if phoneme not in ['sil', 'pau']: - text += phoneme.replace('ch', 'ʧ').replace('sh', - 'ʃ').replace('cl', 'Q') - else: - continue - # n_moras = int(re.search(r'/F:(\d+)_', label).group(1)) - a1 = int(re.search(r"/A:(\-?[0-9]+)\+", label).group(1)) - a2 = int(re.search(r"\+(\d+)\+", label).group(1)) - a3 = int(re.search(r"\+(\d+)/", label).group(1)) - if re.search(r'\-([^\+]*)\+', labels[n + 1]).group(1) in ['sil', 'pau']: - a2_next = -1 - else: - a2_next = int( - re.search(r"\+(\d+)\+", labels[n + 1]).group(1)) - # Accent phrase boundary - if a3 == 1 and a2_next == 1: - text += ' ' - # Falling - elif a1 == 0 and a2_next == a2 + 1: - text += '↓' - # Rising - elif a2 == 1 and a2_next == 2: - text += '↑' - if i < len(marks): - text += unidecode(marks[i]).replace(' ', '') - return text - - -def get_real_sokuon(text): - text=re.sub('Q[↑↓]*(.)',lambda x:_real_sokuon[x.group(1)]+x.group(0)[1:] if x.group(1) in _real_sokuon.keys() else x.group(0),text) - return text - - -def get_real_hatsuon(text): - text=re.sub('N[↑↓]*(.)',lambda x:_real_hatsuon[x.group(1)]+x.group(0)[1:] if x.group(1) in _real_hatsuon.keys() else x.group(0),text) - return text - - -def japanese_to_ipa(text): - text=japanese_to_romaji_with_accent(text) - for regex, replacement in _romaji_to_ipa: - text = re.sub(regex, replacement, text) - text = re.sub( - r'([A-Za-zɯ])\1+', lambda x: x.group(0)[0]+'ː'*(len(x.group(0))-1), text) - text = get_real_sokuon(text) - text = get_real_hatsuon(text) - return text diff --git a/spaces/Yuliang/ECON/lib/pixielib/models/encoders.py b/spaces/Yuliang/ECON/lib/pixielib/models/encoders.py deleted file mode 100644 index 44f979a2063fe62e3de451bebb267a8852e85955..0000000000000000000000000000000000000000 --- a/spaces/Yuliang/ECON/lib/pixielib/models/encoders.py +++ /dev/null @@ -1,60 +0,0 @@ -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F - - -class ResnetEncoder(nn.Module): - def __init__(self, append_layers=None): - super(ResnetEncoder, self).__init__() - from . import resnet - - # feature_size = 2048 - self.feature_dim = 2048 - self.encoder = resnet.load_ResNet50Model() # out: 2048 - # regressor - self.append_layers = append_layers - - def forward(self, inputs): - """inputs: [bz, 3, h, w], range: [0,1]""" - features = self.encoder(inputs) - if self.append_layers: - features = self.last_op(features) - return features - - -class MLP(nn.Module): - def __init__(self, channels=[2048, 1024, 1], last_op=None): - super(MLP, self).__init__() - layers = [] - - for l in range(0, len(channels) - 1): - layers.append(nn.Linear(channels[l], channels[l + 1])) - if l < len(channels) - 2: - layers.append(nn.ReLU()) - if last_op: - layers.append(last_op) - - self.layers = nn.Sequential(*layers) - - def forward(self, inputs): - outs = self.layers(inputs) - return outs - - -class HRNEncoder(nn.Module): - def __init__(self, append_layers=None): - super(HRNEncoder, self).__init__() - from . 
import hrnet - - self.feature_dim = 2048 - self.encoder = hrnet.load_HRNet(pretrained=True) # out: 2048 - # regressor - self.append_layers = append_layers - - def forward(self, inputs): - """inputs: [bz, 3, h, w], range: [-1,1]""" - features = self.encoder(inputs)["concat"] - if self.append_layers: - features = self.last_op(features) - return features diff --git a/spaces/Yuliang/ICON/lib/renderer/gl/norm_render.py b/spaces/Yuliang/ICON/lib/renderer/gl/norm_render.py deleted file mode 100644 index 3ba29a678280a0903361b9d03c4d0d05f42d7fbf..0000000000000000000000000000000000000000 --- a/spaces/Yuliang/ICON/lib/renderer/gl/norm_render.py +++ /dev/null @@ -1,79 +0,0 @@ -''' -MIT License - -Copyright (c) 2019 Shunsuke Saito, Zeng Huang, and Ryota Natsume - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
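A minimal usage sketch for the MLP regressor defined in encoders.py above. The import path follows the file location shown in this diff, and the channel sizes, batch size, and Sigmoid head are illustrative assumptions rather than values taken from the original code:

import torch
import torch.nn as nn
from lib.pixielib.models.encoders import MLP  # path assumed from the diff header above

# Map a 2048-d pooled image feature (e.g. a ResnetEncoder output) to 10 outputs in [0, 1].
mlp = MLP(channels=[2048, 1024, 10], last_op=nn.Sigmoid())
features = torch.randn(4, 2048)   # batch of 4 feature vectors (shape assumed for illustration)
outputs = mlp(features)           # -> tensor of shape (4, 10)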
-''' -from OpenGL.GLUT import * - -from .render2 import Render - - -class NormRender(Render): - def __init__(self, - width=1600, - height=1200, - name='Cam Renderer', - program_files=['simple.fs', 'simple.vs'], - color_size=1, - ms_rate=1): - Render.__init__(self, width, height, name, program_files, color_size, - ms_rate) - self.camera = None - - glutDisplayFunc(self.display) - glutKeyboardFunc(self.keyboard) - - def set_camera(self, camera): - self.camera = camera - self.projection_matrix, self.model_view_matrix = camera.get_gl_matrix() - - def set_matrices(self, projection, modelview): - self.projection_matrix = projection - self.model_view_matrix = modelview - - def keyboard(self, key, x, y): - # up - eps = 1 - # print(key) - if key == b'w': - self.camera.center += eps * self.camera.direction - elif key == b's': - self.camera.center -= eps * self.camera.direction - if key == b'a': - self.camera.center -= eps * self.camera.right - elif key == b'd': - self.camera.center += eps * self.camera.right - if key == b' ': - self.camera.center += eps * self.camera.up - elif key == b'x': - self.camera.center -= eps * self.camera.up - elif key == b'i': - self.camera.near += 0.1 * eps - self.camera.far += 0.1 * eps - elif key == b'o': - self.camera.near -= 0.1 * eps - self.camera.far -= 0.1 * eps - - self.projection_matrix, self.model_view_matrix = self.camera.get_gl_matrix( - ) - - def show(self): - glutMainLoop() diff --git a/spaces/Zaxxced/rvc-random-v2/lib/infer_pack/modules/F0Predictor/DioF0Predictor.py b/spaces/Zaxxced/rvc-random-v2/lib/infer_pack/modules/F0Predictor/DioF0Predictor.py deleted file mode 100644 index ee3171bcb7c4a5066560723108b56e055f18be45..0000000000000000000000000000000000000000 --- a/spaces/Zaxxced/rvc-random-v2/lib/infer_pack/modules/F0Predictor/DioF0Predictor.py +++ /dev/null @@ -1,90 +0,0 @@ -from lib.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor -import pyworld -import numpy as np - - -class DioF0Predictor(F0Predictor): - def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100): - self.hop_length = hop_length - self.f0_min = f0_min - self.f0_max = f0_max - self.sampling_rate = sampling_rate - - def interpolate_f0(self, f0): - """ - 对F0进行插值处理 - """ - - data = np.reshape(f0, (f0.size, 1)) - - vuv_vector = np.zeros((data.size, 1), dtype=np.float32) - vuv_vector[data > 0.0] = 1.0 - vuv_vector[data <= 0.0] = 0.0 - - ip_data = data - - frame_number = data.size - last_value = 0.0 - for i in range(frame_number): - if data[i] <= 0.0: - j = i + 1 - for j in range(i + 1, frame_number): - if data[j] > 0.0: - break - if j < frame_number - 1: - if last_value > 0.0: - step = (data[j] - data[i - 1]) / float(j - i) - for k in range(i, j): - ip_data[k] = data[i - 1] + step * (k - i + 1) - else: - for k in range(i, j): - ip_data[k] = data[j] - else: - for k in range(i, frame_number): - ip_data[k] = last_value - else: - ip_data[i] = data[i] # 这里可能存在一个没有必要的拷贝 - last_value = data[i] - - return ip_data[:, 0], vuv_vector[:, 0] - - def resize_f0(self, x, target_len): - source = np.array(x) - source[source < 0.001] = np.nan - target = np.interp( - np.arange(0, len(source) * target_len, len(source)) / target_len, - np.arange(0, len(source)), - source, - ) - res = np.nan_to_num(target) - return res - - def compute_f0(self, wav, p_len=None): - if p_len is None: - p_len = wav.shape[0] // self.hop_length - f0, t = pyworld.dio( - wav.astype(np.double), - fs=self.sampling_rate, - f0_floor=self.f0_min, - f0_ceil=self.f0_max, - frame_period=1000 * self.hop_length / 
self.sampling_rate, - ) - f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate) - for index, pitch in enumerate(f0): - f0[index] = round(pitch, 1) - return self.interpolate_f0(self.resize_f0(f0, p_len))[0] - - def compute_f0_uv(self, wav, p_len=None): - if p_len is None: - p_len = wav.shape[0] // self.hop_length - f0, t = pyworld.dio( - wav.astype(np.double), - fs=self.sampling_rate, - f0_floor=self.f0_min, - f0_ceil=self.f0_max, - frame_period=1000 * self.hop_length / self.sampling_rate, - ) - f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate) - for index, pitch in enumerate(f0): - f0[index] = round(pitch, 1) - return self.interpolate_f0(self.resize_f0(f0, p_len)) diff --git a/spaces/Zeltoria/anime-voice-generator/mel_processing.py b/spaces/Zeltoria/anime-voice-generator/mel_processing.py deleted file mode 100644 index 3e252e76320522a8a4195a60665168f22769aec2..0000000000000000000000000000000000000000 --- a/spaces/Zeltoria/anime-voice-generator/mel_processing.py +++ /dev/null @@ -1,101 +0,0 @@ -import torch -import torch.utils.data -from librosa.filters import mel as librosa_mel_fn - -MAX_WAV_VALUE = 32768.0 - - -def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): - """ - PARAMS - ------ - C: compression factor - """ - return torch.log(torch.clamp(x, min=clip_val) * C) - - -def dynamic_range_decompression_torch(x, C=1): - """ - PARAMS - ------ - C: compression factor used to compress - """ - return torch.exp(x) / C - - -def spectral_normalize_torch(magnitudes): - output = dynamic_range_compression_torch(magnitudes) - return output - - -def spectral_de_normalize_torch(magnitudes): - output = dynamic_range_decompression_torch(magnitudes) - return output - - -mel_basis = {} -hann_window = {} - - -def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global hann_window - dtype_device = str(y.dtype) + '_' + str(y.device) - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - return spec - - -def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax): - global mel_basis - dtype_device = str(spec.dtype) + '_' + str(spec.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device) - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - return spec - - -def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global mel_basis, hann_window - dtype_device = str(y.dtype) + '_' + str(y.device) - fmax_dtype_device = str(fmax) + '_' 
+ dtype_device - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device) - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', normalized=False, onesided=True) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - - return spec diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmseg/models/decode_heads/gc_head.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmseg/models/decode_heads/gc_head.py deleted file mode 100644 index 70741245af975800840709911bd18d72247e3e04..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmseg/models/decode_heads/gc_head.py +++ /dev/null @@ -1,47 +0,0 @@ -import torch -from annotator.uniformer.mmcv.cnn import ContextBlock - -from ..builder import HEADS -from .fcn_head import FCNHead - - -@HEADS.register_module() -class GCHead(FCNHead): - """GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond. - - This head is the implementation of `GCNet - `_. - - Args: - ratio (float): Multiplier of channels ratio. Default: 1/4. - pooling_type (str): The pooling type of context aggregation. - Options are 'att', 'avg'. Default: 'avg'. - fusion_types (tuple[str]): The fusion type for feature fusion. - Options are 'channel_add', 'channel_mul'. 
Default: ('channel_add',) - """ - - def __init__(self, - ratio=1 / 4., - pooling_type='att', - fusion_types=('channel_add', ), - **kwargs): - super(GCHead, self).__init__(num_convs=2, **kwargs) - self.ratio = ratio - self.pooling_type = pooling_type - self.fusion_types = fusion_types - self.gc_block = ContextBlock( - in_channels=self.channels, - ratio=self.ratio, - pooling_type=self.pooling_type, - fusion_types=self.fusion_types) - - def forward(self, inputs): - """Forward function.""" - x = self._transform_inputs(inputs) - output = self.convs[0](x) - output = self.gc_block(output) - output = self.convs[1](output) - if self.concat_input: - output = self.conv_cat(torch.cat([x, output], dim=1)) - output = self.cls_seg(output) - return output diff --git a/spaces/ahmedghani/Editing-Tools/image_converter.py b/spaces/ahmedghani/Editing-Tools/image_converter.py deleted file mode 100644 index 6037b711394c0684f7c3ef9009ee0e3f699f1c20..0000000000000000000000000000000000000000 --- a/spaces/ahmedghani/Editing-Tools/image_converter.py +++ /dev/null @@ -1,64 +0,0 @@ -import os -from PIL import Image -import pyheif -import io - -class ImageConverter: - def __init__(self): - self.supported_formats = { - 'jpg': 'JPEG', - 'jpeg': 'JPEG', - 'png': 'PNG', - 'bmp': 'BMP', - 'tiff': 'TIFF', - 'gif': 'GIF', - 'webp': 'WEBP', - 'ico': 'ICO', - 'heic': 'HEIC', - 'heiv': 'HEIC', - 'heif': 'HEIC', - } - - def open_heif_image(self, input_image): - heif_file = pyheif.read(input_image) - return Image.frombytes( - heif_file.mode, - heif_file.size, - heif_file.data, - "raw", - heif_file.mode, - heif_file.stride, - ) - - def convert_image(self, input_image, output_format, output_path=None): - try: - if not os.path.exists(input_image): - raise FileNotFoundError(f"The input image '{input_image}' does not exist.") - - input_extension = input_image.split('.')[-1].lower() - - if input_extension not in self.supported_formats: - raise ValueError(f"The input format '{input_extension}' is not supported.") - - if output_format.lower() not in self.supported_formats: - raise ValueError(f"The output format '{output_format}' is not supported.") - - if input_extension in ['heic', 'heiv', 'heif']: - image = self.open_heif_image(input_image) - else: - image = Image.open(input_image) - - if output_path is None: - output_image = '.'.join(input_image.split('.')[:-1]) + f'.{output_format}' - else: - output_image = output_path - - image.save(output_image, self.supported_formats[output_format.lower()]) - print(f"Image converted and saved as {output_image}") - return output_image, "Image converted and saved as {output_image}" - except Exception as e: - None, print(f"Error: {e}") - -def convert_image(input_image, output_format): - converter = ImageConverter() - return converter.convert_image(input_image.name, output_format) \ No newline at end of file diff --git a/spaces/airus/ss/upcunet_v3.py b/spaces/airus/ss/upcunet_v3.py deleted file mode 100644 index f7919a6cc9efe3b8af73a73e30825a4c7d7d76da..0000000000000000000000000000000000000000 --- a/spaces/airus/ss/upcunet_v3.py +++ /dev/null @@ -1,714 +0,0 @@ -import torch -from torch import nn as nn -from torch.nn import functional as F -import os, sys -import numpy as np - -root_path = os.path.abspath('.') -sys.path.append(root_path) - - -class SEBlock(nn.Module): - def __init__(self, in_channels, reduction=8, bias=False): - super(SEBlock, self).__init__() - self.conv1 = nn.Conv2d(in_channels, in_channels // reduction, 1, 1, 0, bias=bias) - self.conv2 = nn.Conv2d(in_channels // reduction, 
in_channels, 1, 1, 0, bias=bias) - - def forward(self, x): - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - x0 = torch.mean(x.float(), dim=(2, 3), keepdim=True).half() - else: - x0 = torch.mean(x, dim=(2, 3), keepdim=True) - x0 = self.conv1(x0) - x0 = F.relu(x0, inplace=True) - x0 = self.conv2(x0) - x0 = torch.sigmoid(x0) - x = torch.mul(x, x0) - return x - - def forward_mean(self, x, x0): - x0 = self.conv1(x0) - x0 = F.relu(x0, inplace=True) - x0 = self.conv2(x0) - x0 = torch.sigmoid(x0) - x = torch.mul(x, x0) - return x - - -class UNetConv(nn.Module): - def __init__(self, in_channels, mid_channels, out_channels, se): - super(UNetConv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d(in_channels, mid_channels, 3, 1, 0), - nn.LeakyReLU(0.1, inplace=True), - nn.Conv2d(mid_channels, out_channels, 3, 1, 0), - nn.LeakyReLU(0.1, inplace=True), - ) - if se: - self.seblock = SEBlock(out_channels, reduction=8, bias=True) - else: - self.seblock = None - - def forward(self, x): - z = self.conv(x) - if self.seblock is not None: - z = self.seblock(z) - return z - - -class UNet1(nn.Module): - def __init__(self, in_channels, out_channels, deconv): - super(UNet1, self).__init__() - self.conv1 = UNetConv(in_channels, 32, 64, se=False) - self.conv1_down = nn.Conv2d(64, 64, 2, 2, 0) - self.conv2 = UNetConv(64, 128, 64, se=True) - self.conv2_up = nn.ConvTranspose2d(64, 64, 2, 2, 0) - self.conv3 = nn.Conv2d(64, 64, 3, 1, 0) - - if deconv: - self.conv_bottom = nn.ConvTranspose2d(64, out_channels, 4, 2, 3) - else: - self.conv_bottom = nn.Conv2d(64, out_channels, 3, 1, 0) - - for m in self.modules(): - if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)): - nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') - elif isinstance(m, nn.Linear): - nn.init.normal_(m.weight, 0, 0.01) - if m.bias is not None: - nn.init.constant_(m.bias, 0) - - def forward(self, x): - x1 = self.conv1(x) - x2 = self.conv1_down(x1) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - x2 = self.conv2(x2) - x2 = self.conv2_up(x2) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - - x1 = F.pad(x1, (-4, -4, -4, -4)) - x3 = self.conv3(x1 + x2) - x3 = F.leaky_relu(x3, 0.1, inplace=True) - z = self.conv_bottom(x3) - return z - - def forward_a(self, x): - x1 = self.conv1(x) - x2 = self.conv1_down(x1) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - x2 = self.conv2.conv(x2) - return x1, x2 - - def forward_b(self, x1, x2): - x2 = self.conv2_up(x2) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - - x1 = F.pad(x1, (-4, -4, -4, -4)) - x3 = self.conv3(x1 + x2) - x3 = F.leaky_relu(x3, 0.1, inplace=True) - z = self.conv_bottom(x3) - return z - - -class UNet1x3(nn.Module): - def __init__(self, in_channels, out_channels, deconv): - super(UNet1x3, self).__init__() - self.conv1 = UNetConv(in_channels, 32, 64, se=False) - self.conv1_down = nn.Conv2d(64, 64, 2, 2, 0) - self.conv2 = UNetConv(64, 128, 64, se=True) - self.conv2_up = nn.ConvTranspose2d(64, 64, 2, 2, 0) - self.conv3 = nn.Conv2d(64, 64, 3, 1, 0) - - if deconv: - self.conv_bottom = nn.ConvTranspose2d(64, out_channels, 5, 3, 2) - else: - self.conv_bottom = nn.Conv2d(64, out_channels, 3, 1, 0) - - for m in self.modules(): - if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)): - nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') - elif isinstance(m, nn.Linear): - nn.init.normal_(m.weight, 0, 0.01) - if m.bias is not None: - nn.init.constant_(m.bias, 0) - - def forward(self, x): - x1 = self.conv1(x) - x2 = self.conv1_down(x1) - x2 = F.leaky_relu(x2, 0.1, 
inplace=True) - x2 = self.conv2(x2) - x2 = self.conv2_up(x2) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - - x1 = F.pad(x1, (-4, -4, -4, -4)) - x3 = self.conv3(x1 + x2) - x3 = F.leaky_relu(x3, 0.1, inplace=True) - z = self.conv_bottom(x3) - return z - - def forward_a(self, x): - x1 = self.conv1(x) - x2 = self.conv1_down(x1) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - x2 = self.conv2.conv(x2) - return x1, x2 - - def forward_b(self, x1, x2): - x2 = self.conv2_up(x2) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - - x1 = F.pad(x1, (-4, -4, -4, -4)) - x3 = self.conv3(x1 + x2) - x3 = F.leaky_relu(x3, 0.1, inplace=True) - z = self.conv_bottom(x3) - return z - - -class UNet2(nn.Module): - def __init__(self, in_channels, out_channels, deconv): - super(UNet2, self).__init__() - - self.conv1 = UNetConv(in_channels, 32, 64, se=False) - self.conv1_down = nn.Conv2d(64, 64, 2, 2, 0) - self.conv2 = UNetConv(64, 64, 128, se=True) - self.conv2_down = nn.Conv2d(128, 128, 2, 2, 0) - self.conv3 = UNetConv(128, 256, 128, se=True) - self.conv3_up = nn.ConvTranspose2d(128, 128, 2, 2, 0) - self.conv4 = UNetConv(128, 64, 64, se=True) - self.conv4_up = nn.ConvTranspose2d(64, 64, 2, 2, 0) - self.conv5 = nn.Conv2d(64, 64, 3, 1, 0) - - if deconv: - self.conv_bottom = nn.ConvTranspose2d(64, out_channels, 4, 2, 3) - else: - self.conv_bottom = nn.Conv2d(64, out_channels, 3, 1, 0) - - for m in self.modules(): - if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)): - nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') - elif isinstance(m, nn.Linear): - nn.init.normal_(m.weight, 0, 0.01) - if m.bias is not None: - nn.init.constant_(m.bias, 0) - - def forward(self, x): - x1 = self.conv1(x) - x2 = self.conv1_down(x1) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - x2 = self.conv2(x2) - - x3 = self.conv2_down(x2) - x3 = F.leaky_relu(x3, 0.1, inplace=True) - x3 = self.conv3(x3) - x3 = self.conv3_up(x3) - x3 = F.leaky_relu(x3, 0.1, inplace=True) - - x2 = F.pad(x2, (-4, -4, -4, -4)) - x4 = self.conv4(x2 + x3) - x4 = self.conv4_up(x4) - x4 = F.leaky_relu(x4, 0.1, inplace=True) - - x1 = F.pad(x1, (-16, -16, -16, -16)) - x5 = self.conv5(x1 + x4) - x5 = F.leaky_relu(x5, 0.1, inplace=True) - - z = self.conv_bottom(x5) - return z - - def forward_a(self, x): # conv234结尾有se - x1 = self.conv1(x) - x2 = self.conv1_down(x1) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - x2 = self.conv2.conv(x2) - return x1, x2 - - def forward_b(self, x2): # conv234结尾有se - x3 = self.conv2_down(x2) - x3 = F.leaky_relu(x3, 0.1, inplace=True) - x3 = self.conv3.conv(x3) - return x3 - - def forward_c(self, x2, x3): # conv234结尾有se - x3 = self.conv3_up(x3) - x3 = F.leaky_relu(x3, 0.1, inplace=True) - - x2 = F.pad(x2, (-4, -4, -4, -4)) - x4 = self.conv4.conv(x2 + x3) - return x4 - - def forward_d(self, x1, x4): # conv234结尾有se - x4 = self.conv4_up(x4) - x4 = F.leaky_relu(x4, 0.1, inplace=True) - - x1 = F.pad(x1, (-16, -16, -16, -16)) - x5 = self.conv5(x1 + x4) - x5 = F.leaky_relu(x5, 0.1, inplace=True) - - z = self.conv_bottom(x5) - return z - - -class UpCunet2x(nn.Module): # 完美tile,全程无损 - def __init__(self, in_channels=3, out_channels=3): - super(UpCunet2x, self).__init__() - self.unet1 = UNet1(in_channels, out_channels, deconv=True) - self.unet2 = UNet2(in_channels, out_channels, deconv=False) - - def forward(self, x, tile_mode): # 1.7G - n, c, h0, w0 = x.shape - if (tile_mode == 0): # 不tile - ph = ((h0 - 1) // 2 + 1) * 2 - pw = ((w0 - 1) // 2 + 1) * 2 - x = F.pad(x, (18, 18 + pw - w0, 18, 18 + ph - h0), 'reflect') # 需要保证被2整除 - x = self.unet1.forward(x) - 
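            # unet1 (deconv=True) upsamples the padded input 2x; unet2 then acts as a residual
            # refiner at that scale: its output x0 is added below to x1, the unet1 output with a
            # 20-pixel border cropped off.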
x0 = self.unet2.forward(x) - x1 = F.pad(x, (-20, -20, -20, -20)) - x = torch.add(x0, x1) - if (w0 != pw or h0 != ph): x = x[:, :, :h0 * 2, :w0 * 2] - return x - elif (tile_mode == 1): # 对长边减半 - if (w0 >= h0): - crop_size_w = ((w0 - 1) // 4 * 4 + 4) // 2 # 减半后能被2整除,所以要先被4整除 - crop_size_h = (h0 - 1) // 2 * 2 + 2 # 能被2整除 - else: - crop_size_h = ((h0 - 1) // 4 * 4 + 4) // 2 # 减半后能被2整除,所以要先被4整除 - crop_size_w = (w0 - 1) // 2 * 2 + 2 # 能被2整除 - crop_size = (crop_size_h, crop_size_w) # 6.6G - elif (tile_mode == 2): # hw都减半 - crop_size = (((h0 - 1) // 4 * 4 + 4) // 2, ((w0 - 1) // 4 * 4 + 4) // 2) # 5.6G - elif (tile_mode == 3): # hw都三分之一 - crop_size = (((h0 - 1) // 6 * 6 + 6) // 3, ((w0 - 1) // 6 * 6 + 6) // 3) # 4.2G - elif (tile_mode == 4): # hw都四分之一 - crop_size = (((h0 - 1) // 8 * 8 + 8) // 4, ((w0 - 1) // 8 * 8 + 8) // 4) # 3.7G - ph = ((h0 - 1) // crop_size[0] + 1) * crop_size[0] - pw = ((w0 - 1) // crop_size[1] + 1) * crop_size[1] - x = F.pad(x, (18, 18 + pw - w0, 18, 18 + ph - h0), 'reflect') - n, c, h, w = x.shape - se_mean0 = torch.zeros((n, 64, 1, 1)).to(x.device) - if ("Half" in x.type()): - se_mean0 = se_mean0.half() - n_patch = 0 - tmp_dict = {} - opt_res_dict = {} - for i in range(0, h - 36, crop_size[0]): - tmp_dict[i] = {} - for j in range(0, w - 36, crop_size[1]): - x_crop = x[:, :, i:i + crop_size[0] + 36, j:j + crop_size[1] + 36] - n, c1, h1, w1 = x_crop.shape - tmp0, x_crop = self.unet1.forward_a(x_crop) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(x_crop.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(x_crop, dim=(2, 3), keepdim=True) - se_mean0 += tmp_se_mean - n_patch += 1 - tmp_dict[i][j] = (tmp0, x_crop) - se_mean0 /= n_patch - se_mean1 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean1 = se_mean1.half() - for i in range(0, h - 36, crop_size[0]): - for j in range(0, w - 36, crop_size[1]): - tmp0, x_crop = tmp_dict[i][j] - x_crop = self.unet1.conv2.seblock.forward_mean(x_crop, se_mean0) - opt_unet1 = self.unet1.forward_b(tmp0, x_crop) - tmp_x1, tmp_x2 = self.unet2.forward_a(opt_unet1) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x2.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x2, dim=(2, 3), keepdim=True) - se_mean1 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2) - se_mean1 /= n_patch - se_mean0 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean0 = se_mean0.half() - for i in range(0, h - 36, crop_size[0]): - for j in range(0, w - 36, crop_size[1]): - opt_unet1, tmp_x1, tmp_x2 = tmp_dict[i][j] - tmp_x2 = self.unet2.conv2.seblock.forward_mean(tmp_x2, se_mean1) - tmp_x3 = self.unet2.forward_b(tmp_x2) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x3.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x3, dim=(2, 3), keepdim=True) - se_mean0 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2, tmp_x3) - se_mean0 /= n_patch - se_mean1 = torch.zeros((n, 64, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean1 = se_mean1.half() - for i in range(0, h - 36, crop_size[0]): - for j in range(0, w - 36, crop_size[1]): - opt_unet1, tmp_x1, tmp_x2, tmp_x3 = tmp_dict[i][j] - tmp_x3 = self.unet2.conv3.seblock.forward_mean(tmp_x3, se_mean0) - tmp_x4 = self.unet2.forward_c(tmp_x2, tmp_x3) - if ("Half" in x.type()): 
# torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x4.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x4, dim=(2, 3), keepdim=True) - se_mean1 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x4) - se_mean1 /= n_patch - for i in range(0, h - 36, crop_size[0]): - opt_res_dict[i] = {} - for j in range(0, w - 36, crop_size[1]): - opt_unet1, tmp_x1, tmp_x4 = tmp_dict[i][j] - tmp_x4 = self.unet2.conv4.seblock.forward_mean(tmp_x4, se_mean1) - x0 = self.unet2.forward_d(tmp_x1, tmp_x4) - x1 = F.pad(opt_unet1, (-20, -20, -20, -20)) - x_crop = torch.add(x0, x1) # x0是unet2的最终输出 - opt_res_dict[i][j] = x_crop - del tmp_dict - torch.cuda.empty_cache() - res = torch.zeros((n, c, h * 2 - 72, w * 2 - 72)).to(x.device) - if ("Half" in x.type()): - res = res.half() - for i in range(0, h - 36, crop_size[0]): - for j in range(0, w - 36, crop_size[1]): - res[:, :, i * 2:i * 2 + h1 * 2 - 72, j * 2:j * 2 + w1 * 2 - 72] = opt_res_dict[i][j] - del opt_res_dict - torch.cuda.empty_cache() - if (w0 != pw or h0 != ph): res = res[:, :, :h0 * 2, :w0 * 2] - return res # - - -class UpCunet3x(nn.Module): # 完美tile,全程无损 - def __init__(self, in_channels=3, out_channels=3): - super(UpCunet3x, self).__init__() - self.unet1 = UNet1x3(in_channels, out_channels, deconv=True) - self.unet2 = UNet2(in_channels, out_channels, deconv=False) - - def forward(self, x, tile_mode): # 1.7G - n, c, h0, w0 = x.shape - if (tile_mode == 0): # 不tile - ph = ((h0 - 1) // 4 + 1) * 4 - pw = ((w0 - 1) // 4 + 1) * 4 - x = F.pad(x, (14, 14 + pw - w0, 14, 14 + ph - h0), 'reflect') # 需要保证被2整除 - x = self.unet1.forward(x) - x0 = self.unet2.forward(x) - x1 = F.pad(x, (-20, -20, -20, -20)) - x = torch.add(x0, x1) - if (w0 != pw or h0 != ph): x = x[:, :, :h0 * 3, :w0 * 3] - return x - elif (tile_mode == 1): # 对长边减半 - if (w0 >= h0): - crop_size_w = ((w0 - 1) // 8 * 8 + 8) // 2 # 减半后能被4整除,所以要先被8整除 - crop_size_h = (h0 - 1) // 4 * 4 + 4 # 能被4整除 - else: - crop_size_h = ((h0 - 1) // 8 * 8 + 8) // 2 # 减半后能被4整除,所以要先被8整除 - crop_size_w = (w0 - 1) // 4 * 4 + 4 # 能被4整除 - crop_size = (crop_size_h, crop_size_w) # 6.6G - elif (tile_mode == 2): # hw都减半 - crop_size = (((h0 - 1) // 8 * 8 + 8) // 2, ((w0 - 1) // 8 * 8 + 8) // 2) # 5.6G - elif (tile_mode == 3): # hw都三分之一 - crop_size = (((h0 - 1) // 12 * 12 + 12) // 3, ((w0 - 1) // 12 * 12 + 12) // 3) # 4.2G - elif (tile_mode == 4): # hw都四分之一 - crop_size = (((h0 - 1) // 16 * 16 + 16) // 4, ((w0 - 1) // 16 * 16 + 16) // 4) # 3.7G - ph = ((h0 - 1) // crop_size[0] + 1) * crop_size[0] - pw = ((w0 - 1) // crop_size[1] + 1) * crop_size[1] - x = F.pad(x, (14, 14 + pw - w0, 14, 14 + ph - h0), 'reflect') - n, c, h, w = x.shape - se_mean0 = torch.zeros((n, 64, 1, 1)).to(x.device) - if ("Half" in x.type()): - se_mean0 = se_mean0.half() - n_patch = 0 - tmp_dict = {} - opt_res_dict = {} - for i in range(0, h - 28, crop_size[0]): - tmp_dict[i] = {} - for j in range(0, w - 28, crop_size[1]): - x_crop = x[:, :, i:i + crop_size[0] + 28, j:j + crop_size[1] + 28] - n, c1, h1, w1 = x_crop.shape - tmp0, x_crop = self.unet1.forward_a(x_crop) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(x_crop.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(x_crop, dim=(2, 3), keepdim=True) - se_mean0 += tmp_se_mean - n_patch += 1 - tmp_dict[i][j] = (tmp0, x_crop) - se_mean0 /= n_patch - se_mean1 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean1 = se_mean1.half() - for i in range(0, h 
- 28, crop_size[0]): - for j in range(0, w - 28, crop_size[1]): - tmp0, x_crop = tmp_dict[i][j] - x_crop = self.unet1.conv2.seblock.forward_mean(x_crop, se_mean0) - opt_unet1 = self.unet1.forward_b(tmp0, x_crop) - tmp_x1, tmp_x2 = self.unet2.forward_a(opt_unet1) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x2.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x2, dim=(2, 3), keepdim=True) - se_mean1 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2) - se_mean1 /= n_patch - se_mean0 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean0 = se_mean0.half() - for i in range(0, h - 28, crop_size[0]): - for j in range(0, w - 28, crop_size[1]): - opt_unet1, tmp_x1, tmp_x2 = tmp_dict[i][j] - tmp_x2 = self.unet2.conv2.seblock.forward_mean(tmp_x2, se_mean1) - tmp_x3 = self.unet2.forward_b(tmp_x2) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x3.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x3, dim=(2, 3), keepdim=True) - se_mean0 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2, tmp_x3) - se_mean0 /= n_patch - se_mean1 = torch.zeros((n, 64, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean1 = se_mean1.half() - for i in range(0, h - 28, crop_size[0]): - for j in range(0, w - 28, crop_size[1]): - opt_unet1, tmp_x1, tmp_x2, tmp_x3 = tmp_dict[i][j] - tmp_x3 = self.unet2.conv3.seblock.forward_mean(tmp_x3, se_mean0) - tmp_x4 = self.unet2.forward_c(tmp_x2, tmp_x3) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x4.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x4, dim=(2, 3), keepdim=True) - se_mean1 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x4) - se_mean1 /= n_patch - for i in range(0, h - 28, crop_size[0]): - opt_res_dict[i] = {} - for j in range(0, w - 28, crop_size[1]): - opt_unet1, tmp_x1, tmp_x4 = tmp_dict[i][j] - tmp_x4 = self.unet2.conv4.seblock.forward_mean(tmp_x4, se_mean1) - x0 = self.unet2.forward_d(tmp_x1, tmp_x4) - x1 = F.pad(opt_unet1, (-20, -20, -20, -20)) - x_crop = torch.add(x0, x1) # x0是unet2的最终输出 - opt_res_dict[i][j] = x_crop # - del tmp_dict - torch.cuda.empty_cache() - res = torch.zeros((n, c, h * 3 - 84, w * 3 - 84)).to(x.device) - if ("Half" in x.type()): - res = res.half() - for i in range(0, h - 28, crop_size[0]): - for j in range(0, w - 28, crop_size[1]): - res[:, :, i * 3:i * 3 + h1 * 3 - 84, j * 3:j * 3 + w1 * 3 - 84] = opt_res_dict[i][j] - del opt_res_dict - torch.cuda.empty_cache() - if (w0 != pw or h0 != ph): res = res[:, :, :h0 * 3, :w0 * 3] - return res - - -class UpCunet4x(nn.Module): # 完美tile,全程无损 - def __init__(self, in_channels=3, out_channels=3): - super(UpCunet4x, self).__init__() - self.unet1 = UNet1(in_channels, 64, deconv=True) - self.unet2 = UNet2(64, 64, deconv=False) - self.ps = nn.PixelShuffle(2) - self.conv_final = nn.Conv2d(64, 12, 3, 1, padding=0, bias=True) - - def forward(self, x, tile_mode): - n, c, h0, w0 = x.shape - x00 = x - if (tile_mode == 0): # 不tile - ph = ((h0 - 1) // 2 + 1) * 2 - pw = ((w0 - 1) // 2 + 1) * 2 - x = F.pad(x, (19, 19 + pw - w0, 19, 19 + ph - h0), 'reflect') # 需要保证被2整除 - x = self.unet1.forward(x) - x0 = self.unet2.forward(x) - x1 = F.pad(x, (-20, -20, -20, -20)) - x = torch.add(x0, x1) - x = self.conv_final(x) - x = F.pad(x, (-1, -1, -1, -1)) - x = self.ps(x) - if (w0 != pw or 
h0 != ph): x = x[:, :, :h0 * 4, :w0 * 4] - x += F.interpolate(x00, scale_factor=4, mode='nearest') - return x - elif (tile_mode == 1): # 对长边减半 - if (w0 >= h0): - crop_size_w = ((w0 - 1) // 4 * 4 + 4) // 2 # 减半后能被2整除,所以要先被4整除 - crop_size_h = (h0 - 1) // 2 * 2 + 2 # 能被2整除 - else: - crop_size_h = ((h0 - 1) // 4 * 4 + 4) // 2 # 减半后能被2整除,所以要先被4整除 - crop_size_w = (w0 - 1) // 2 * 2 + 2 # 能被2整除 - crop_size = (crop_size_h, crop_size_w) # 6.6G - elif (tile_mode == 2): # hw都减半 - crop_size = (((h0 - 1) // 4 * 4 + 4) // 2, ((w0 - 1) // 4 * 4 + 4) // 2) # 5.6G - elif (tile_mode == 3): # hw都三分之一 - crop_size = (((h0 - 1) // 6 * 6 + 6) // 3, ((w0 - 1) // 6 * 6 + 6) // 3) # 4.1G - elif (tile_mode == 4): # hw都四分之一 - crop_size = (((h0 - 1) // 8 * 8 + 8) // 4, ((w0 - 1) // 8 * 8 + 8) // 4) # 3.7G - ph = ((h0 - 1) // crop_size[0] + 1) * crop_size[0] - pw = ((w0 - 1) // crop_size[1] + 1) * crop_size[1] - x = F.pad(x, (19, 19 + pw - w0, 19, 19 + ph - h0), 'reflect') - n, c, h, w = x.shape - se_mean0 = torch.zeros((n, 64, 1, 1)).to(x.device) - if ("Half" in x.type()): - se_mean0 = se_mean0.half() - n_patch = 0 - tmp_dict = {} - opt_res_dict = {} - for i in range(0, h - 38, crop_size[0]): - tmp_dict[i] = {} - for j in range(0, w - 38, crop_size[1]): - x_crop = x[:, :, i:i + crop_size[0] + 38, j:j + crop_size[1] + 38] - n, c1, h1, w1 = x_crop.shape - tmp0, x_crop = self.unet1.forward_a(x_crop) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(x_crop.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(x_crop, dim=(2, 3), keepdim=True) - se_mean0 += tmp_se_mean - n_patch += 1 - tmp_dict[i][j] = (tmp0, x_crop) - se_mean0 /= n_patch - se_mean1 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean1 = se_mean1.half() - for i in range(0, h - 38, crop_size[0]): - for j in range(0, w - 38, crop_size[1]): - tmp0, x_crop = tmp_dict[i][j] - x_crop = self.unet1.conv2.seblock.forward_mean(x_crop, se_mean0) - opt_unet1 = self.unet1.forward_b(tmp0, x_crop) - tmp_x1, tmp_x2 = self.unet2.forward_a(opt_unet1) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x2.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x2, dim=(2, 3), keepdim=True) - se_mean1 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2) - se_mean1 /= n_patch - se_mean0 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean0 = se_mean0.half() - for i in range(0, h - 38, crop_size[0]): - for j in range(0, w - 38, crop_size[1]): - opt_unet1, tmp_x1, tmp_x2 = tmp_dict[i][j] - tmp_x2 = self.unet2.conv2.seblock.forward_mean(tmp_x2, se_mean1) - tmp_x3 = self.unet2.forward_b(tmp_x2) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x3.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x3, dim=(2, 3), keepdim=True) - se_mean0 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2, tmp_x3) - se_mean0 /= n_patch - se_mean1 = torch.zeros((n, 64, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean1 = se_mean1.half() - for i in range(0, h - 38, crop_size[0]): - for j in range(0, w - 38, crop_size[1]): - opt_unet1, tmp_x1, tmp_x2, tmp_x3 = tmp_dict[i][j] - tmp_x3 = self.unet2.conv3.seblock.forward_mean(tmp_x3, se_mean0) - tmp_x4 = self.unet2.forward_c(tmp_x2, tmp_x3) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - 
tmp_se_mean = torch.mean(tmp_x4.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x4, dim=(2, 3), keepdim=True) - se_mean1 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x4) - se_mean1 /= n_patch - for i in range(0, h - 38, crop_size[0]): - opt_res_dict[i] = {} - for j in range(0, w - 38, crop_size[1]): - opt_unet1, tmp_x1, tmp_x4 = tmp_dict[i][j] - tmp_x4 = self.unet2.conv4.seblock.forward_mean(tmp_x4, se_mean1) - x0 = self.unet2.forward_d(tmp_x1, tmp_x4) - x1 = F.pad(opt_unet1, (-20, -20, -20, -20)) - x_crop = torch.add(x0, x1) # x0是unet2的最终输出 - x_crop = self.conv_final(x_crop) - x_crop = F.pad(x_crop, (-1, -1, -1, -1)) - x_crop = self.ps(x_crop) - opt_res_dict[i][j] = x_crop - del tmp_dict - torch.cuda.empty_cache() - res = torch.zeros((n, c, h * 4 - 152, w * 4 - 152)).to(x.device) - if ("Half" in x.type()): - res = res.half() - for i in range(0, h - 38, crop_size[0]): - for j in range(0, w - 38, crop_size[1]): - # print(opt_res_dict[i][j].shape,res[:, :, i * 4:i * 4 + h1 * 4 - 144, j * 4:j * 4 + w1 * 4 - 144].shape) - res[:, :, i * 4:i * 4 + h1 * 4 - 152, j * 4:j * 4 + w1 * 4 - 152] = opt_res_dict[i][j] - del opt_res_dict - torch.cuda.empty_cache() - if (w0 != pw or h0 != ph): res = res[:, :, :h0 * 4, :w0 * 4] - res += F.interpolate(x00, scale_factor=4, mode='nearest') - return res # - - -class RealWaifuUpScaler(object): - def __init__(self, scale, weight_path, half, device): - weight = torch.load(weight_path, map_location="cpu") - self.model = eval("UpCunet%sx" % scale)() - if (half == True): - self.model = self.model.half().to(device) - else: - self.model = self.model.to(device) - self.model.load_state_dict(weight, strict=True) - self.model.eval() - self.half = half - self.device = device - - def np2tensor(self, np_frame): - if (self.half == False): - return torch.from_numpy(np.transpose(np_frame, (2, 0, 1))).unsqueeze(0).to(self.device).float() / 255 - else: - return torch.from_numpy(np.transpose(np_frame, (2, 0, 1))).unsqueeze(0).to(self.device).half() / 255 - - def tensor2np(self, tensor): - if (self.half == False): - return ( - np.transpose((tensor.data.squeeze() * 255.0).round().clamp_(0, 255).byte().cpu().numpy(), (1, 2, 0))) - else: - return (np.transpose((tensor.data.squeeze().float() * 255.0).round().clamp_(0, 255).byte().cpu().numpy(), - (1, 2, 0))) - - def __call__(self, frame, tile_mode): - with torch.no_grad(): - tensor = self.np2tensor(frame) - result = self.tensor2np(self.model(tensor, tile_mode)) - return result - - -if __name__ == "__main__": - ###########inference_img - import time, cv2, sys - from time import time as ttime - - for weight_path, scale in [("weights_v3/up2x-latest-denoise3x.pth", 2), ("weights_v3/up3x-latest-denoise3x.pth", 3), - ("weights_v3/up4x-latest-denoise3x.pth", 4)]: - for tile_mode in [0, 1, 2, 3, 4]: - upscaler2x = RealWaifuUpScaler(scale, weight_path, half=True, device="cuda:0") - input_dir = "%s/input_dir1" % root_path - output_dir = "%s/opt-dir-all-test" % root_path - os.makedirs(output_dir, exist_ok=True) - for name in os.listdir(input_dir): - print(name) - tmp = name.split(".") - inp_path = os.path.join(input_dir, name) - suffix = tmp[-1] - prefix = ".".join(tmp[:-1]) - tmp_path = os.path.join(root_path, "tmp", "%s.%s" % (int(time.time() * 1000000), suffix)) - print(inp_path, tmp_path) - # 支持中文路径 - # os.link(inp_path, tmp_path)#win用硬链接 - os.symlink(inp_path, tmp_path) # linux用软链接 - frame = cv2.imread(tmp_path)[:, :, [2, 1, 0]] - t0 = ttime() - result = upscaler2x(frame, tile_mode=tile_mode)[:, 
:, ::-1] - t1 = ttime() - print(prefix, "done", t1 - t0) - tmp_opt_path = os.path.join(root_path, "tmp", "%s.%s" % (int(time.time() * 1000000), suffix)) - cv2.imwrite(tmp_opt_path, result) - n = 0 - while (1): - if (n == 0): - suffix = "_%sx_tile%s.png" % (scale, tile_mode) - else: - suffix = "_%sx_tile%s_%s.png" % (scale, tile_mode, n) # - if (os.path.exists(os.path.join(output_dir, prefix + suffix)) == False): - break - else: - n += 1 - final_opt_path = os.path.join(output_dir, prefix + suffix) - os.rename(tmp_opt_path, final_opt_path) - os.remove(tmp_path) diff --git a/spaces/akhaliq/Mask2Former/mask2former/data/dataset_mappers/mask_former_panoptic_dataset_mapper.py b/spaces/akhaliq/Mask2Former/mask2former/data/dataset_mappers/mask_former_panoptic_dataset_mapper.py deleted file mode 100644 index ddbc2bd77fb1b17540dd5272cfc6534ee2b6e2df..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/Mask2Former/mask2former/data/dataset_mappers/mask_former_panoptic_dataset_mapper.py +++ /dev/null @@ -1,165 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import copy -import logging - -import numpy as np -import torch -from torch.nn import functional as F - -from detectron2.config import configurable -from detectron2.data import detection_utils as utils -from detectron2.data import transforms as T -from detectron2.structures import BitMasks, Instances - -from .mask_former_semantic_dataset_mapper import MaskFormerSemanticDatasetMapper - -__all__ = ["MaskFormerPanopticDatasetMapper"] - - -class MaskFormerPanopticDatasetMapper(MaskFormerSemanticDatasetMapper): - """ - A callable which takes a dataset dict in Detectron2 Dataset format, - and map it into a format used by MaskFormer for panoptic segmentation. - - The callable currently does the following: - - 1. Read the image from "file_name" - 2. Applies geometric transforms to the image and annotation - 3. Find and applies suitable cropping to the image and annotation - 4. Prepare image and annotation to Tensors - """ - - @configurable - def __init__( - self, - is_train=True, - *, - augmentations, - image_format, - ignore_label, - size_divisibility, - ): - """ - NOTE: this interface is experimental. - Args: - is_train: for training or inference - augmentations: a list of augmentations or deterministic transforms to apply - image_format: an image format supported by :func:`detection_utils.read_image`. - ignore_label: the label that is ignored to evaluation - size_divisibility: pad image size to be divisible by this value - """ - super().__init__( - is_train, - augmentations=augmentations, - image_format=image_format, - ignore_label=ignore_label, - size_divisibility=size_divisibility, - ) - - def __call__(self, dataset_dict): - """ - Args: - dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format. - - Returns: - dict: a format that builtin models in detectron2 accept - """ - assert self.is_train, "MaskFormerPanopticDatasetMapper should only be used for training!" 
- - dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below - image = utils.read_image(dataset_dict["file_name"], format=self.img_format) - utils.check_image_size(dataset_dict, image) - - # semantic segmentation - if "sem_seg_file_name" in dataset_dict: - # PyTorch transformation not implemented for uint16, so converting it to double first - sem_seg_gt = utils.read_image(dataset_dict.pop("sem_seg_file_name")).astype("double") - else: - sem_seg_gt = None - - # panoptic segmentation - if "pan_seg_file_name" in dataset_dict: - pan_seg_gt = utils.read_image(dataset_dict.pop("pan_seg_file_name"), "RGB") - segments_info = dataset_dict["segments_info"] - else: - pan_seg_gt = None - segments_info = None - - if pan_seg_gt is None: - raise ValueError( - "Cannot find 'pan_seg_file_name' for panoptic segmentation dataset {}.".format( - dataset_dict["file_name"] - ) - ) - - aug_input = T.AugInput(image, sem_seg=sem_seg_gt) - aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input) - image = aug_input.image - if sem_seg_gt is not None: - sem_seg_gt = aug_input.sem_seg - - # apply the same transformation to panoptic segmentation - pan_seg_gt = transforms.apply_segmentation(pan_seg_gt) - - from panopticapi.utils import rgb2id - - pan_seg_gt = rgb2id(pan_seg_gt) - - # Pad image and segmentation label here! - image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1))) - if sem_seg_gt is not None: - sem_seg_gt = torch.as_tensor(sem_seg_gt.astype("long")) - pan_seg_gt = torch.as_tensor(pan_seg_gt.astype("long")) - - if self.size_divisibility > 0: - image_size = (image.shape[-2], image.shape[-1]) - padding_size = [ - 0, - self.size_divisibility - image_size[1], - 0, - self.size_divisibility - image_size[0], - ] - image = F.pad(image, padding_size, value=128).contiguous() - if sem_seg_gt is not None: - sem_seg_gt = F.pad(sem_seg_gt, padding_size, value=self.ignore_label).contiguous() - pan_seg_gt = F.pad( - pan_seg_gt, padding_size, value=0 - ).contiguous() # 0 is the VOID panoptic label - - image_shape = (image.shape[-2], image.shape[-1]) # h, w - - # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory, - # but not efficient on large generic data structures due to the use of pickle & mp.Queue. - # Therefore it's important to use torch.Tensor. 
- dataset_dict["image"] = image - if sem_seg_gt is not None: - dataset_dict["sem_seg"] = sem_seg_gt.long() - - if "annotations" in dataset_dict: - raise ValueError("Pemantic segmentation dataset should not have 'annotations'.") - - # Prepare per-category binary masks - pan_seg_gt = pan_seg_gt.numpy() - instances = Instances(image_shape) - classes = [] - masks = [] - for segment_info in segments_info: - class_id = segment_info["category_id"] - if not segment_info["iscrowd"]: - classes.append(class_id) - masks.append(pan_seg_gt == segment_info["id"]) - - classes = np.array(classes) - instances.gt_classes = torch.tensor(classes, dtype=torch.int64) - if len(masks) == 0: - # Some image does not have annotation (all ignored) - instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1])) - else: - masks = BitMasks( - torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks]) - ) - instances.gt_masks = masks.tensor - - dataset_dict["instances"] = instances - - return dataset_dict diff --git a/spaces/alexrame/rewardedsoups/streamlit_app/data/locomotion/trajectories/3.html b/spaces/alexrame/rewardedsoups/streamlit_app/data/locomotion/trajectories/3.html deleted file mode 100644 index d06410e3a1e34432da3d40ef256127f0f30c1662..0000000000000000000000000000000000000000 --- a/spaces/alexrame/rewardedsoups/streamlit_app/data/locomotion/trajectories/3.html +++ /dev/null @@ -1,48 +0,0 @@ - - - - brax visualizer - - - - -
    - - - diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/rich/measure.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/rich/measure.py deleted file mode 100644 index aea238df93820df7caea490b842857405e19dc75..0000000000000000000000000000000000000000 --- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/rich/measure.py +++ /dev/null @@ -1,149 +0,0 @@ -from operator import itemgetter -from typing import Callable, Iterable, NamedTuple, Optional, TYPE_CHECKING - -from . import errors -from .protocol import is_renderable, rich_cast - -if TYPE_CHECKING: - from .console import Console, ConsoleOptions, RenderableType - - -class Measurement(NamedTuple): - """Stores the minimum and maximum widths (in characters) required to render an object.""" - - minimum: int - """Minimum number of cells required to render.""" - maximum: int - """Maximum number of cells required to render.""" - - @property - def span(self) -> int: - """Get difference between maximum and minimum.""" - return self.maximum - self.minimum - - def normalize(self) -> "Measurement": - """Get measurement that ensures that minimum <= maximum and minimum >= 0 - - Returns: - Measurement: A normalized measurement. - """ - minimum, maximum = self - minimum = min(max(0, minimum), maximum) - return Measurement(max(0, minimum), max(0, max(minimum, maximum))) - - def with_maximum(self, width: int) -> "Measurement": - """Get a RenderableWith where the widths are <= width. - - Args: - width (int): Maximum desired width. - - Returns: - Measurement: New Measurement object. - """ - minimum, maximum = self - return Measurement(min(minimum, width), min(maximum, width)) - - def with_minimum(self, width: int) -> "Measurement": - """Get a RenderableWith where the widths are >= width. - - Args: - width (int): Minimum desired width. - - Returns: - Measurement: New Measurement object. - """ - minimum, maximum = self - width = max(0, width) - return Measurement(max(minimum, width), max(maximum, width)) - - def clamp( - self, min_width: Optional[int] = None, max_width: Optional[int] = None - ) -> "Measurement": - """Clamp a measurement within the specified range. - - Args: - min_width (int): Minimum desired width, or ``None`` for no minimum. Defaults to None. - max_width (int): Maximum desired width, or ``None`` for no maximum. Defaults to None. - - Returns: - Measurement: New Measurement object. - """ - measurement = self - if min_width is not None: - measurement = measurement.with_minimum(min_width) - if max_width is not None: - measurement = measurement.with_maximum(max_width) - return measurement - - @classmethod - def get( - cls, console: "Console", options: "ConsoleOptions", renderable: "RenderableType" - ) -> "Measurement": - """Get a measurement for a renderable. - - Args: - console (~rich.console.Console): Console instance. - options (~rich.console.ConsoleOptions): Console options. - renderable (RenderableType): An object that may be rendered with Rich. - - Raises: - errors.NotRenderableError: If the object is not renderable. - - Returns: - Measurement: Measurement object containing range of character widths required to render the object. 
- """ - _max_width = options.max_width - if _max_width < 1: - return Measurement(0, 0) - if isinstance(renderable, str): - renderable = console.render_str(renderable, markup=options.markup) - renderable = rich_cast(renderable) - if is_renderable(renderable): - get_console_width: Optional[ - Callable[["Console", "ConsoleOptions"], "Measurement"] - ] = getattr(renderable, "__rich_measure__", None) - if get_console_width is not None: - render_width = ( - get_console_width(console, options) - .normalize() - .with_maximum(_max_width) - ) - if render_width.maximum < 1: - return Measurement(0, 0) - return render_width.normalize() - else: - return Measurement(0, _max_width) - else: - raise errors.NotRenderableError( - f"Unable to get render width for {renderable!r}; " - "a str, Segment, or object with __rich_console__ method is required" - ) - - -def measure_renderables( - console: "Console", - options: "ConsoleOptions", - renderables: Iterable["RenderableType"], -) -> "Measurement": - """Get a measurement that would fit a number of renderables. - - Args: - console (~rich.console.Console): Console instance. - options (~rich.console.ConsoleOptions): Console options. - renderables (Iterable[RenderableType]): One or more renderable objects. - - Returns: - Measurement: Measurement object containing range of character widths required to - contain all given renderables. - """ - if not renderables: - return Measurement(0, 0) - get_measurement = Measurement.get - measurements = [ - get_measurement(console, options, renderable) for renderable in renderables - ] - measured_width = Measurement( - max(measurements, key=itemgetter(0)).minimum, - max(measurements, key=itemgetter(1)).maximum, - ) - return measured_width diff --git a/spaces/ali-ghamdan/deoldify/fastai/tabular/transform.py b/spaces/ali-ghamdan/deoldify/fastai/tabular/transform.py deleted file mode 100644 index d7bc255eaf5fd92467b9db28e67590c4981e4356..0000000000000000000000000000000000000000 --- a/spaces/ali-ghamdan/deoldify/fastai/tabular/transform.py +++ /dev/null @@ -1,195 +0,0 @@ -"Cleaning and feature engineering functions for structured data" -from ..torch_core import * -from pandas.api.types import is_numeric_dtype -from datetime import date, datetime -import calendar - -__all__ = ['add_datepart', 'cont_cat_split', 'Categorify', 'FillMissing', 'FillStrategy', 'Normalize', 'TabularProc', - 'add_elapsed_times', 'make_date', 'add_cyclic_datepart'] - -def make_date(df:DataFrame, date_field:str): - "Make sure `df[field_name]` is of the right date type." - field_dtype = df[date_field].dtype - if isinstance(field_dtype, pd.core.dtypes.dtypes.DatetimeTZDtype): - field_dtype = np.datetime64 - if not np.issubdtype(field_dtype, np.datetime64): - df[date_field] = pd.to_datetime(df[date_field], infer_datetime_format=True) - -def cyclic_dt_feat_names(time:bool=True, add_linear:bool=False)->List[str]: - "Return feature names of date/time cycles as produced by `cyclic_dt_features`." - fs = ['cos','sin'] - attr = [f'{r}_{f}' for r in 'weekday day_month month_year day_year'.split() for f in fs] - if time: attr += [f'{r}_{f}' for r in 'hour clock min sec'.split() for f in fs] - if add_linear: attr.append('year_lin') - return attr - -def cyclic_dt_features(d:Union[date,datetime], time:bool=True, add_linear:bool=False)->List[float]: - "Calculate the cos and sin of date/time cycles." 
- tt,fs = d.timetuple(), [np.cos, np.sin] - day_year,days_month = tt.tm_yday, calendar.monthrange(d.year, d.month)[1] - days_year = 366 if calendar.isleap(d.year) else 365 - rs = d.weekday()/7, (d.day-1)/days_month, (d.month-1)/12, (day_year-1)/days_year - feats = [f(r * 2 * np.pi) for r in rs for f in fs] - if time and isinstance(d, datetime) and type(d) != date: - rs = tt.tm_hour/24, tt.tm_hour%12/12, tt.tm_min/60, tt.tm_sec/60 - feats += [f(r * 2 * np.pi) for r in rs for f in fs] - if add_linear: - if type(d) == date: feats.append(d.year + rs[-1]) - else: - secs_in_year = (datetime(d.year+1, 1, 1) - datetime(d.year, 1, 1)).total_seconds() - feats.append(d.year + ((d - datetime(d.year, 1, 1)).total_seconds() / secs_in_year)) - return feats - -def add_cyclic_datepart(df:DataFrame, field_name:str, prefix:str=None, drop:bool=True, time:bool=False, add_linear:bool=False): - "Helper function that adds trigonometric date/time features to a date in the column `field_name` of `df`." - make_date(df, field_name) - field = df[field_name] - prefix = ifnone(prefix, re.sub('[Dd]ate$', '', field_name)) - series = field.apply(partial(cyclic_dt_features, time=time, add_linear=add_linear)) - columns = [prefix + c for c in cyclic_dt_feat_names(time, add_linear)] - df_feats = pd.DataFrame([item for item in series], columns=columns, index=series.index) - for column in columns: df[column] = df_feats[column] - if drop: df.drop(field_name, axis=1, inplace=True) - return df - -def add_datepart(df:DataFrame, field_name:str, prefix:str=None, drop:bool=True, time:bool=False): - "Helper function that adds columns relevant to a date in the column `field_name` of `df`." - make_date(df, field_name) - field = df[field_name] - prefix = ifnone(prefix, re.sub('[Dd]ate$', '', field_name)) - attr = ['Year', 'Month', 'Week', 'Day', 'Dayofweek', 'Dayofyear', 'Is_month_end', 'Is_month_start', - 'Is_quarter_end', 'Is_quarter_start', 'Is_year_end', 'Is_year_start'] - if time: attr = attr + ['Hour', 'Minute', 'Second'] - for n in attr: df[prefix + n] = getattr(field.dt, n.lower()) - df[prefix + 'Elapsed'] = field.astype(np.int64) // 10 ** 9 - if drop: df.drop(field_name, axis=1, inplace=True) - return df - -def _get_elapsed(df:DataFrame,field_names:Collection[str], date_field:str, base_field:str, prefix:str): - for f in field_names: - day1 = np.timedelta64(1, 'D') - last_date,last_base,res = np.datetime64(),None,[] - for b,v,d in zip(df[base_field].values, df[f].values, df[date_field].values): - if last_base is None or b != last_base: - last_date,last_base = np.datetime64(),b - if v: last_date = d - res.append(((d-last_date).astype('timedelta64[D]') / day1)) - df[prefix + f] = res - return df - -def add_elapsed_times(df:DataFrame, field_names:Collection[str], date_field:str, base_field:str): - field_names = listify(field_names) - #Make sure date_field is a date and base_field a bool - df[field_names] = df[field_names].astype('bool') - make_date(df, date_field) - - work_df = df[field_names + [date_field, base_field]] - work_df = work_df.sort_values([base_field, date_field]) - work_df = _get_elapsed(work_df, field_names, date_field, base_field, 'After') - work_df = work_df.sort_values([base_field, date_field], ascending=[True, False]) - work_df = _get_elapsed(work_df, field_names, date_field, base_field, 'Before') - - for a in ['After' + f for f in field_names] + ['Before' + f for f in field_names]: - work_df[a] = work_df[a].fillna(0).astype(int) - - for a,s in zip([True, False], ['_bw', '_fw']): - work_df = 
work_df.set_index(date_field) - tmp = (work_df[[base_field] + field_names].sort_index(ascending=a) - .groupby(base_field).rolling(7, min_periods=1).sum()) - tmp.drop(base_field,1,inplace=True) - tmp.reset_index(inplace=True) - work_df.reset_index(inplace=True) - work_df = work_df.merge(tmp, 'left', [date_field, base_field], suffixes=['', s]) - work_df.drop(field_names,1,inplace=True) - return df.merge(work_df, 'left', [date_field, base_field]) - -def cont_cat_split(df, max_card=20, dep_var=None)->Tuple[List,List]: - "Helper function that returns column names of cont and cat variables from given df." - cont_names, cat_names = [], [] - for label in df: - if label == dep_var: continue - if df[label].dtype == int and df[label].unique().shape[0] > max_card or df[label].dtype == float: cont_names.append(label) - else: cat_names.append(label) - return cont_names, cat_names - -@dataclass -class TabularProc(): - "A processor for tabular dataframes." - cat_names:StrList - cont_names:StrList - - def __call__(self, df:DataFrame, test:bool=False): - "Apply the correct function to `df` depending on `test`." - func = self.apply_test if test else self.apply_train - func(df) - - def apply_train(self, df:DataFrame): - "Function applied to `df` if it's the train set." - raise NotImplementedError - def apply_test(self, df:DataFrame): - "Function applied to `df` if it's the test set." - self.apply_train(df) - -class Categorify(TabularProc): - "Transform the categorical variables to that type." - def apply_train(self, df:DataFrame): - "Transform `self.cat_names` columns in categorical." - self.categories = {} - for n in self.cat_names: - df.loc[:,n] = df.loc[:,n].astype('category').cat.as_ordered() - self.categories[n] = df[n].cat.categories - - def apply_test(self, df:DataFrame): - "Transform `self.cat_names` columns in categorical using the codes decided in `apply_train`." - for n in self.cat_names: - df.loc[:,n] = pd.Categorical(df[n], categories=self.categories[n], ordered=True) - -FillStrategy = IntEnum('FillStrategy', 'MEDIAN COMMON CONSTANT') - -@dataclass -class FillMissing(TabularProc): - "Fill the missing values in continuous columns." - fill_strategy:FillStrategy=FillStrategy.MEDIAN - add_col:bool=True - fill_val:float=0. - def apply_train(self, df:DataFrame): - "Fill missing values in `self.cont_names` according to `self.fill_strategy`." - self.na_dict = {} - for name in self.cont_names: - if pd.isnull(df[name]).sum(): - if self.add_col: - df[name+'_na'] = pd.isnull(df[name]) - if name+'_na' not in self.cat_names: self.cat_names.append(name+'_na') - if self.fill_strategy == FillStrategy.MEDIAN: filler = df[name].median() - elif self.fill_strategy == FillStrategy.CONSTANT: filler = self.fill_val - else: filler = df[name].dropna().value_counts().idxmax() - df[name] = df[name].fillna(filler) - self.na_dict[name] = filler - - def apply_test(self, df:DataFrame): - "Fill missing values in `self.cont_names` like in `apply_train`." - for name in self.cont_names: - if name in self.na_dict: - if self.add_col: - df[name+'_na'] = pd.isnull(df[name]) - if name+'_na' not in self.cat_names: self.cat_names.append(name+'_na') - df[name] = df[name].fillna(self.na_dict[name]) - elif pd.isnull(df[name]).sum() != 0: - raise Exception(f"""There are nan values in field {name} but there were none in the training set. - Please fix those manually.""") - -class Normalize(TabularProc): - "Normalize the continuous variables." 
- def apply_train(self, df:DataFrame): - "Compute the means and stds of `self.cont_names` columns to normalize them." - self.means,self.stds = {},{} - for n in self.cont_names: - assert is_numeric_dtype(df[n]), (f"""Cannot normalize '{n}' column as it isn't numerical. - Are you sure it doesn't belong in the categorical set of columns?""") - self.means[n],self.stds[n] = df[n].mean(),df[n].std() - df[n] = (df[n]-self.means[n]) / (1e-7 + self.stds[n]) - - def apply_test(self, df:DataFrame): - "Normalize `self.cont_names` with the same statistics as in `apply_train`." - for n in self.cont_names: - df[n] = (df[n]-self.means[n]) / (1e-7 + self.stds[n]) diff --git a/spaces/ali-ghamdan/deoldify/fastai/text/learner.py b/spaces/ali-ghamdan/deoldify/fastai/text/learner.py deleted file mode 100644 index 9592029818def7f65208a49fdda2379c11680e06..0000000000000000000000000000000000000000 --- a/spaces/ali-ghamdan/deoldify/fastai/text/learner.py +++ /dev/null @@ -1,303 +0,0 @@ -'Model training for NLP' -from ..torch_core import * -from ..basic_train import * -from ..callbacks import * -from ..data_block import CategoryList -from ..basic_data import * -from ..datasets import * -from ..metrics import accuracy -from ..train import GradientClipping -from ..layers import * -from .models import * -from .transform import * -from .data import * - -__all__ = ['RNNLearner', 'LanguageLearner', 'convert_weights', 'decode_spec_tokens', 'get_language_model', 'language_model_learner', - 'MultiBatchEncoder', 'get_text_classifier', 'text_classifier_learner', 'PoolingLinearClassifier'] - -_model_meta = {AWD_LSTM: {'hid_name':'emb_sz', 'url':URLs.WT103_FWD, 'url_bwd':URLs.WT103_BWD, - 'config_lm':awd_lstm_lm_config, 'split_lm': awd_lstm_lm_split, - 'config_clas':awd_lstm_clas_config, 'split_clas': awd_lstm_clas_split}, - Transformer: {'hid_name':'d_model', 'url':URLs.OPENAI_TRANSFORMER, - 'config_lm':tfmer_lm_config, 'split_lm': tfmer_lm_split, - 'config_clas':tfmer_clas_config, 'split_clas': tfmer_clas_split}, - TransformerXL: {'hid_name':'d_model', - 'config_lm':tfmerXL_lm_config, 'split_lm': tfmerXL_lm_split, - 'config_clas':tfmerXL_clas_config, 'split_clas': tfmerXL_clas_split}} - -def convert_weights(wgts:Weights, stoi_wgts:Dict[str,int], itos_new:Collection[str]) -> Weights: - "Convert the model `wgts` to go with a new vocabulary." - dec_bias, enc_wgts = wgts.get('1.decoder.bias', None), wgts['0.encoder.weight'] - wgts_m = enc_wgts.mean(0) - if dec_bias is not None: bias_m = dec_bias.mean(0) - new_w = enc_wgts.new_zeros((len(itos_new),enc_wgts.size(1))).zero_() - if dec_bias is not None: new_b = dec_bias.new_zeros((len(itos_new),)).zero_() - for i,w in enumerate(itos_new): - r = stoi_wgts[w] if w in stoi_wgts else -1 - new_w[i] = enc_wgts[r] if r>=0 else wgts_m - if dec_bias is not None: new_b[i] = dec_bias[r] if r>=0 else bias_m - wgts['0.encoder.weight'] = new_w - if '0.encoder_dp.emb.weight' in wgts: wgts['0.encoder_dp.emb.weight'] = new_w.clone() - wgts['1.decoder.weight'] = new_w.clone() - if dec_bias is not None: wgts['1.decoder.bias'] = new_b - return wgts - -class RNNLearner(Learner): - "Basic class for a `Learner` in NLP." 
- def __init__(self, data:DataBunch, model:nn.Module, split_func:OptSplitFunc=None, clip:float=None, - alpha:float=2., beta:float=1., metrics=None, **learn_kwargs): - is_class = (hasattr(data.train_ds, 'y') and (isinstance(data.train_ds.y, CategoryList) or - isinstance(data.train_ds.y, LMLabelList))) - metrics = ifnone(metrics, ([accuracy] if is_class else [])) - super().__init__(data, model, metrics=metrics, **learn_kwargs) - self.callbacks.append(RNNTrainer(self, alpha=alpha, beta=beta)) - if clip: self.callback_fns.append(partial(GradientClipping, clip=clip)) - if split_func: self.split(split_func) - - def save_encoder(self, name:str): - "Save the encoder to `name` inside the model directory." - if is_pathlike(name): self._test_writeable_path() - encoder = get_model(self.model)[0] - if hasattr(encoder, 'module'): encoder = encoder.module - torch.save(encoder.state_dict(), self.path/self.model_dir/f'{name}.pth') - - def load_encoder(self, name:str, device:torch.device=None): - "Load the encoder `name` from the model directory." - encoder = get_model(self.model)[0] - if device is None: device = self.data.device - if hasattr(encoder, 'module'): encoder = encoder.module - encoder.load_state_dict(torch.load(self.path/self.model_dir/f'{name}.pth', map_location=device)) - self.freeze() - - def load_pretrained(self, wgts_fname:str, itos_fname:str, strict:bool=True): - "Load a pretrained model and adapts it to the data vocabulary." - old_itos = pickle.load(open(itos_fname, 'rb')) - old_stoi = {v:k for k,v in enumerate(old_itos)} - wgts = torch.load(wgts_fname, map_location=lambda storage, loc: storage) - if 'model' in wgts: wgts = wgts['model'] - wgts = convert_weights(wgts, old_stoi, self.data.train_ds.vocab.itos) - self.model.load_state_dict(wgts, strict=strict) - - def get_preds(self, ds_type:DatasetType=DatasetType.Valid, activ:nn.Module=None, with_loss:bool=False, n_batch:Optional[int]=None, - pbar:Optional[PBar]=None, ordered:bool=False) -> List[Tensor]: - "Return predictions and targets on the valid, train, or test set, depending on `ds_type`." - self.model.reset() - if ordered: np.random.seed(42) - preds = super().get_preds(ds_type=ds_type, activ=activ, with_loss=with_loss, n_batch=n_batch, pbar=pbar) - if ordered and hasattr(self.dl(ds_type), 'sampler'): - np.random.seed(42) - sampler = [i for i in self.dl(ds_type).sampler] - reverse_sampler = np.argsort(sampler) - preds = [p[reverse_sampler] for p in preds] - return(preds) - -def decode_spec_tokens(tokens): - new_toks,rule,arg = [],None,None - for t in tokens: - if t in [TK_MAJ, TK_UP, TK_REP, TK_WREP]: rule = t - elif rule is None: new_toks.append(t) - elif rule == TK_MAJ: - new_toks.append(t[:1].upper() + t[1:].lower()) - rule = None - elif rule == TK_UP: - new_toks.append(t.upper()) - rule = None - elif arg is None: - try: arg = int(t) - except: rule = None - else: - if rule == TK_REP: new_toks.append(t * arg) - else: new_toks += [t] * arg - return new_toks - -class LanguageLearner(RNNLearner): - "Subclass of RNNLearner for predictions." - - def predict(self, text:str, n_words:int=1, no_unk:bool=True, temperature:float=1., min_p:float=None, sep:str=' ', - decoder=decode_spec_tokens): - "Return the `n_words` that come after `text`." 
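        # Illustrative usage (sketch, assuming a fitted learner named `learn`):
        #   learn.predict("The movie was", n_words=10, temperature=0.75)
        # returns the prompt followed by 10 sampled words; `no_unk` masks the UNK token and
        # `min_p` zeroes out candidates whose probability falls below the threshold before sampling.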
- ds = self.data.single_dl.dataset - self.model.reset() - xb,yb = self.data.one_item(text) - new_idx = [] - for _ in range(n_words): #progress_bar(range(n_words), leave=False): - res = self.pred_batch(batch=(xb,yb))[0][-1] - #if len(new_idx) == 0: self.model[0].select_hidden([0]) - if no_unk: res[self.data.vocab.stoi[UNK]] = 0. - if min_p is not None: - if (res >= min_p).float().sum() == 0: - warn(f"There is no item with probability >= {min_p}, try a lower value.") - else: res[res < min_p] = 0. - if temperature != 1.: res.pow_(1 / temperature) - idx = torch.multinomial(res, 1).item() - new_idx.append(idx) - xb = xb.new_tensor([idx])[None] - return text + sep + sep.join(decoder(self.data.vocab.textify(new_idx, sep=None))) - - def beam_search(self, text:str, n_words:int, no_unk:bool=True, top_k:int=10, beam_sz:int=1000, temperature:float=1., - sep:str=' ', decoder=decode_spec_tokens): - "Return the `n_words` that come after `text` using beam search." - ds = self.data.single_dl.dataset - self.model.reset() - self.model.eval() - xb, yb = self.data.one_item(text) - nodes = None - nodes = xb.clone() - scores = xb.new_zeros(1).float() - with torch.no_grad(): - for k in progress_bar(range(n_words), leave=False): - out = F.log_softmax(self.model(xb)[0][:,-1], dim=-1) - if no_unk: out[:,self.data.vocab.stoi[UNK]] = -float('Inf') - values, indices = out.topk(top_k, dim=-1) - scores = (-values + scores[:,None]).view(-1) - indices_idx = torch.arange(0,nodes.size(0))[:,None].expand(nodes.size(0), top_k).contiguous().view(-1) - sort_idx = scores.argsort()[:beam_sz] - scores = scores[sort_idx] - nodes = torch.cat([nodes[:,None].expand(nodes.size(0),top_k,nodes.size(1)), - indices[:,:,None].expand(nodes.size(0),top_k,1),], dim=2) - nodes = nodes.view(-1, nodes.size(2))[sort_idx] - self.model[0].select_hidden(indices_idx[sort_idx]) - xb = nodes[:,-1][:,None] - if temperature != 1.: scores.div_(temperature) - node_idx = torch.multinomial(torch.exp(-scores), 1).item() - return text + sep + sep.join(decoder(self.data.vocab.textify([i.item() for i in nodes[node_idx][1:] ], sep=None))) - - def show_results(self, ds_type=DatasetType.Valid, rows:int=5, max_len:int=20): - from IPython.display import display, HTML - "Show `rows` result of predictions on `ds_type` dataset." - ds = self.dl(ds_type).dataset - x,y = self.data.one_batch(ds_type, detach=False, denorm=False) - preds = self.pred_batch(batch=(x,y)) - y = y.view(*x.size()) - z = preds.view(*x.size(),-1).argmax(dim=2) - xs = [ds.x.reconstruct(grab_idx(x, i)) for i in range(rows)] - ys = [ds.x.reconstruct(grab_idx(y, i)) for i in range(rows)] - zs = [ds.x.reconstruct(grab_idx(z, i)) for i in range(rows)] - items,names = [],['text', 'target', 'pred'] - for i, (x,y,z) in enumerate(zip(xs,ys,zs)): - txt_x = ' '.join(x.text.split(' ')[:max_len]) - txt_y = ' '.join(y.text.split(' ')[max_len-1:2*max_len-1]) - txt_z = ' '.join(z.text.split(' ')[max_len-1:2*max_len-1]) - items.append([txt_x, txt_y, txt_z]) - items = np.array(items) - df = pd.DataFrame({n:items[:,i] for i,n in enumerate(names)}, columns=names) - with pd.option_context('display.max_colwidth', -1): - display(HTML(df.to_html(index=False))) - -def get_language_model(arch:Callable, vocab_sz:int, config:dict=None, drop_mult:float=1.): - "Create a language model from `arch` and its `config`, maybe `pretrained`." 
- meta = _model_meta[arch] - config = ifnone(config, meta['config_lm']).copy() - for k in config.keys(): - if k.endswith('_p'): config[k] *= drop_mult - tie_weights,output_p,out_bias = map(config.pop, ['tie_weights', 'output_p', 'out_bias']) - init = config.pop('init') if 'init' in config else None - encoder = arch(vocab_sz, **config) - enc = encoder.encoder if tie_weights else None - decoder = LinearDecoder(vocab_sz, config[meta['hid_name']], output_p, tie_encoder=enc, bias=out_bias) - model = SequentialRNN(encoder, decoder) - return model if init is None else model.apply(init) - -def language_model_learner(data:DataBunch, arch, config:dict=None, drop_mult:float=1., pretrained:bool=True, - pretrained_fnames:OptStrTuple=None, **learn_kwargs) -> 'LanguageLearner': - "Create a `Learner` with a language model from `data` and `arch`." - model = get_language_model(arch, len(data.vocab.itos), config=config, drop_mult=drop_mult) - meta = _model_meta[arch] - learn = LanguageLearner(data, model, split_func=meta['split_lm'], **learn_kwargs) - url = 'url_bwd' if data.backwards else 'url' - if pretrained or pretrained_fnames: - if pretrained_fnames is not None: - fnames = [learn.path/learn.model_dir/f'{fn}.{ext}' for fn,ext in zip(pretrained_fnames, ['pth', 'pkl'])] - else: - if url not in meta: - warn("There are no pretrained weights for that architecture yet!") - return learn - model_path = untar_data(meta[url] , data=False) - fnames = [list(model_path.glob(f'*.{ext}'))[0] for ext in ['pth', 'pkl']] - learn.load_pretrained(*fnames) - learn.freeze() - return learn - -def masked_concat_pool(outputs, mask): - "Pool MultiBatchEncoder outputs into one vector [last_hidden, max_pool, avg_pool]." - output = outputs[-1] - avg_pool = output.masked_fill(mask[:, :, None], 0).mean(dim=1) - avg_pool *= output.size(1) / (output.size(1)-mask.type(avg_pool.dtype).sum(dim=1))[:,None] - max_pool = output.masked_fill(mask[:,:,None], -float('inf')).max(dim=1)[0] - x = torch.cat([output[:,-1], max_pool, avg_pool], 1) - return x - -class PoolingLinearClassifier(Module): - "Create a linear classifier with pooling." - def __init__(self, layers:Collection[int], drops:Collection[float]): - mod_layers = [] - if len(drops) != len(layers)-1: raise ValueError("Number of layers and dropout values do not match.") - activs = [nn.ReLU(inplace=True)] * (len(layers) - 2) + [None] - for n_in, n_out, p, actn in zip(layers[:-1], layers[1:], drops, activs): - mod_layers += bn_drop_lin(n_in, n_out, p=p, actn=actn) - self.layers = nn.Sequential(*mod_layers) - - def forward(self, input:Tuple[Tensor,Tensor, Tensor])->Tuple[Tensor,Tensor,Tensor]: - raw_outputs,outputs,mask = input - x = masked_concat_pool(outputs, mask) - x = self.layers(x) - return x, raw_outputs, outputs - -class MultiBatchEncoder(Module): - "Create an encoder over `module` that can process a full sentence." - def __init__(self, bptt:int, max_len:int, module:nn.Module, pad_idx:int=1): - self.max_len,self.bptt,self.module,self.pad_idx = max_len,bptt,module,pad_idx - - def concat(self, arrs:Collection[Tensor])->Tensor: - "Concatenate the `arrs` along the batch dimension." 
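        # Note: the concatenation below runs along dim=1, the sequence dimension of the
        # batch-first (bs, seq_len, n_hid) activations; it stitches the per-BPTT chunks produced
        # in `forward` back into one full-length tensor per layer.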
- return [torch.cat([l[si] for l in arrs], dim=1) for si in range_of(arrs[0])] - - def reset(self): - if hasattr(self.module, 'reset'): self.module.reset() - - def forward(self, input:LongTensor)->Tuple[Tensor,Tensor]: - bs,sl = input.size() - self.reset() - raw_outputs,outputs,masks = [],[],[] - for i in range(0, sl, self.bptt): - r, o = self.module(input[:,i: min(i+self.bptt, sl)]) - if i>(sl-self.max_len): - masks.append(input[:,i: min(i+self.bptt, sl)] == self.pad_idx) - raw_outputs.append(r) - outputs.append(o) - return self.concat(raw_outputs),self.concat(outputs),torch.cat(masks,dim=1) - -def get_text_classifier(arch:Callable, vocab_sz:int, n_class:int, bptt:int=70, max_len:int=20*70, config:dict=None, - drop_mult:float=1., lin_ftrs:Collection[int]=None, ps:Collection[float]=None, - pad_idx:int=1) -> nn.Module: - "Create a text classifier from `arch` and its `config`, maybe `pretrained`." - meta = _model_meta[arch] - config = ifnone(config, meta['config_clas']).copy() - for k in config.keys(): - if k.endswith('_p'): config[k] *= drop_mult - if lin_ftrs is None: lin_ftrs = [50] - if ps is None: ps = [0.1]*len(lin_ftrs) - layers = [config[meta['hid_name']] * 3] + lin_ftrs + [n_class] - ps = [config.pop('output_p')] + ps - init = config.pop('init') if 'init' in config else None - encoder = MultiBatchEncoder(bptt, max_len, arch(vocab_sz, **config), pad_idx=pad_idx) - model = SequentialRNN(encoder, PoolingLinearClassifier(layers, ps)) - return model if init is None else model.apply(init) - -def text_classifier_learner(data:DataBunch, arch:Callable, bptt:int=70, max_len:int=70*20, config:dict=None, - pretrained:bool=True, drop_mult:float=1., lin_ftrs:Collection[int]=None, - ps:Collection[float]=None, **learn_kwargs) -> 'TextClassifierLearner': - "Create a `Learner` with a text classifier from `data` and `arch`." 
- model = get_text_classifier(arch, len(data.vocab.itos), data.c, bptt=bptt, max_len=max_len, - config=config, drop_mult=drop_mult, lin_ftrs=lin_ftrs, ps=ps) - meta = _model_meta[arch] - learn = RNNLearner(data, model, split_func=meta['split_clas'], **learn_kwargs) - if pretrained: - if 'url' not in meta: - warn("There are no pretrained weights for that architecture yet!") - return learn - model_path = untar_data(meta['url'], data=False) - fnames = [list(model_path.glob(f'*.{ext}'))[0] for ext in ['pth', 'pkl']] - learn.load_pretrained(*fnames, strict=False) - learn.freeze() - return learn diff --git a/spaces/anon9i9/finetuned_diffusion_test/utils.py b/spaces/anon9i9/finetuned_diffusion_test/utils.py deleted file mode 100644 index ff1c065d186347ca51b47d010a697dbe1814695c..0000000000000000000000000000000000000000 --- a/spaces/anon9i9/finetuned_diffusion_test/utils.py +++ /dev/null @@ -1,6 +0,0 @@ -def is_google_colab(): - try: - import google.colab - return True - except: - return False \ No newline at end of file diff --git a/spaces/anupam210/Flight_ATA_Class/extract_text.py b/spaces/anupam210/Flight_ATA_Class/extract_text.py deleted file mode 100644 index 8ba5f6298cc33acfe47751d588046f07c2f1a182..0000000000000000000000000000000000000000 --- a/spaces/anupam210/Flight_ATA_Class/extract_text.py +++ /dev/null @@ -1,47 +0,0 @@ -from preprocessing_images import preprocessing_function -from datetime import datetime -from azure.storage.blob import BlobClient -from msrest.authentication import CognitiveServicesCredentials -#importing azure packages -from azure.cognitiveservices.vision.computervision import ComputerVisionClient -from azure.cognitiveservices.vision.computervision.models import OperationStatusCodes - -#ocr extraction using azure computer vision API -def azure_ocr(pdf_url,computervision_client): - try: - read_response = computervision_client.read(pdf_url,raw=True) - read_operation_location = read_response.headers["Operation-Location"] - operation_id = read_operation_location.split("/")[-1] - while True: - read_result = computervision_client.get_read_result(operation_id) - if read_result.status not in ['notStarted', 'running']: - break - words = [] - if read_result.status == OperationStatusCodes.succeeded: - for text_result in read_result.analyze_result.read_results: - for line in text_result.lines: - words.append(line.text) - all_text = ' '.join(words) - return all_text - except Exception as e: - raise Exception(e) -def extract_text_from_url(test_pdf_url): - try: - preprocessing_function(test_pdf_url) - my_blob = 'test_clean_pdf' + datetime.now().strftime('%Y_%m_%d_%H_%M_%S') - blob = BlobClient.from_connection_string(conn_str=connection_string, container_name= my_container, blob_name=my_blob) - with open("answer_paper.pdf", "rb") as data: - blob.upload_blob(data) - computervision_client = ComputerVisionClient(endpoint, CognitiveServicesCredentials(subscription_key)) - text = azure_ocr(blob.url, computervision_client) - text = text.lower() - n = text.find("150 word") - if n > 0: - text = text[n+10:] - - elif text.find("150 ward") > 0: - nn = text.find("150 ward") - text = text[nn+10:] - return text - except Exception as e: - raise Exception(e) \ No newline at end of file diff --git a/spaces/aphenx/bingo/src/components/chat-list.tsx b/spaces/aphenx/bingo/src/components/chat-list.tsx deleted file mode 100644 index 624a78ef0d7be0f1192cf02a81e2e9cf214cb193..0000000000000000000000000000000000000000 --- a/spaces/aphenx/bingo/src/components/chat-list.tsx +++ /dev/null @@ -1,28 +0,0 @@ 
-import React from 'react' - -import { Separator } from '@/components/ui/separator' -import { ChatMessage } from '@/components/chat-message' -import { ChatMessageModel } from '@/lib/bots/bing/types' - -export interface ChatList { - messages: ChatMessageModel[] -} - -export function ChatList({ messages }: ChatList) { - if (!messages.length) { - return null - } - - return ( -
    - {messages.map((message, index) => ( - - - {index < messages.length - 1 && ( - - )} - - ))} -
    - ) -} diff --git a/spaces/arch-123/bingo/tests/kblob.ts b/spaces/arch-123/bingo/tests/kblob.ts deleted file mode 100644 index 9e15b41c1c94a690beb61b23cdb42fc78767ccd2..0000000000000000000000000000000000000000 --- a/spaces/arch-123/bingo/tests/kblob.ts +++ /dev/null @@ -1,27 +0,0 @@ -import FormData from 'form-data' - -import { fetch } from '@/lib/isomorphic' - -const formData = new FormData() - -const knowledgeRequest = {"imageInfo":{"url":"https://www.baidu.com/img/PCfb_5bf082d29588c07f842ccde3f97243ea.png"},"knowledgeRequest":{"invokedSkills":["ImageById"],"subscriptionId":"Bing.Chat.Multimodal","invokedSkillsRequestData":{"enableFaceBlur":true},"convoData":{"convoid":"51D|BingProdUnAuthenticatedUsers|E3DCA904FF236C67C3450163BCEC64CFF3F618CC8A4AFD75FD518F5ED0ADA080","convotone":"Creative"}}} - -formData.append('knowledgeRequest', JSON.stringify(knowledgeRequest)) - - -fetch('https://bing.vcanbb.top/images/kblob', - { - method: 'POST', - body: formData.getBuffer(), - headers: { - "sec-ch-ua": "\"Not/A)Brand\";v=\"99\", \"Google Chrome\";v=\"115\", \"Chromium\";v=\"115\"", - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-platform": "\"Windows\"", - "Referer": "https://bing.vcanbb.top/web/index.html", - "Referrer-Policy": "origin-when-cross-origin", - ...formData.getHeaders() - } - - } -).then(res => res.text()) -.then(res => console.log('res', res)) diff --git a/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/layers/generic/gated_conv.py b/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/layers/generic/gated_conv.py deleted file mode 100644 index 9a29c4499f970db538a4b99c3c05cba22576195f..0000000000000000000000000000000000000000 --- a/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/layers/generic/gated_conv.py +++ /dev/null @@ -1,37 +0,0 @@ -from torch import nn - -from .normalization import LayerNorm - - -class GatedConvBlock(nn.Module): - """Gated convolutional block as in https://arxiv.org/pdf/1612.08083.pdf - Args: - in_out_channels (int): number of input/output channels. - kernel_size (int): convolution kernel size. - dropout_p (float): dropout rate. - """ - - def __init__(self, in_out_channels, kernel_size, dropout_p, num_layers): - super().__init__() - # class arguments - self.dropout_p = dropout_p - self.num_layers = num_layers - # define layers - self.conv_layers = nn.ModuleList() - self.norm_layers = nn.ModuleList() - self.layers = nn.ModuleList() - for _ in range(num_layers): - self.conv_layers += [nn.Conv1d(in_out_channels, 2 * in_out_channels, kernel_size, padding=kernel_size // 2)] - self.norm_layers += [LayerNorm(2 * in_out_channels)] - - def forward(self, x, x_mask): - o = x - res = x - for idx in range(self.num_layers): - o = nn.functional.dropout(o, p=self.dropout_p, training=self.training) - o = self.conv_layers[idx](o * x_mask) - o = self.norm_layers[idx](o) - o = nn.functional.glu(o, dim=1) - o = res + o - res = o - return o diff --git a/spaces/artificialguybr/video-dubbing/TTS/docs/source/docker_images.md b/spaces/artificialguybr/video-dubbing/TTS/docs/source/docker_images.md deleted file mode 100644 index d08a55837d33f44785a03207408f8dabca8fa07f..0000000000000000000000000000000000000000 --- a/spaces/artificialguybr/video-dubbing/TTS/docs/source/docker_images.md +++ /dev/null @@ -1,56 +0,0 @@ -(docker_images)= -## Docker images -We provide docker images to be able to test TTS without having to setup your own environment. - -### Using premade images -You can use premade images built automatically from the latest TTS version. 
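After pulling one of the images below, you can sanity-check it by asking the bundled CLI to list the released models. This is only a sketch: it assumes the image's entrypoint is the `tts` command, as the inference examples further down suggest, and it does not need a GPU.

```bash
# List the available TTS models from inside the CPU image (no GPU required).
docker run --rm ghcr.io/coqui-ai/tts-cpu --list_models
```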
- -#### CPU version -```bash -docker pull ghcr.io/coqui-ai/tts-cpu -``` -#### GPU version -```bash -docker pull ghcr.io/coqui-ai/tts -``` - -### Building your own image -```bash -docker build -t tts . -``` - -## Basic inference -Basic usage: generating an audio file from a text passed as argument. -You can pass any tts argument after the image name. - -### CPU version -```bash -docker run --rm -v ~/tts-output:/root/tts-output ghcr.io/coqui-ai/tts-cpu --text "Hello." --out_path /root/tts-output/hello.wav -``` -### GPU version -For the GPU version, you need to have the latest NVIDIA drivers installed. -With `nvidia-smi` you can check the CUDA version supported, it must be >= 11.8 - -```bash -docker run --rm --gpus all -v ~/tts-output:/root/tts-output ghcr.io/coqui-ai/tts --text "Hello." --out_path /root/tts-output/hello.wav --use_cuda true -``` - -## Start a server -Starting a TTS server: -Start the container and get a shell inside it. - -### CPU version -```bash -docker run --rm -it -p 5002:5002 --entrypoint /bin/bash ghcr.io/coqui-ai/tts-cpu -python3 TTS/server/server.py --list_models #To get the list of available models -python3 TTS/server/server.py --model_name tts_models/en/vctk/vits -``` - -### GPU version -```bash -docker run --rm -it -p 5002:5002 --gpus all --entrypoint /bin/bash ghcr.io/coqui-ai/tts -python3 TTS/server/server.py --list_models #To get the list of available models -python3 TTS/server/server.py --model_name tts_models/en/vctk/vits --use_cuda true -``` - -Click [there](http://[::1]:5002/) and have fun with the server! \ No newline at end of file diff --git a/spaces/arxify/RVC-beta-v2-0618/docs/README.ko.md b/spaces/arxify/RVC-beta-v2-0618/docs/README.ko.md deleted file mode 100644 index 80897efac0c0aaab172a39f32474622d8a229f3b..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/docs/README.ko.md +++ /dev/null @@ -1,112 +0,0 @@ -
    - -

    Retrieval-based-Voice-Conversion-WebUI

-A simple, easy-to-use voice conversion framework based on VITS.

    - -[![madewithlove](https://forthebadge.com/images/badges/built-with-love.svg)](https://github.com/liujing04/Retrieval-based-Voice-Conversion-WebUI) - -
    - -[![Open In Colab](https://img.shields.io/badge/Colab-F9AB00?style=for-the-badge&logo=googlecolab&color=525252)](https://colab.research.google.com/github/liujing04/Retrieval-based-Voice-Conversion-WebUI/blob/main/Retrieval_based_Voice_Conversion_WebUI.ipynb) -[![Licence](https://img.shields.io/github/license/liujing04/Retrieval-based-Voice-Conversion-WebUI?style=for-the-badge)](https://github.com/liujing04/Retrieval-based-Voice-Conversion-WebUI/blob/main/%E4%BD%BF%E7%94%A8%E9%9C%80%E9%81%B5%E5%AE%88%E7%9A%84%E5%8D%8F%E8%AE%AE-LICENSE.txt) -[![Huggingface](https://img.shields.io/badge/🤗%20-Spaces-yellow.svg?style=for-the-badge)](https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main/) - -[![Discord](https://img.shields.io/badge/RVC%20Developers-Discord-7289DA?style=for-the-badge&logo=discord&logoColor=white)](https://discord.gg/HcsmBBGyVk) - -
    - ---- - -[**업데이트 로그**](https://github.com/liujing04/Retrieval-based-Voice-Conversion-WebUI/blob/main/Changelog_KO.md) - -[**English**](./README.en.md) | [**中文简体**](../README.md) | [**日本語**](./README.ja.md) | [**한국어**](./README.ko.md) ([**韓國語**](./README.ko.han.md)) - -> [데모 영상](https://www.bilibili.com/video/BV1pm4y1z7Gm/)을 확인해 보세요! - -> RVC를 활용한 실시간 음성변환: [w-okada/voice-changer](https://github.com/w-okada/voice-changer) - -> 기본 모델은 50시간 가량의 고퀄리티 오픈 소스 VCTK 데이터셋을 사용하였으므로, 저작권상의 염려가 없으니 안심하고 사용하시기 바랍니다. - -> 저작권 문제가 없는 고퀄리티의 노래를 이후에도 계속해서 훈련할 예정입니다. - -## 소개 - -본 Repo는 다음과 같은 특징을 가지고 있습니다: - -- top1 검색을 이용하여 입력 음색 특징을 훈련 세트 음색 특징으로 대체하여 음색의 누출을 방지; -- 상대적으로 낮은 성능의 GPU에서도 빠른 훈련 가능; -- 적은 양의 데이터로 훈련해도 좋은 결과를 얻을 수 있음 (최소 10분 이상의 저잡음 음성 데이터를 사용하는 것을 권장); -- 모델 융합을 통한 음색의 변조 가능 (ckpt 처리 탭->ckpt 병합 선택); -- 사용하기 쉬운 WebUI (웹 인터페이스); -- UVR5 모델을 이용하여 목소리와 배경음악의 빠른 분리; - -## 환경의 준비 - -poetry를 통해 dependecies를 설치하는 것을 권장합니다. - -다음 명령은 Python 버전 3.8 이상의 환경에서 실행되어야 합니다: - -```bash -# PyTorch 관련 주요 dependencies 설치, 이미 설치되어 있는 경우 건너뛰기 가능 -# 참조: https://pytorch.org/get-started/locally/ -pip install torch torchvision torchaudio - -# Windows + Nvidia Ampere Architecture(RTX30xx)를 사용하고 있다면, https://github.com/liujing04/Retrieval-based-Voice-Conversion-WebUI/issues/21 에서 명시된 것과 같이 PyTorch에 맞는 CUDA 버전을 지정해야 합니다. -#pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu117 - -# Poetry 설치, 이미 설치되어 있는 경우 건너뛰기 가능 -# Reference: https://python-poetry.org/docs/#installation -curl -sSL https://install.python-poetry.org | python3 - - -# Dependecies 설치 -poetry install -``` - -pip를 활용하여 dependencies를 설치하여도 무방합니다. - -```bash -pip install -r requirements.txt -``` - -## 기타 사전 모델 준비 - -RVC 모델은 추론과 훈련을 위하여 다른 사전 모델이 필요합니다. - -[Huggingface space](https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main/)를 통해서 다운로드 할 수 있습니다. - -다음은 RVC에 필요한 사전 모델 및 기타 파일 목록입니다: - -```bash -hubert_base.pt - -./pretrained - -./uvr5_weights - -# Windows를 사용하는 경우 이 사전도 필요할 수 있습니다. FFmpeg가 설치되어 있으면 건너뛰어도 됩니다. -ffmpeg.exe -``` - -그 후 이하의 명령을 사용하여 WebUI를 시작할 수 있습니다: - -```bash -python infer-web.py -``` - -Windows를 사용하는 경우 `RVC-beta.7z`를 다운로드 및 압축 해제하여 RVC를 직접 사용하거나 `go-web.bat`을 사용하여 WebUi를 시작할 수 있습니다. - -## 참고 - -- [ContentVec](https://github.com/auspicious3000/contentvec/) -- [VITS](https://github.com/jaywalnut310/vits) -- [HIFIGAN](https://github.com/jik876/hifi-gan) -- [Gradio](https://github.com/gradio-app/gradio) -- [FFmpeg](https://github.com/FFmpeg/FFmpeg) -- [Ultimate Vocal Remover](https://github.com/Anjok07/ultimatevocalremovergui) -- [audio-slicer](https://github.com/openvpi/audio-slicer) - -## 모든 기여자 분들의 노력에 감사드립니다. - - - - diff --git a/spaces/arxify/RVC-beta-v2-0618/envfilescheck.bat b/spaces/arxify/RVC-beta-v2-0618/envfilescheck.bat deleted file mode 100644 index 547f2aef8621821b86e4d9898f86e0128f55500e..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/envfilescheck.bat +++ /dev/null @@ -1,348 +0,0 @@ -@echo off && chcp 65001 - -echo working dir is %cd% -echo downloading requirement aria2 check. -echo= -dir /a:d/b | findstr "aria2" > flag.txt -findstr "aria2" flag.txt >nul -if %errorlevel% ==0 ( - echo aria2 checked. - echo= -) else ( - echo failed. please downloading aria2 from webpage! - echo unzip it and put in this directory! - timeout /T 5 - start https://github.com/aria2/aria2/releases/tag/release-1.36.0 - echo= - goto end -) - -echo envfiles checking start. 
-echo= - -for /f %%x in ('findstr /i /c:"aria2" "flag.txt"') do (set aria2=%%x)&goto endSch -:endSch - -set d32=f0D32k.pth -set d40=f0D40k.pth -set d48=f0D48k.pth -set g32=f0G32k.pth -set g40=f0G40k.pth -set g48=f0G48k.pth - -set d40v2=f0D40k.pth -set g40v2=f0G40k.pth - -set dld32=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D32k.pth -set dld40=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D40k.pth -set dld48=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D48k.pth -set dlg32=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G32k.pth -set dlg40=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G40k.pth -set dlg48=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G48k.pth - -set dld40v2=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0D40k.pth -set dlg40v2=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0G40k.pth - -set hp2_all=HP2_all_vocals.pth -set hp3_all=HP3_all_vocals.pth -set hp5_only=HP5_only_main_vocal.pth -set VR_DeEchoAggressive=VR-DeEchoAggressive.pth -set VR_DeEchoDeReverb=VR-DeEchoDeReverb.pth -set VR_DeEchoNormal=VR-DeEchoNormal.pth -set onnx_dereverb=vocals.onnx - -set dlhp2_all=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP2_all_vocals.pth -set dlhp3_all=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP3_all_vocals.pth -set dlhp5_only=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP5_only_main_vocal.pth -set dlVR_DeEchoAggressive=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/VR-DeEchoAggressive.pth -set dlVR_DeEchoDeReverb=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/VR-DeEchoDeReverb.pth -set dlVR_DeEchoNormal=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/VR-DeEchoNormal.pth -set dlonnx_dereverb=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/onnx_dereverb_By_FoxJoy/vocals.onnx - -set hb=hubert_base.pt - -set dlhb=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/hubert_base.pt - -echo dir check start. -echo= - -if exist "%~dp0pretrained" ( - echo dir .\pretrained checked. - ) else ( - echo failed. generating dir .\pretrained. - mkdir pretrained - ) -if exist "%~dp0pretrained_v2" ( - echo dir .\pretrained_v2 checked. - ) else ( - echo failed. generating dir .\pretrained_v2. - mkdir pretrained_v2 - ) -if exist "%~dp0uvr5_weights" ( - echo dir .\uvr5_weights checked. - ) else ( - echo failed. generating dir .\uvr5_weights. - mkdir uvr5_weights - ) -if exist "%~dp0uvr5_weights\onnx_dereverb_By_FoxJoy" ( - echo dir .\uvr5_weights\onnx_dereverb_By_FoxJoy checked. - ) else ( - echo failed. generating dir .\uvr5_weights\onnx_dereverb_By_FoxJoy. - mkdir uvr5_weights\onnx_dereverb_By_FoxJoy - ) - -echo= -echo dir check finished. - -echo= -echo required files check start. - -echo checking D32k.pth -if exist "%~dp0pretrained\D32k.pth" ( - echo D32k.pth in .\pretrained checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/D32k.pth -d %~dp0pretrained -o D32k.pth - if exist "%~dp0pretrained\D32k.pth" (echo download successful.) else (echo please try again! 
- echo=) - ) -echo checking D40k.pth -if exist "%~dp0pretrained\D40k.pth" ( - echo D40k.pth in .\pretrained checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/D40k.pth -d %~dp0pretrained -o D40k.pth - if exist "%~dp0pretrained\D40k.pth" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking D40k.pth -if exist "%~dp0pretrained_v2\D40k.pth" ( - echo D40k.pth in .\pretrained_v2 checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/D40k.pth -d %~dp0pretrained_v2 -o D40k.pth - if exist "%~dp0pretrained_v2\D40k.pth" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking D48k.pth -if exist "%~dp0pretrained\D48k.pth" ( - echo D48k.pth in .\pretrained checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/D48k.pth -d %~dp0pretrained -o D48k.pth - if exist "%~dp0pretrained\D48k.pth" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking G32k.pth -if exist "%~dp0pretrained\G32k.pth" ( - echo G32k.pth in .\pretrained checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/G32k.pth -d %~dp0pretrained -o G32k.pth - if exist "%~dp0pretrained\G32k.pth" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking G40k.pth -if exist "%~dp0pretrained\G40k.pth" ( - echo G40k.pth in .\pretrained checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/G40k.pth -d %~dp0pretrained -o G40k.pth - if exist "%~dp0pretrained\G40k.pth" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking G40k.pth -if exist "%~dp0pretrained_v2\G40k.pth" ( - echo G40k.pth in .\pretrained_v2 checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/G40k.pth -d %~dp0pretrained_v2 -o G40k.pth - if exist "%~dp0pretrained_v2\G40k.pth" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking G48k.pth -if exist "%~dp0pretrained\G48k.pth" ( - echo G48k.pth in .\pretrained checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/G48k.pth -d %~dp0pretrained -o G48k.pth - if exist "%~dp0pretrained\G48k.pth" (echo download successful.) else (echo please try again! - echo=) - ) - -echo checking %d32% -if exist "%~dp0pretrained\%d32%" ( - echo %d32% in .\pretrained checked. - echo= - ) else ( - echo failed. starting download from huggingface. 
- %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dld32% -d %~dp0pretrained -o %d32% - if exist "%~dp0pretrained\%d32%" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking %d40% -if exist "%~dp0pretrained\%d40%" ( - echo %d40% in .\pretrained checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dld40% -d %~dp0pretrained -o %d40% - if exist "%~dp0pretrained\%d40%" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking %d40v2% -if exist "%~dp0pretrained_v2\%d40v2%" ( - echo %d40v2% in .\pretrained_v2 checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dld40v2% -d %~dp0pretrained_v2 -o %d40v2% - if exist "%~dp0pretrained_v2\%d40v2%" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking %d48% -if exist "%~dp0pretrained\%d48%" ( - echo %d48% in .\pretrained checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dld48% -d %~dp0pretrained -o %d48% - if exist "%~dp0pretrained\%d48%" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking %g32% -if exist "%~dp0pretrained\%g32%" ( - echo %g32% in .\pretrained checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlg32% -d %~dp0pretrained -o %g32% - if exist "%~dp0pretrained\%g32%" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking %g40% -if exist "%~dp0pretrained\%g40%" ( - echo %g40% in .\pretrained checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlg40% -d %~dp0pretrained -o %g40% - if exist "%~dp0pretrained\%g40%" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking %g40v2% -if exist "%~dp0pretrained_v2\%g40v2%" ( - echo %g40v2% in .\pretrained_v2 checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlg40v2% -d %~dp0pretrained_v2 -o %g40v2% - if exist "%~dp0pretrained_v2\%g40v2%" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking %g48% -if exist "%~dp0pretrained\%g48%" ( - echo %g48% in .\pretrained checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlg48% -d %~dp0\pretrained -o %g48% - if exist "%~dp0pretrained\%g48%" (echo download successful.) else (echo please try again! - echo=) - ) - -echo checking %hp2_all% -if exist "%~dp0uvr5_weights\%hp2_all%" ( - echo %hp2_all% in .\uvr5_weights checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlhp2_all% -d %~dp0\uvr5_weights -o %hp2_all% - if exist "%~dp0uvr5_weights\%hp2_all%" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking %hp3_all% -if exist "%~dp0uvr5_weights\%hp3_all%" ( - echo %hp3_all% in .\uvr5_weights checked. - echo= - ) else ( - echo failed. starting download from huggingface. 
- %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlhp3_all% -d %~dp0\uvr5_weights -o %hp3_all% - if exist "%~dp0uvr5_weights\%hp3_all%" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking %hp5_only% -if exist "%~dp0uvr5_weights\%hp5_only%" ( - echo %hp5_only% in .\uvr5_weights checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlhp5_only% -d %~dp0\uvr5_weights -o %hp5_only% - if exist "%~dp0uvr5_weights\%hp5_only%" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking %VR_DeEchoAggressive% -if exist "%~dp0uvr5_weights\%VR_DeEchoAggressive%" ( - echo %VR_DeEchoAggressive% in .\uvr5_weights checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlVR_DeEchoAggressive% -d %~dp0\uvr5_weights -o %VR_DeEchoAggressive% - if exist "%~dp0uvr5_weights\%VR_DeEchoAggressive%" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking %VR_DeEchoDeReverb% -if exist "%~dp0uvr5_weights\%VR_DeEchoDeReverb%" ( - echo %VR_DeEchoDeReverb% in .\uvr5_weights checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlVR_DeEchoDeReverb% -d %~dp0\uvr5_weights -o %VR_DeEchoDeReverb% - if exist "%~dp0uvr5_weights\%VR_DeEchoDeReverb%" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking %VR_DeEchoNormal% -if exist "%~dp0uvr5_weights\%VR_DeEchoNormal%" ( - echo %VR_DeEchoNormal% in .\uvr5_weights checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlVR_DeEchoNormal% -d %~dp0\uvr5_weights -o %VR_DeEchoNormal% - if exist "%~dp0uvr5_weights\%VR_DeEchoNormal%" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking %onnx_dereverb% -if exist "%~dp0uvr5_weights\onnx_dereverb_By_FoxJoy\%onnx_dereverb%" ( - echo %onnx_dereverb% in .\uvr5_weights\onnx_dereverb_By_FoxJoy checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlonnx_dereverb% -d %~dp0\uvr5_weights\onnx_dereverb_By_FoxJoy -o %onnx_dereverb% - if exist "%~dp0uvr5_weights\onnx_dereverb_By_FoxJoy\%onnx_dereverb%" (echo download successful.) else (echo please try again! - echo=) - ) - -echo checking %hb% -if exist "%~dp0%hb%" ( - echo %hb% in .\pretrained checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlhb% -d %~dp0 -o %hb% - if exist "%~dp0%hb%" (echo download successful.) else (echo please try again! - echo=) - ) - -echo required files check finished. -echo envfiles check complete. 
-pause -:end -del flag.txt diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Crypto/SelfTest/Hash/test_SHA1.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Crypto/SelfTest/Hash/test_SHA1.py deleted file mode 100644 index a883a44b5075e1536f3e177d6bd74b7980ce88fd..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Crypto/SelfTest/Hash/test_SHA1.py +++ /dev/null @@ -1,84 +0,0 @@ -# -*- coding: utf-8 -*- -# -# SelfTest/Hash/SHA1.py: Self-test for the SHA-1 hash function -# -# Written in 2008 by Dwayne C. Litzenberger -# -# =================================================================== -# The contents of this file are dedicated to the public domain. To -# the extent that dedication to the public domain is not available, -# everyone is granted a worldwide, perpetual, royalty-free, -# non-exclusive license to exercise all rights associated with the -# contents of this file for any purpose whatsoever. -# No rights are reserved. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS -# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN -# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. -# =================================================================== - -"""Self-test suite for Crypto.Hash.SHA""" - -from binascii import hexlify - -from Crypto.SelfTest.loader import load_test_vectors - -# Test vectors from various sources -# This is a list of (expected_result, input[, description]) tuples. 
-test_data_various = [ - # FIPS PUB 180-2, A.1 - "One-Block Message" - ('a9993e364706816aba3e25717850c26c9cd0d89d', 'abc'), - - # FIPS PUB 180-2, A.2 - "Multi-Block Message" - ('84983e441c3bd26ebaae4aa1f95129e5e54670f1', - 'abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq'), - - # FIPS PUB 180-2, A.3 - "Long Message" -# ('34aa973cd4c4daa4f61eeb2bdbad27316534016f', -# 'a' * 10**6, -# '"a" * 10**6'), - - # RFC 3174: Section 7.3, "TEST4" (multiple of 512 bits) - ('dea356a2cddd90c7a7ecedc5ebb563934f460452', - '01234567' * 80, - '"01234567" * 80'), -] - -def get_tests(config={}): - from Crypto.Hash import SHA1 - from .common import make_hash_tests - - tests = [] - - test_vectors = load_test_vectors(("Hash", "SHA1"), - "SHA1ShortMsg.rsp", - "KAT SHA-1", - { "len" : lambda x: int(x) } ) or [] - - test_data = test_data_various[:] - for tv in test_vectors: - try: - if tv.startswith('['): - continue - except AttributeError: - pass - if tv.len == 0: - tv.msg = b"" - test_data.append((hexlify(tv.md), tv.msg, tv.desc)) - - tests = make_hash_tests(SHA1, "SHA1", test_data, - digest_size=20, - oid="1.3.14.3.2.26") - return tests - -if __name__ == '__main__': - import unittest - suite = lambda: unittest.TestSuite(get_tests()) - unittest.main(defaultTest='suite') - -# vim:set ts=4 sw=4 sts=4 expandtab: diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/utils/mimebundle.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/utils/mimebundle.py deleted file mode 100644 index b75802560099dc8c4b818a7452b54cc224b8cfd0..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/utils/mimebundle.py +++ /dev/null @@ -1,83 +0,0 @@ -from .html import spec_to_html - - -def spec_to_mimebundle( - spec, - format, - mode=None, - vega_version=None, - vegaembed_version=None, - vegalite_version=None, - **kwargs, -): - """Convert a vega/vega-lite specification to a mimebundle - - The mimebundle type is controlled by the ``format`` argument, which can be - one of the following ['html', 'json', 'png', 'svg', 'pdf', 'vega', 'vega-lite'] - - Parameters - ---------- - spec : dict - a dictionary representing a vega-lite plot spec - format : string {'html', 'json', 'png', 'svg', 'pdf', 'vega', 'vega-lite'} - the file format to be saved. - mode : string {'vega', 'vega-lite'} - The rendering mode. - vega_version : string - The version of vega.js to use - vegaembed_version : string - The version of vegaembed.js to use - vegalite_version : string - The version of vegalite.js to use. Only required if mode=='vega-lite' - **kwargs : - Additional arguments will be passed to the generating function - - Returns - ------- - output : dict - a mime-bundle representing the image - - Note - ---- - The png, svg, pdf, and vega outputs require the altair_saver package - to be installed. 
- """ - if mode not in ["vega", "vega-lite"]: - raise ValueError("mode must be either 'vega' or 'vega-lite'") - - if mode == "vega" and format == "vega": - if vega_version is None: - raise ValueError("Must specify vega_version") - return {"application/vnd.vega.v{}+json".format(vega_version[0]): spec} - if format in ["png", "svg", "pdf", "vega"]: - try: - import altair_saver - except ImportError: - raise ValueError( - "Saving charts in {fmt!r} format requires the altair_saver package: " - "see http://github.com/altair-viz/altair_saver/".format(fmt=format) - ) - return altair_saver.render(spec, format, mode=mode, **kwargs) - if format == "html": - html = spec_to_html( - spec, - mode=mode, - vega_version=vega_version, - vegaembed_version=vegaembed_version, - vegalite_version=vegalite_version, - **kwargs, - ) - return {"text/html": html} - if format == "vega-lite": - assert mode == "vega-lite" # sanity check: should never be False - if mode == "vega": - raise ValueError("Cannot convert a vega spec to vegalite") - if vegalite_version is None: - raise ValueError("Must specify vegalite_version") - return {"application/vnd.vegalite.v{}+json".format(vegalite_version[0]): spec} - if format == "json": - return {"application/json": spec} - raise ValueError( - "format must be one of " - "['html', 'json', 'png', 'svg', 'pdf', 'vega', 'vega-lite']" - ) diff --git a/spaces/asafAdge/Detic/tools/convert-thirdparty-pretrained-model-to-d2.py b/spaces/asafAdge/Detic/tools/convert-thirdparty-pretrained-model-to-d2.py deleted file mode 100644 index ec042b8ce48d193b40fd1e6311b2cc4b0c4e4086..0000000000000000000000000000000000000000 --- a/spaces/asafAdge/Detic/tools/convert-thirdparty-pretrained-model-to-d2.py +++ /dev/null @@ -1,39 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved -import argparse -import pickle -import torch - -""" -Usage: - -cd DETIC_ROOT/models/ -wget https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/resnet50_miil_21k.pth -python ../tools/convert-thirdparty-pretrained-model-to-d2.py --path resnet50_miil_21k.pth - -wget https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22k.pth -python ../tools/convert-thirdparty-pretrained-model-to-d2.py --path swin_base_patch4_window7_224_22k.pth - -""" - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument('--path', default='') - args = parser.parse_args() - - print('Loading', args.path) - model = torch.load(args.path, map_location="cpu") - # import pdb; pdb.set_trace() - if 'model' in model: - model = model['model'] - if 'state_dict' in model: - model = model['state_dict'] - ret = { - "model": model, - "__author__": "third_party", - "matching_heuristics": True - } - out_path = args.path.replace('.pth', '.pkl') - print('Saving to', out_path) - pickle.dump(ret, open(out_path, "wb")) diff --git a/spaces/atimughal662/InfoFusion/src/gradio_utils/prompt_form.py b/spaces/atimughal662/InfoFusion/src/gradio_utils/prompt_form.py deleted file mode 100644 index d79b51833d207c867e5ceb1040169193bed4bf9a..0000000000000000000000000000000000000000 --- a/spaces/atimughal662/InfoFusion/src/gradio_utils/prompt_form.py +++ /dev/null @@ -1,108 +0,0 @@ -import os -import math - -import gradio as gr - - -def make_chatbots(output_label0, output_label0_model2, **kwargs): - visible_models = kwargs['visible_models'] - all_models = kwargs['all_models'] - - text_outputs = [] - chat_kwargs = [] - for model_state_locki, model_state_lock in enumerate(kwargs['model_states']): - if os.environ.get('DEBUG_MODEL_LOCK'): - model_name = model_state_lock["base_model"] + " : " + model_state_lock["inference_server"] - else: - model_name = model_state_lock["base_model"] - output_label = f'h2oGPT [{model_name}]' - min_width = 250 if kwargs['gradio_size'] in ['small', 'large', 'medium'] else 160 - chat_kwargs.append(dict(label=output_label, elem_classes='chatsmall', - height=kwargs['height'] or 400, min_width=min_width, - show_copy_button=kwargs['show_copy_button'], - visible=kwargs['model_lock'] and (visible_models is None or - model_state_locki in visible_models or - all_models[model_state_locki] in visible_models - ))) - - # base view on initial visible choice - if visible_models: - len_visible = len(visible_models) - else: - len_visible = len(kwargs['model_states']) - if kwargs['model_lock_columns'] == -1: - kwargs['model_lock_columns'] = len_visible - if kwargs['model_lock_columns'] is None: - kwargs['model_lock_columns'] = 3 - - ncols = kwargs['model_lock_columns'] - if kwargs['model_states'] == 0: - nrows = 0 - else: - nrows = math.ceil(len_visible / kwargs['model_lock_columns']) - - if kwargs['model_lock_columns'] == 0: - # not using model_lock - pass - elif nrows <= 1: - with gr.Row(): - for chat_kwargs1, model_state_lock in zip(chat_kwargs, kwargs['model_states']): - text_outputs.append(gr.Chatbot(**chat_kwargs1)) - elif nrows == kwargs['model_states']: - with gr.Row(): - for chat_kwargs1, model_state_lock in zip(chat_kwargs, kwargs['model_states']): - text_outputs.append(gr.Chatbot(**chat_kwargs1)) - elif nrows == 2: - with gr.Row(): - for mii, (chat_kwargs1, model_state_lock) in enumerate(zip(chat_kwargs, kwargs['model_states'])): - if mii >= len_visible / 2: - continue - 
text_outputs.append(gr.Chatbot(**chat_kwargs1)) - with gr.Row(): - for mii, (chat_kwargs1, model_state_lock) in enumerate(zip(chat_kwargs, kwargs['model_states'])): - if mii < len_visible / 2: - continue - text_outputs.append(gr.Chatbot(**chat_kwargs1)) - elif nrows == 3: - with gr.Row(): - for mii, (chat_kwargs1, model_state_lock) in enumerate(zip(chat_kwargs, kwargs['model_states'])): - if mii >= 1 * len_visible / 3: - continue - text_outputs.append(gr.Chatbot(**chat_kwargs1)) - with gr.Row(): - for mii, (chat_kwargs1, model_state_lock) in enumerate(zip(chat_kwargs, kwargs['model_states'])): - if mii < 1 * len_visible / 3 or mii >= 2 * len_visible / 3: - continue - text_outputs.append(gr.Chatbot(**chat_kwargs1)) - with gr.Row(): - for mii, (chat_kwargs1, model_state_lock) in enumerate(zip(chat_kwargs, kwargs['model_states'])): - if mii < 2 * len_visible / 3: - continue - text_outputs.append(gr.Chatbot(**chat_kwargs1)) - elif nrows >= 4: - with gr.Row(): - for mii, (chat_kwargs1, model_state_lock) in enumerate(zip(chat_kwargs, kwargs['model_states'])): - if mii >= 1 * len_visible / 4: - continue - text_outputs.append(gr.Chatbot(**chat_kwargs1)) - with gr.Row(): - for mii, (chat_kwargs1, model_state_lock) in enumerate(zip(chat_kwargs, kwargs['model_states'])): - if mii < 1 * len_visible / 4 or mii >= 2 * len_visible / 4: - continue - text_outputs.append(gr.Chatbot(**chat_kwargs1)) - with gr.Row(): - for mii, (chat_kwargs1, model_state_lock) in enumerate(zip(chat_kwargs, kwargs['model_states'])): - if mii < 2 * len_visible / 4 or mii >= 3 * len_visible / 4: - continue - text_outputs.append(gr.Chatbot(**chat_kwargs1)) - with gr.Row(): - for mii, (chat_kwargs1, model_state_lock) in enumerate(zip(chat_kwargs, kwargs['model_states'])): - if mii < 3 * len_visible / 4: - continue - text_outputs.append(gr.Chatbot(**chat_kwargs1)) - - with gr.Row(): - text_output = gr.Chatbot(label=output_label0, visible=not kwargs['model_lock'], height=kwargs['height'] or 400) - text_output2 = gr.Chatbot(label=output_label0_model2, - visible=False and not kwargs['model_lock'], height=kwargs['height'] or 400) - return text_output, text_output2, text_outputs diff --git a/spaces/awacke1/08-KitchenSink/app.py b/spaces/awacke1/08-KitchenSink/app.py deleted file mode 100644 index 95a8cd2a6414b7adcee4c612d0b9f15081b0eef4..0000000000000000000000000000000000000000 --- a/spaces/awacke1/08-KitchenSink/app.py +++ /dev/null @@ -1,32 +0,0 @@ -import importlib -import gradio as gr -import os -import sys -import copy -import pathlib - -# At least one demo fails when caching examples -# Temporary fix just to get the build to pass -os.environ["SYSTEM"] = "SPACES" - -demo_dir = pathlib.Path(__file__).parent / "demos" - - -all_demos = [] -demo_module = None -for p in os.listdir("./demos"): - old_path = copy.deepcopy(sys.path) - sys.path = [os.path.join(demo_dir, p)] + sys.path - if demo_module is None: - demo_module = importlib.import_module(f"run") - else: - demo_module = importlib.reload(demo_module) - all_demos.append((p, demo_module.demo)) - -with gr.Blocks() as mega_demo: - with gr.Tabs(): - for demo_name, demo in all_demos: - with gr.TabItem(demo_name): - demo.render() - -mega_demo.launch() diff --git a/spaces/awacke1/Gamification-Word-Search/try2.py b/spaces/awacke1/Gamification-Word-Search/try2.py deleted file mode 100644 index f7b6053f98b8466ab51b14819853e209ef2a9d4e..0000000000000000000000000000000000000000 --- a/spaces/awacke1/Gamification-Word-Search/try2.py +++ /dev/null @@ -1,72 +0,0 @@ -import streamlit as st -import 
random -import string - -board_size = 15 -words = [] -board = [[' ' for _ in range(board_size)] for _ in range(board_size)] - -def load_word_list(): - global words - try: - with open("word_list.txt", "r") as f: - words = f.read().split("\n") - except FileNotFoundError: - pass - st.text_area("Enter a list of words (one per line)", "\n".join(words)) - st.sidebar.subheader("Word List:") - for i, word in enumerate(words): - st.sidebar.write(f"{i+1}. {word}") - -def save_word_list(): - global words - with open("word_list.txt", "w") as f: - f.write("\n".join(words)) - st.write("Word list saved successfully!") - -def generate_board(): - global board, words - board = [[' ' for _ in range(board_size)] for _ in range(board_size)] - for word in words: - word = word.upper() - row, col = random.randint(0, board_size - 1), random.randint(0, board_size - 1) - direction = random.choice(['horizontal', 'vertical', 'diagonal']) - if direction == 'horizontal' and col + len(word) <= board_size: - for i, letter in enumerate(word): - board[row][col+i] = letter - elif direction == 'vertical' and row + len(word) <= board_size: - for i, letter in enumerate(word): - board[row+i][col] = letter - elif direction == 'diagonal' and row + len(word) <= board_size and col + len(word) <= board_size: - for i, letter in enumerate(word): - board[row+i][col+i] = letter - for i in range(board_size): - for j in range(board_size): - if board[i][j] == ' ': - board[i][j] = random.choice(string.ascii_uppercase) - -buttons = { - "Load Word List": load_word_list, - "Save Word List": save_word_list, - "Generate Board": generate_board -} - -for button_label, button_func in buttons.items(): - if st.sidebar.button(button_label): - words = st.text_area("Enter a list of words (one per line)", "\n".join(words)).split("\n") - button_func() - st.sidebar.subheader("Word List:") - for i, word in enumerate(words): - st.sidebar.write(f"{i+1}. {word}") - -words = st.text_area("Enter a list of words (one per line)", "\n".join(words)) -if st.button("Save Word List", key="save_word_list_btn"): - words = words.split("\n") - save_word_list() - -st.sidebar.subheader("Word List:") -for i, word in enumerate(words): - st.sidebar.write(f"{i+1}. {word}") - -st.write("Word Search Board:") -st.table(board) diff --git a/spaces/awacke1/Google-Maps-Web-Service-Py/app.py b/spaces/awacke1/Google-Maps-Web-Service-Py/app.py deleted file mode 100644 index 211785cf739484e9945e6e5befd86c89ce8f180c..0000000000000000000000000000000000000000 --- a/spaces/awacke1/Google-Maps-Web-Service-Py/app.py +++ /dev/null @@ -1,185 +0,0 @@ -import googlemaps -import os -#GM_TOKEN=os.environ.get("GM_TOKEN") # Get Google Maps Token Here: https://console.cloud.google.com/google/maps-apis/ - -from datetime import datetime - -# googlemaps_TOKEN = os.environ.get("googlemaps_TOKEN") -# gmaps = googlemaps.Client(key=googlemaps_TOKEN) -gmaps = googlemaps.Client(key='AIzaSyDybq2mxujekZVivmr03Y5-GGHXesn4TLI') - - -def GetMapInfo(inputText): - #geocode_result = gmaps.geocode('640 Jackson Street, St. 
Paul, MN 55101') - geocode_result = gmaps.geocode(inputText) - - geo_address = geocode_result[0]['formatted_address'] - geo_directions = geocode_result[0]['geometry']['location'] - geo_geocode = geocode_result[0]['geometry']['location_type'] - - lat = geo_directions['lat'] - lng = geo_directions['lng'] - - reverse_geocode_result = gmaps.reverse_geocode((lat, lng)) - - now = datetime.now() - directions_result = gmaps.directions("Sydney Town Hall","Parramatta, NSW",mode="transit", departure_time=now) - #addressvalidation_result = gmaps.addressvalidation(['1600 Amphitheatre Pk'], regionCode='US', locality='Mountain View', enableUspsCass=True) - - #return geocode_result, reverse_geocode_result, directions_result, addressvalidation_result - #return geo_address, geo_directions, geo_geocode, reverse_geocode_result, directions_result, addressvalidation_result - return geo_address, geo_directions, geo_geocode - -from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration -import torch -import gradio as gr -from datasets import load_dataset - -# PersistDataset ----- -import os -import csv -from gradio import inputs, outputs -import huggingface_hub -from huggingface_hub import Repository, hf_hub_download, upload_file -from datetime import datetime - -#fastapi is where its at: share your app, share your api -import fastapi - -from typing import List, Dict -import httpx -import pandas as pd -import datasets as ds - -UseMemory=True -HF_TOKEN=os.environ.get("HF_TOKEN") - -def SaveResult(text, outputfileName): - basedir = os.path.dirname(__file__) - savePath = outputfileName - print("Saving: " + text + " to " + savePath) - from os.path import exists - file_exists = exists(savePath) - if file_exists: - with open(outputfileName, "a") as f: #append - f.write(str(text.replace("\n"," "))) - f.write('\n') - else: - with open(outputfileName, "w") as f: #write - f.write(str("time, message, text\n")) # one time only to get column headers for CSV file - f.write(str(text.replace("\n"," "))) - f.write('\n') - return - - -def store_message(name: str, message: str, outputfileName: str): - basedir = os.path.dirname(__file__) - savePath = outputfileName - - # if file doesnt exist, create it with labels - from os.path import exists - file_exists = exists(savePath) - - if (file_exists==False): - with open(savePath, "w") as f: #write - f.write(str("time, message, text\n")) # one time only to get column headers for CSV file - if name and message: - writer = csv.DictWriter(f, fieldnames=["time", "message", "name"]) - writer.writerow( - {"time": str(datetime.now()), "message": message.strip(), "name": name.strip() } - ) - df = pd.read_csv(savePath) - df = df.sort_values(df.columns[0],ascending=False) - else: - if name and message: - with open(savePath, "a") as csvfile: - writer = csv.DictWriter(csvfile, fieldnames=[ "time", "message", "name", ]) - writer.writerow( - {"time": str(datetime.now()), "message": message.strip(), "name": name.strip() } - ) - df = pd.read_csv(savePath) - df = df.sort_values(df.columns[0],ascending=False) - return df - -mname = "facebook/blenderbot-400M-distill" -model = BlenderbotForConditionalGeneration.from_pretrained(mname) -tokenizer = BlenderbotTokenizer.from_pretrained(mname) - -def take_last_tokens(inputs, note_history, history): - if inputs['input_ids'].shape[1] > 128: - inputs['input_ids'] = torch.tensor([inputs['input_ids'][0][-128:].tolist()]) - inputs['attention_mask'] = torch.tensor([inputs['attention_mask'][0][-128:].tolist()]) - note_history = ['
    '.join(note_history[0].split(' ')[2:])] - history = history[1:] - return inputs, note_history, history - -def add_note_to_history(note, note_history):# good example of non async since we wait around til we know it went okay. - note_history.append(note) - note_history = ' '.join(note_history) - return [note_history] - -title = "💬ChatBack🧠💾" -description = """Chatbot With persistent memory dataset allowing multiagent system AI to access a shared dataset as memory pool with stored interactions. - Current Best SOTA Chatbot: https://huggingface.co/facebook/blenderbot-400M-distill?text=Hey+my+name+is+ChatBack%21+Are+you+ready+to+rock%3F """ - -def get_base(filename): - basedir = os.path.dirname(__file__) - print(basedir) - #loadPath = basedir + "\\" + filename # works on windows - loadPath = basedir + filename # works on ubuntu - print(loadPath) - return loadPath - -def chat(message, history): - history = history or [] - if history: - history_useful = [' '.join([str(a[0])+' '+str(a[1]) for a in history])] - else: - history_useful = [] - - history_useful = add_note_to_history(message, history_useful) - inputs = tokenizer(history_useful, return_tensors="pt") - inputs, history_useful, history = take_last_tokens(inputs, history_useful, history) - reply_ids = model.generate(**inputs) - response = tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0] - history_useful = add_note_to_history(response, history_useful) - list_history = history_useful[0].split(' ') - history.append((list_history[-2], list_history[-1])) - - df=pd.DataFrame() - - if UseMemory: - #outputfileName = 'ChatbotMemory.csv' - outputfileName = 'ChatbotMemory3.csv' # Test first time file create - df = store_message(message, response, outputfileName) # Save to dataset - basedir = get_base(outputfileName) - - return history, df, basedir - - - - -with gr.Blocks() as demo: - gr.Markdown("
🍰 AI Google Maps Demonstration🎨
    ") - - with gr.Row(): - t1 = gr.Textbox(lines=1, default="", label="Chat Text:") - b1 = gr.Button("Respond and Retrieve Messages") - b2 = gr.Button("Get Map Information") - - with gr.Row(): # inputs and buttons - s1 = gr.State([]) - df1 = gr.Dataframe(wrap=True, max_rows=1000, overflow_row_behaviour= "paginate") - with gr.Row(): # inputs and buttons - file = gr.File(label="File") - s2 = gr.Markdown() - with gr.Row(): - df21 = gr.Textbox(lines=4, default="", label="Geocode1:") - df22 = gr.Textbox(lines=4, default="", label="Geocode2:") - df23 = gr.Textbox(lines=4, default="", label="Geocode3:") - df3 = gr.Dataframe(wrap=True, max_rows=1000, overflow_row_behaviour= "paginate") - df4 = gr.Dataframe(wrap=True, max_rows=1000, overflow_row_behaviour= "paginate") - b1.click(fn=chat, inputs=[t1, s1], outputs=[s1, df1, file]) - b2.click(fn=GetMapInfo, inputs=[t1], outputs=[df21, df22, df23]) - -demo.launch(debug=True, show_error=True) diff --git a/spaces/awacke1/GradioContinualGenerator/README.md b/spaces/awacke1/GradioContinualGenerator/README.md deleted file mode 100644 index 5980b1419232cf99703e3c03e3539c8808dd6daa..0000000000000000000000000000000000000000 --- a/spaces/awacke1/GradioContinualGenerator/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: GradioContinualGenerator -emoji: 📉 -colorFrom: green -colorTo: gray -sdk: gradio -sdk_version: 3.18.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/awacke1/Tank.Moves.Tank.Fires.Tank.AvoidsObstacles.Combat/app.py b/spaces/awacke1/Tank.Moves.Tank.Fires.Tank.AvoidsObstacles.Combat/app.py deleted file mode 100644 index 773dfef0cdb50d15b5a6392576ea4ab6f32e1b76..0000000000000000000000000000000000000000 --- a/spaces/awacke1/Tank.Moves.Tank.Fires.Tank.AvoidsObstacles.Combat/app.py +++ /dev/null @@ -1,66 +0,0 @@ -import streamlit as st - -def move_tank(): - return "Tank moved." - -def shoot_cannon(): - return "Cannon fired." - -def avoid_obstacle(): - return "Obstacle avoided." 
- -controls = {"m": move_tank, "s": shoot_cannon, "a": avoid_obstacle} - -def get_best_mission(player1_switches, player2_switches): - # Implement your code to calculate the best mission here - # Based on the theory of Atari Combat - # Return the best mission as a string - return "Mission 3" - -def main(): - st.title("Atari Combat Adventure Game") - st.write("Welcome to the Atari Combat Adventure Game!") - st.write("You are about to embark on a journey that will test your tank combat skills and strategic thinking.") - - # Take input from the user for Player 1 - st.subheader("Player 1") - p1_s1 = st.selectbox("Switch 1", [0, 1]) - p1_s2 = st.selectbox("Switch 2", [0, 1]) - p1_s3 = st.selectbox("Switch 3", [0, 1]) - p1_s4 = st.selectbox("Switch 4", [0, 1]) - - # Take input from the user for Player 2 - st.subheader("Player 2") - p2_s1 = st.selectbox("Switch 1", [0, 1], key="p2_s1") - p2_s2 = st.selectbox("Switch 2", [0, 1], key="p2_s2") - p2_s3 = st.selectbox("Switch 3", [0, 1], key="p2_s3") - p2_s4 = st.selectbox("Switch 4", [0, 1], key="p2_s4") - - # Calculate the best mission based on the inputs - best_mission = get_best_mission([p1_s1, p1_s2, p1_s3, p1_s4], [p2_s1, p2_s2, p2_s3, p2_s4]) - - # Output the best mission to the user - st.subheader("Best Mission") - st.write(best_mission) - - # Start the game - st.write("Let's start the game!") - st.write("You are in a tank and your opponent is on the other side of the battlefield.") - st.write("Use the following keys to control your tank:") - st.write("'m' to move, 's' to shoot, 'a' to avoid obstacles.") - - while True: - # Get input from the user - key_pressed = st.text_input("Press a key to continue...") - - # Get the corresponding control function using the key pressed - control_function = controls.get(key_pressed.lower()) - - # Call the control function and print the output - if control_function: - st.write(control_function()) - else: - st.write("Invalid input. 
Please try again.") - -if __name__ == "__main__": - main() diff --git a/spaces/awacke1/Text2SpeechSentimentSave/README.md b/spaces/awacke1/Text2SpeechSentimentSave/README.md deleted file mode 100644 index 95140138f65a3b9334fecf8405f643c62568ef80..0000000000000000000000000000000000000000 --- a/spaces/awacke1/Text2SpeechSentimentSave/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: 🗣️ NLP Text2SpeechSentimentSave 💽 -emoji: 🗣️📚💽 -colorFrom: pink -colorTo: gray -sdk: gradio -sdk_version: 3.0.9 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/awacke1/VizLib-Altair/README.md b/spaces/awacke1/VizLib-Altair/README.md deleted file mode 100644 index 00d4c7507c8f0509ad617556d24145157c6ef62b..0000000000000000000000000000000000000000 --- a/spaces/awacke1/VizLib-Altair/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: VizLib Altair -emoji: 📚 -colorFrom: yellow -colorTo: blue -sdk: streamlit -sdk_version: 1.17.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/banana-projects/web3d/node_modules/three/src/renderers/shaders/ShaderChunk/shadowmap_pars_vertex.glsl.js b/spaces/banana-projects/web3d/node_modules/three/src/renderers/shaders/ShaderChunk/shadowmap_pars_vertex.glsl.js deleted file mode 100644 index 990521088de1038e7c3bee961622f56d809c77ba..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/src/renderers/shaders/ShaderChunk/shadowmap_pars_vertex.glsl.js +++ /dev/null @@ -1,34 +0,0 @@ -export default /* glsl */` -#ifdef USE_SHADOWMAP - - #if NUM_DIR_LIGHTS > 0 - - uniform mat4 directionalShadowMatrix[ NUM_DIR_LIGHTS ]; - varying vec4 vDirectionalShadowCoord[ NUM_DIR_LIGHTS ]; - - #endif - - #if NUM_SPOT_LIGHTS > 0 - - uniform mat4 spotShadowMatrix[ NUM_SPOT_LIGHTS ]; - varying vec4 vSpotShadowCoord[ NUM_SPOT_LIGHTS ]; - - #endif - - #if NUM_POINT_LIGHTS > 0 - - uniform mat4 pointShadowMatrix[ NUM_POINT_LIGHTS ]; - varying vec4 vPointShadowCoord[ NUM_POINT_LIGHTS ]; - - #endif - - /* - #if NUM_RECT_AREA_LIGHTS > 0 - - // TODO (abelnation): uniforms for area light shadows - - #endif - */ - -#endif -`; diff --git a/spaces/bhvsh/stroke-prediction/apps/pred.py b/spaces/bhvsh/stroke-prediction/apps/pred.py deleted file mode 100644 index 25f08d49b9940830242ac304db41a7273c7408f9..0000000000000000000000000000000000000000 --- a/spaces/bhvsh/stroke-prediction/apps/pred.py +++ /dev/null @@ -1,113 +0,0 @@ -import streamlit as st -import lightgbm -import pickle -import numpy as np -from sklearn.preprocessing import RobustScaler -from sklearn.decomposition import PCA -model = pickle.load(open("/home/user/app/apps/models/gbm/gbm-model-pickle.sav", 'rb')) -scaler = pickle.load(open("/home/user/app/apps/models/gbm/gbm-scaler.sav", 'rb')) - -def app(): - with st.sidebar: - st.title('Stroke Prediction using Machine Learning') - - st.write('This model which predicts whether a patient is likely to get a stroke based on the parameters like gender, age various diseases and smoking status.') - st.markdown('_For Machine Learning - 19CS601_') - - st.write('It may take a few moments to complete this survey.') - - with st.container(): - st.subheader('Stage 1: Personal Questions') - - ch_gender = st.selectbox( - 'Gender: ', - ('Male', 'Female', 'Others')) - - ch_age = st.number_input('Age: ',min_value=0, max_value=150, value=18,step=1) - - 
ch_restype = st.radio( - 'Residence Type: ', - ('Urban', 'Rural')) - - ch_marital = st.radio( - 'Did you ever get married? ', - ('Yes', 'No')) - - ch_worktype = st.selectbox( - 'Work type: ', - ('I\'m a child.', 'I\'m self employed', 'Working for the Private.','Working for the Government.','Never worked for anyone.')) - - st.subheader('Stage 2: Health Questions') - - ch_height = st.number_input('Height (in m): ',min_value=0.0, max_value=500.0, value=175.0,step=0.1) - - ch_weight = st.number_input('Weight (in kg): ',min_value=0.0, max_value=5000.0, value=75.0,step=0.01) - - calc_bmi = ch_weight / (ch_height/100)**2 - - ch_bmi = st.number_input('BMI: (Optional)',min_value=0.0, max_value=60.0, value=calc_bmi,step=0.01) - - ch_agl = st.number_input('Average Glucose Level (in mg/dL): ',min_value=50.0, max_value=300.0, value=50.0,step=0.01) - - ch_smokingstat = st.selectbox( - 'Smoking status: ', - ('Never smoked', 'Formerly smoked', 'I\'m an active smoker','I prefer not to speak')) - - st.write('Are you currently suffering from these diseases?') - - ch_hypertn = st.checkbox('Hypertension') - - ch_hearttn = st.checkbox('Heart Disease') - - submit = st.button('Submit') - - if submit: - - ch_gender = 0 if ch_gender=="Female" else 1 if ch_gender=="Male" else 2 - ch_marital = 1 if ch_marital=="Yes" else 0 - ch_worktype = 1 if ch_worktype=="Never worked for anyone." else 4 if ch_worktype=="I\'m a child." else 3 if ch_worktype=="I\'m self employed" else 2 if ch_worktype=="Working for the Private." else 0 - ch_restype = 1 if ch_restype=="Urban" else 1 - ch_smokingstat = 3 if ch_smokingstat=="I\'m an active smoker" else 1 if ch_smokingstat=="Formerly smoked" else 2 if ch_smokingstat=="Never smoked" else 0 - ch_hypertn = 0 if ch_hypertn==False else 1 if ch_hypertn==True else 999 - ch_hearttn = 0 if ch_hearttn==False else 1 if ch_hearttn==True else 999 - - input = scaler.transform([[ch_gender,ch_age,ch_hypertn,ch_hearttn,ch_marital,ch_worktype,ch_restype,ch_agl,ch_bmi,ch_smokingstat]]) - - prediction = model.predict(input) - predictval = model.predict_proba(input) - - with st.expander("Results"): - if prediction==0: - str_result = 'The model predicts that with the probability of %.2f%%, you won\'t be suffering from stroke in the future.'%(predictval[0][0]*100) - st.success(str_result) - st.write(""" - The best way to help prevent a stroke is to eat a healthy diet, exercise regularly, and avoid smoking and drinking too much alcohol. - These lifestyle changes can reduce your risk of problems like: - - arteries becoming clogged with fatty substances (atherosclerosis) - - high blood pressure - - high cholesterol levels - If you have already had a stroke, making these changes can help reduce your risk of having another stroke in the future. - - """) - st.write("Source: [National Health Service (NHS) - United Kingdom](https://www.nhs.uk/conditions/stroke/prevention/)") - - elif prediction==1: - str_result = 'The model predicts that with the probability of %.2f%%, you will be suffering from stroke in the future.'%(predictval[0][1]*100) - st.error(str_result) - if predictval[0][1] >= 0.85: - st.subheader("Please seek medical attention as early as possible to mitigate the stroke disease.") - st.write(""" - The best way to help prevent a stroke is to eat a healthy diet, exercise regularly, and avoid smoking and drinking too much alcohol. 
- These lifestyle changes can reduce your risk of problems like: - - arteries becoming clogged with fatty substances (atherosclerosis) - - high blood pressure - - high cholesterol levels - If you have already had a stroke, making these changes can help reduce your risk of having another stroke in the future. - - """) - st.write("Source: [National Health Service (NHS) - United Kingdom](https://www.nhs.uk/conditions/stroke/prevention/)") - - else: - st.error('NaN: Unexpected error') - st.markdown("Debug: Selected input:") - st.code(input) \ No newline at end of file diff --git a/spaces/bioriAsaeru/text-to-voice/Black Decker Complete Guide Contemporary Sheds.md b/spaces/bioriAsaeru/text-to-voice/Black Decker Complete Guide Contemporary Sheds.md deleted file mode 100644 index 1a30f7929c6d1bb389abf2b35984ba05b8e1d83e..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Black Decker Complete Guide Contemporary Sheds.md +++ /dev/null @@ -1,7 +0,0 @@ -
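Editor's note on the stroke-prediction form above: the height widget is labelled "in m", but the BMI formula divides by 100 and the default value is 175.0, both of which assume centimetres, so the label is the likely error. A consistent version, sketched with that assumption:

```python
import streamlit as st

# Treat the input as centimetres, which is what the /100 and the 175.0 default
# in the original already assume.
ch_height = st.number_input('Height (in cm): ', min_value=0.0, max_value=500.0, value=175.0, step=0.1)
ch_weight = st.number_input('Weight (in kg): ', min_value=0.0, max_value=5000.0, value=75.0, step=0.01)
calc_bmi = ch_weight / (ch_height / 100) ** 2
```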
    -

    The most popular plans from previous editions are preserved, from small garage-style sheds with overhead doors, to kit sheds, to contemporary utility sheds with a dramatic flair. This new edition delves into new styles that are drawing strong interest today, including tiny sheds, miniature tool sheds, and even small habitable sheds that are designed to function mostly as a quiet retreat for practicing a particular hobby or activity. As with all of the hardworking, practical sheds from earlier editions, the new varieties include full-color step by step photos, complete building plan drawings with cutting lists, and clear how-to instructions.

    -

    black decker complete guide contemporary sheds


    Download File ⚙⚙⚙ https://urloso.com/2uyS33



    -

    Shed-building, like any other building process, starts with good techniques. That's why the general skills section has been updated and improved. With this complete guide, you can build just about any shed you dream of. Plus, you'll find information on new tools and products that will make your project go faster and more smoothly. Rounded out with helpful information on important considerations like siting and zoning, Black & Decker Complete Guide to Sheds 3rd Edition truly is a complete guide to this very popular DIY activity.

    -

    You will find everything you need to know to build a shed of your very own. This volume offers complete plans for sheds of all types and sizes, along with thorough information on how to design and build them. Color photos.

    aaccfb2cb3
    -
    -
    \ No newline at end of file diff --git a/spaces/bioriAsaeru/text-to-voice/Download Materi Pmr Wira Pdf.md b/spaces/bioriAsaeru/text-to-voice/Download Materi Pmr Wira Pdf.md deleted file mode 100644 index cec166ff897e48ab59cd1dbdd7e2e588fa7907f9..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Download Materi Pmr Wira Pdf.md +++ /dev/null @@ -1,60 +0,0 @@ -

    download materi pmr wira pdf


    Download ··· https://urloso.com/2uyRNX



    - -grub-mkconfig /usr/sbin/grub-mkconfig - - goltoof: sudo update-grub - - you are not supposed to edit the config manually - - giit can be a pain in the ass when done wrong - - if you want to learn, i'd recommend hacking grub-pc as a starting point - - ejv: it worked!!! is that right? - - looks like it needs to run update-grub again, but it did work - - correct - - if you haven't changed any other config then you're good - - you can probably just leave it alone now - - ejv: sweet, thanks again - - my pleasure - - ejv: why would i want to edit the config manually? im so used to windows where the "conf" file is a separate file - - looks like it's saved! - - which is to say, linux is a "live and let live" OS, i think you'll enjoy it - - ejv: it appears so, but i dont get the whole shell/dos config file idea - - it is a good change of pace - - shell/dos config file is one of the foundations of the OS, btw - - kernel configuration has it's own config file - - kernel config files are separate from any other config files - - ah, well for me, no matter what i do, i get the grub prompt when booting, i just can't go any further - - i have a problem with grub2 - - i keep getting an error 15 - - basso: What type of error? - - I get something about not being able to find the grub menu - - basso: What method are you using to boot? - - from grub - - basso: What are you booting 4fefd39f24
    -
    -
    -

    diff --git a/spaces/bioriAsaeru/text-to-voice/Izotope Free Download For Mac.md b/spaces/bioriAsaeru/text-to-voice/Izotope Free Download For Mac.md deleted file mode 100644 index 82cd4886888597b96434e670dba52fc25996da9d..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Izotope Free Download For Mac.md +++ /dev/null @@ -1,24 +0,0 @@ - -

    iZotope Vocal Doubler is a free plug-in designed to enhance your vocal with a natural doubling effect, adding richness and depth. Our acclaimed doubling technology has been reimagined with a brand new interactive UI dedicated to helping you achieve a great double faster than ever.

    -

    Izotope Free Download For Mac


    Download 🆗 https://urloso.com/2uyPrs



    -

    Download iZotope Ozone 10 Advanced for Mac free latest full version standalone offline setup for Mac OS X. Ozone Advanced 2022 v10.2 for macOS is a comprehensive, yet easy-to-use application that enables you to create, edit, and master the sound in a professional way.

    -

    Ozone Advanced is a competent and efficient piece of software designed from the ground up to edit sound in an elegant way. This impressive program has the ability to fulfill the needs of both individuals and industries. The interface of the application is well organized and anyone can use it. IZotope Ozone Advanced gives sound engineers and musicians a free hand to edit the sounds without any limitations and boundaries. By using its New source separation technology, you can easily correct instrument levels before starting the master or add excitement to key sections.

    -

    iZotope ozone 9 for mac is a complete set of tools for editing the sounds professionally. This glorious audio editing software for mac can fulfil the requirements of industries and individual. It helps build mastering easier with AI-powered helpful audio tools that provide sound editors a rapid start line. With many latest audio editing tools and options, it gives sound engineers and musicians a free-hand to edit the sounds without borderlines and limitations.

    -

    The New supply separation technology in Master Re-balance allows editors to correct instrument levels before beginning the master or add excitement to key sections. Its fabulous NKS supports enables users to master whereas creating music with Machine or Complete Control. Users simply need to open gas on the fly and simply add professional polish whereas creating music on the hardware victimization many different presets and accessible parameters mapped to hardware controls. Sound editors will add loudness, width while not touching the Daw. You can download Auto-Tune Pro 8.1.1 DMG.

    -

    Click on the button below to start downloading IZotope Ozone 9 for mac OS X. We are here to provide clean and fast download for IZotope Ozone 9 v9.01. This link is resume able within 24 hours. Keep visiting themacgo the world of dmgs.

    -

    Freeware programs can be downloaded used free of charge and without any time limitations. Freeware products can be used free of charge for both personal and professional (commercial use).

    -

    -

    Open Source software is software with source code that anyone can inspect, modify or enhance. Programs released under this license can be used at no cost for both personal and commercial purposes. There are many different open source licenses but they all must comply with the Open Source Definition - in brief: the software can be freely used, modified and shared.

    -

    This license is commonly used for video games and it allows users to download and play the game for free. Basically, a product is offered Free to Play (Freemium) and the user can decide if he wants to pay the money (Premium) for additional features, services, virtual or physical goods that expand the functionality of the game. In some cases, ads may be show to the users.

    -

    Demo programs have a limited functionality for free, but charge for an advanced set of features or for the removal of advertisements from the program's interfaces. In some cases, all the functionality is disabled until the license is purchased. Demos are usually not time-limited (like Trial software) but the functionality is limited.

    -

    This software is no longer available for the download. This could be due to the program being discontinued, having a security issue or for other reasons.

    -

    iZotope VocalSynth v2 for Mac is an impressive application which provides you a vocal experience that evolves and adopts a unique style and it opens up a whole new world of vocal possibilities. This plugin allows you to get the old-school sound instantly or lets you create new school of vocal sounds. You can also download Xfer Records SerumFX for Mac.

    -

    iZotope VocalSynth v2 for Mac has been equipped with Biovox which will let you create as well as manipulate biological imprint with a new vocal effect. Human vocal characteristics like vowel shapes, nasality and formants are adjusted for a very smooth as well as textural vocal treatment to any of the audio signal. You have the option of selecting from various different waveshapes like warm. Subtle and all out fuzz distortion. You can alao customize the drive amount with Drive control and use post filtering to change the spectrum. You can also get quick as well as parallel processing with Dry/Wet control. All in all iZotope VocalSynth v2 for Mac is an imposing application that provides you vocal experience that evolves as well as adopts a unique style. You can also download Wondershare TunesGo for Mac.

    -

    iZotope Neoverb is the most intelligent reverb plug-in for music producers that combines Exponential Audio technology with an intuitive, AI-powered workflow to help music producers and mix engineers quickly find the right space for their vocals and instruments. This is a full-featured plug-in that delivers some of the most powerful tools in audio restoration, mastering, and vocal production. It offers advanced controls allowing the producers to customize and dial in the specific sound they are searching for. With this amazing tool, you can create sophisticated custom reverbs without wasting time on menus. It allows you to choose the style you want and will automatically select and mix reverb combinations and advanced settings as you move the sliders. You can also download MAGIX Music Maker 2016 Premium Free Download.

    -

    Avid site gives a link for the Izotope free version but doesnt say which version will work with your system....I've tried various versions 6, 7 ect but none of them will license...I've asked Izotope but havnt heard back yet. I'm on Sierra with MC 2018.12.1. Any thoughts?

    -

    If so, go into your Avid Account, find the product there, copy the Serial that is listed there, then click the provided link, which will take you to the iZotope site which will let you download RX Elements and activate it.

    -

    When you click the link in your avid account, you are sent to izotope. When you click the 'download' button, you are required to sign in or create an account. Once you sign in, you are forwarded to the 'legacy products' page, where all of the old versions of RX are, from version 1-6 and including both the advanced and rx versions. Nowhere is it clear which version to download. The version that worked for me was RX6 ELEMENTS.

    -

    Back just in time for its 15th anniversary, Vinyl is a plug-in that lets you simulate the dust, scratches, and warp ... Developer : Izotope | Type : Plugin | OS : Win 32Bit, Win 64Bit, Mac 32Bit, Mac 64Bit | Format : VST, VST3, AU, RTAS, AAX | Tags : EQ, Filter, Gain, Saturation, Vinyl SimulatorSoftware 1 - 4 of 4 .flex-sidebar display: flex;flex-direction: column;justify-content: space-between;flex-grow: 1;height: 100%; .flex-sidebar-item height:100%; .flex-sticky-boxposition: -webkit-sticky; position: sticky; top: 2rem;margin-bottom:20px; .flex-ad-boxbackground-color:#ccc; .flex-content-boxbackground-color:#333; freestar.config.enabled_slots.push( placementName: "looperman-com_300x250-A1", slotId: "looperman-com_300x250-A1" ); freestar.config.enabled_slots.push( placementName: "looperman-com_300x250-A2", slotId: "looperman-com_300x250-A2" ); freestar.config.enabled_slots.push( placementName: "looperman-com_300x250_1", slotId: "looperman-com_300x250_1" ); freestar.config.enabled_slots.push( placementName: "looperman-com_300x250_2", slotId: "looperman-com_300x250_2" ); Latest Free Software

  • Zither by DIXONBEATS new
  • Essence Plus by Ronan Fed new
  • Audio Wah by Nembrini Audio new
  • Transformant by Igorski new
  • Regrader by Igorski new
  • freestar.config.enabled_slots.push( placementName: "looperman-com_300x250_3", slotId: "looperman-com_300x250_3" ); freestar.config.enabled_slots.push( placementName: "looperman-com_300x250_4", slotId: "looperman-com_300x250_4" ); From The Blog
  • The Ultimate Black Friday 2022 Audio Deals List1
  • Black Friday 2022 Loopcloud 2 Months For $2 + Sublab For FREE0
  • Loopcloud Holiday Sale Up To 50% Off0
  • Massive Savings On FL Studio End Of Year Bundle2
  • FREE Waves LOFI SPACE Plugin - Black Friday 20210
  • Subscribe to our mailing listBe the first to hear about new posts and offers

    -

    Spitfire Audio have released a new sample library that offers the sound of a symphony orchestra resampled through 200 cassette players, and are offering it for free to anyone spending over $299£249 in the Black Weekend sale.

    aaccfb2cb3
    -
    -
    \ No newline at end of file diff --git a/spaces/bipin/image2story/gpt2_story_gen.py b/spaces/bipin/image2story/gpt2_story_gen.py deleted file mode 100644 index d6906f9cb18683e1bfb3a936ddcbbefd4eb3f02c..0000000000000000000000000000000000000000 --- a/spaces/bipin/image2story/gpt2_story_gen.py +++ /dev/null @@ -1,14 +0,0 @@ -from transformers import pipeline - - -def generate_story(image_caption, image, genre, n_stories): - - story_gen = pipeline( - "text-generation", - "pranavpsv/genre-story-generator-v2" - ) - - input = f" <{genre}> {image_caption}" - stories = '\n\n'.join([f"Story {i+1}\n{story_gen(input)[0]['generated_text'].strip(input)}" for i in range(n_stories)]) - - return stories diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/modeling/meta_arch/panoptic_fpn.py b/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/modeling/meta_arch/panoptic_fpn.py deleted file mode 100644 index b31e1c8dc06913d413ae829426e0625fdd5c2f38..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/modeling/meta_arch/panoptic_fpn.py +++ /dev/null @@ -1,269 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) Facebook, Inc. and its affiliates. - -import logging -from typing import Dict, List -import torch -from torch import nn - -from detectron2.config import configurable -from detectron2.structures import ImageList - -from ..postprocessing import detector_postprocess, sem_seg_postprocess -from .build import META_ARCH_REGISTRY -from .rcnn import GeneralizedRCNN -from .semantic_seg import build_sem_seg_head - -__all__ = ["PanopticFPN"] - - -@META_ARCH_REGISTRY.register() -class PanopticFPN(GeneralizedRCNN): - """ - Implement the paper :paper:`PanopticFPN`. - """ - - @configurable - def __init__( - self, - *, - sem_seg_head: nn.Module, - combine_overlap_thresh: float = 0.5, - combine_stuff_area_thresh: float = 4096, - combine_instances_score_thresh: float = 0.5, - **kwargs, - ): - """ - NOTE: this interface is experimental. - - Args: - sem_seg_head: a module for the semantic segmentation head. - combine_overlap_thresh: combine masks into one instances if - they have enough overlap - combine_stuff_area_thresh: ignore stuff areas smaller than this threshold - combine_instances_score_thresh: ignore instances whose score is - smaller than this threshold - - Other arguments are the same as :class:`GeneralizedRCNN`. - """ - super().__init__(**kwargs) - self.sem_seg_head = sem_seg_head - # options when combining instance & semantic outputs - self.combine_overlap_thresh = combine_overlap_thresh - self.combine_stuff_area_thresh = combine_stuff_area_thresh - self.combine_instances_score_thresh = combine_instances_score_thresh - - @classmethod - def from_config(cls, cfg): - ret = super().from_config(cfg) - ret.update( - { - "combine_overlap_thresh": cfg.MODEL.PANOPTIC_FPN.COMBINE.OVERLAP_THRESH, - "combine_stuff_area_thresh": cfg.MODEL.PANOPTIC_FPN.COMBINE.STUFF_AREA_LIMIT, - "combine_instances_score_thresh": cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH, # noqa - } - ) - ret["sem_seg_head"] = build_sem_seg_head(cfg, ret["backbone"].output_shape()) - logger = logging.getLogger(__name__) - if not cfg.MODEL.PANOPTIC_FPN.COMBINE.ENABLED: - logger.warning( - "PANOPTIC_FPN.COMBINED.ENABLED is no longer used. " - " model.inference(do_postprocess=) should be used to toggle postprocessing." 
- ) - if cfg.MODEL.PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT != 1.0: - w = cfg.MODEL.PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT - logger.warning( - "PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT should be replaced by weights on each ROI head." - ) - - def update_weight(x): - if isinstance(x, dict): - return {k: v * w for k, v in x.items()} - else: - return x * w - - roi_heads = ret["roi_heads"] - roi_heads.box_predictor.loss_weight = update_weight(roi_heads.box_predictor.loss_weight) - roi_heads.mask_head.loss_weight = update_weight(roi_heads.mask_head.loss_weight) - return ret - - def forward(self, batched_inputs): - """ - Args: - batched_inputs: a list, batched outputs of :class:`DatasetMapper`. - Each item in the list contains the inputs for one image. - - For now, each item in the list is a dict that contains: - - * "image": Tensor, image in (C, H, W) format. - * "instances": Instances - * "sem_seg": semantic segmentation ground truth. - * Other information that's included in the original dicts, such as: - "height", "width" (int): the output resolution of the model, used in inference. - See :meth:`postprocess` for details. - - Returns: - list[dict]: - each dict has the results for one image. The dict contains the following keys: - - * "instances": see :meth:`GeneralizedRCNN.forward` for its format. - * "sem_seg": see :meth:`SemanticSegmentor.forward` for its format. - * "panoptic_seg": See the return value of - :func:`combine_semantic_and_instance_outputs` for its format. - """ - if not self.training: - return self.inference(batched_inputs) - images = self.preprocess_image(batched_inputs) - features = self.backbone(images.tensor) - - assert "sem_seg" in batched_inputs[0] - gt_sem_seg = [x["sem_seg"].to(self.device) for x in batched_inputs] - gt_sem_seg = ImageList.from_tensors( - gt_sem_seg, - self.backbone.size_divisibility, - self.sem_seg_head.ignore_value, - self.backbone.padding_constraints, - ).tensor - sem_seg_results, sem_seg_losses = self.sem_seg_head(features, gt_sem_seg) - - gt_instances = [x["instances"].to(self.device) for x in batched_inputs] - proposals, proposal_losses = self.proposal_generator(images, features, gt_instances) - detector_results, detector_losses = self.roi_heads( - images, features, proposals, gt_instances - ) - - losses = sem_seg_losses - losses.update(proposal_losses) - losses.update(detector_losses) - return losses - - def inference(self, batched_inputs: List[Dict[str, torch.Tensor]], do_postprocess: bool = True): - """ - Run inference on the given inputs. - - Args: - batched_inputs (list[dict]): same as in :meth:`forward` - do_postprocess (bool): whether to apply post-processing on the outputs. - - Returns: - When do_postprocess=True, see docs in :meth:`forward`. - Otherwise, returns a (list[Instances], list[Tensor]) that contains - the raw detector outputs, and raw semantic segmentation outputs. 
- """ - images = self.preprocess_image(batched_inputs) - features = self.backbone(images.tensor) - sem_seg_results, sem_seg_losses = self.sem_seg_head(features, None) - proposals, _ = self.proposal_generator(images, features, None) - detector_results, _ = self.roi_heads(images, features, proposals, None) - - if do_postprocess: - processed_results = [] - for sem_seg_result, detector_result, input_per_image, image_size in zip( - sem_seg_results, detector_results, batched_inputs, images.image_sizes - ): - height = input_per_image.get("height", image_size[0]) - width = input_per_image.get("width", image_size[1]) - sem_seg_r = sem_seg_postprocess(sem_seg_result, image_size, height, width) - detector_r = detector_postprocess(detector_result, height, width) - - processed_results.append({"sem_seg": sem_seg_r, "instances": detector_r}) - - panoptic_r = combine_semantic_and_instance_outputs( - detector_r, - sem_seg_r.argmax(dim=0), - self.combine_overlap_thresh, - self.combine_stuff_area_thresh, - self.combine_instances_score_thresh, - ) - processed_results[-1]["panoptic_seg"] = panoptic_r - return processed_results - else: - return detector_results, sem_seg_results - - -def combine_semantic_and_instance_outputs( - instance_results, - semantic_results, - overlap_threshold, - stuff_area_thresh, - instances_score_thresh, -): - """ - Implement a simple combining logic following - "combine_semantic_and_instance_predictions.py" in panopticapi - to produce panoptic segmentation outputs. - - Args: - instance_results: output of :func:`detector_postprocess`. - semantic_results: an (H, W) tensor, each element is the contiguous semantic - category id - - Returns: - panoptic_seg (Tensor): of shape (height, width) where the values are ids for each segment. - segments_info (list[dict]): Describe each segment in `panoptic_seg`. - Each dict contains keys "id", "category_id", "isthing". 
- """ - panoptic_seg = torch.zeros_like(semantic_results, dtype=torch.int32) - - # sort instance outputs by scores - sorted_inds = torch.argsort(-instance_results.scores) - - current_segment_id = 0 - segments_info = [] - - instance_masks = instance_results.pred_masks.to(dtype=torch.bool, device=panoptic_seg.device) - - # Add instances one-by-one, check for overlaps with existing ones - for inst_id in sorted_inds: - score = instance_results.scores[inst_id].item() - if score < instances_score_thresh: - break - mask = instance_masks[inst_id] # H,W - mask_area = mask.sum().item() - - if mask_area == 0: - continue - - intersect = (mask > 0) & (panoptic_seg > 0) - intersect_area = intersect.sum().item() - - if intersect_area * 1.0 / mask_area > overlap_threshold: - continue - - if intersect_area > 0: - mask = mask & (panoptic_seg == 0) - - current_segment_id += 1 - panoptic_seg[mask] = current_segment_id - segments_info.append( - { - "id": current_segment_id, - "isthing": True, - "score": score, - "category_id": instance_results.pred_classes[inst_id].item(), - "instance_id": inst_id.item(), - } - ) - - # Add semantic results to remaining empty areas - semantic_labels = torch.unique(semantic_results).cpu().tolist() - for semantic_label in semantic_labels: - if semantic_label == 0: # 0 is a special "thing" class - continue - mask = (semantic_results == semantic_label) & (panoptic_seg == 0) - mask_area = mask.sum().item() - if mask_area < stuff_area_thresh: - continue - - current_segment_id += 1 - panoptic_seg[mask] = current_segment_id - segments_info.append( - { - "id": current_segment_id, - "isthing": False, - "category_id": semantic_label, - "area": mask_area, - } - ) - - return panoptic_seg, segments_info diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/docs/tutorials/data_loading.md b/spaces/brjathu/HMR2.0/vendor/detectron2/docs/tutorials/data_loading.md deleted file mode 100644 index 1d2769fc513abb0981a140f3a6b6432538704261..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/detectron2/docs/tutorials/data_loading.md +++ /dev/null @@ -1,95 +0,0 @@ - -# Dataloader - -Dataloader is the component that provides data to models. -A dataloader usually (but not necessarily) takes raw information from [datasets](./datasets.md), -and process them into a format needed by the model. - -## How the Existing Dataloader Works - -Detectron2 contains a builtin data loading pipeline. -It's good to understand how it works, in case you need to write a custom one. - -Detectron2 provides two functions -[build_detection_{train,test}_loader](../modules/data.html#detectron2.data.build_detection_train_loader) -that create a default data loader from a given config. -Here is how `build_detection_{train,test}_loader` work: - -1. It takes the name of a registered dataset (e.g., "coco_2017_train") and loads a `list[dict]` representing the dataset items - in a lightweight format. These dataset items are not yet ready to be used by the model (e.g., images are - not loaded into memory, random augmentations have not been applied, etc.). - Details about the dataset format and dataset registration can be found in - [datasets](./datasets.md). -2. Each dict in this list is mapped by a function ("mapper"): - * Users can customize this mapping function by specifying the "mapper" argument in - `build_detection_{train,test}_loader`. The default mapper is [DatasetMapper](../modules/data.html#detectron2.data.DatasetMapper). 
- * The output format of the mapper can be arbitrary, as long as it is accepted by the consumer of this data loader (usually the model). - The outputs of the default mapper, after batching, follow the default model input format documented in - [Use Models](./models.html#model-input-format). - * The role of the mapper is to transform the lightweight representation of a dataset item into a format - that is ready for the model to consume (including, e.g., read images, perform random data augmentation and convert to torch Tensors). - If you would like to perform custom transformations to data, you often want a custom mapper. -3. The outputs of the mapper are batched (simply into a list). -4. This batched data is the output of the data loader. Typically, it's also the input of - `model.forward()`. - - -## Write a Custom Dataloader - -Using a different "mapper" with `build_detection_{train,test}_loader(mapper=)` works for most use cases -of custom data loading. -For example, if you want to resize all images to a fixed size for training, use: - -```python -import detectron2.data.transforms as T -from detectron2.data import DatasetMapper # the default mapper -dataloader = build_detection_train_loader(cfg, - mapper=DatasetMapper(cfg, is_train=True, augmentations=[ - T.Resize((800, 800)) - ])) -# use this dataloader instead of the default -``` -If the arguments of the default [DatasetMapper](../modules/data.html#detectron2.data.DatasetMapper) -does not provide what you need, you may write a custom mapper function and use it instead, e.g.: - -```python -from detectron2.data import detection_utils as utils - # Show how to implement a minimal mapper, similar to the default DatasetMapper -def mapper(dataset_dict): - dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below - # can use other ways to read image - image = utils.read_image(dataset_dict["file_name"], format="BGR") - # See "Data Augmentation" tutorial for details usage - auginput = T.AugInput(image) - transform = T.Resize((800, 800))(auginput) - image = torch.from_numpy(auginput.image.transpose(2, 0, 1)) - annos = [ - utils.transform_instance_annotations(annotation, [transform], image.shape[1:]) - for annotation in dataset_dict.pop("annotations") - ] - return { - # create the format that the model expects - "image": image, - "instances": utils.annotations_to_instances(annos, image.shape[1:]) - } -dataloader = build_detection_train_loader(cfg, mapper=mapper) -``` - -If you want to change not only the mapper (e.g., in order to implement different sampling or batching logic), -`build_detection_train_loader` won't work and you will need to write a different data loader. -The data loader is simply a -python iterator that produces [the format](./models.md) that the model accepts. -You can implement it using any tools you like. - -No matter what to implement, it's recommended to -check out [API documentation of detectron2.data](../modules/data) to learn more about the APIs of -these functions. - -## Use a Custom Dataloader - -If you use [DefaultTrainer](../modules/engine.html#detectron2.engine.defaults.DefaultTrainer), -you can overwrite its `build_{train,test}_loader` method to use your own dataloader. -See the [deeplab dataloader](../../projects/DeepLab/train_net.py) -for an example. - -If you write your own training loop, you can plug in your data loader easily. 
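-
-To make the last point concrete, here is a minimal sketch (not part of detectron2 itself) that plugs a custom dataloader into `DefaultTrainer` by overriding `build_train_loader`. It only recombines pieces shown above: the fixed-size resize mapper and `build_detection_train_loader`. The class name and the particular augmentation are illustrative assumptions.
-
-```python
-import detectron2.data.transforms as T
-from detectron2.data import DatasetMapper, build_detection_train_loader
-from detectron2.engine import DefaultTrainer
-
-class TrainerWithCustomLoader(DefaultTrainer):
-    """Hypothetical trainer whose training data comes from a custom mapper."""
-
-    @classmethod
-    def build_train_loader(cls, cfg):
-        # Any callable mapper accepted by build_detection_train_loader works here;
-        # resizing every image to 800x800 just mirrors the example above.
-        mapper = DatasetMapper(cfg, is_train=True, augmentations=[T.Resize((800, 800))])
-        return build_detection_train_loader(cfg, mapper=mapper)
-```
-
-Such a trainer is then used like `DefaultTrainer` itself, e.g. `TrainerWithCustomLoader(cfg).train()`, and `build_test_loader` can be overridden in the same way.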
diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/projects/Panoptic-DeepLab/panoptic_deeplab/target_generator.py b/spaces/brjathu/HMR2.0/vendor/detectron2/projects/Panoptic-DeepLab/panoptic_deeplab/target_generator.py deleted file mode 100644 index a575c672494327e0e13c51de04ceca0f2bddc102..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/detectron2/projects/Panoptic-DeepLab/panoptic_deeplab/target_generator.py +++ /dev/null @@ -1,155 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# Reference: https://github.com/bowenc0221/panoptic-deeplab/blob/aa934324b55a34ce95fea143aea1cb7a6dbe04bd/segmentation/data/transforms/target_transforms.py#L11 # noqa -import numpy as np -import torch - - -class PanopticDeepLabTargetGenerator(object): - """ - Generates training targets for Panoptic-DeepLab. - """ - - def __init__( - self, - ignore_label, - thing_ids, - sigma=8, - ignore_stuff_in_offset=False, - small_instance_area=0, - small_instance_weight=1, - ignore_crowd_in_semantic=False, - ): - """ - Args: - ignore_label: Integer, the ignore label for semantic segmentation. - thing_ids: Set, a set of ids from contiguous category ids belonging - to thing categories. - sigma: the sigma for Gaussian kernel. - ignore_stuff_in_offset: Boolean, whether to ignore stuff region when - training the offset branch. - small_instance_area: Integer, indicates largest area for small instances. - small_instance_weight: Integer, indicates semantic loss weights for - small instances. - ignore_crowd_in_semantic: Boolean, whether to ignore crowd region in - semantic segmentation branch, crowd region is ignored in the original - TensorFlow implementation. - """ - self.ignore_label = ignore_label - self.thing_ids = set(thing_ids) - self.ignore_stuff_in_offset = ignore_stuff_in_offset - self.small_instance_area = small_instance_area - self.small_instance_weight = small_instance_weight - self.ignore_crowd_in_semantic = ignore_crowd_in_semantic - - # Generate the default Gaussian image for each center - self.sigma = sigma - size = 6 * sigma + 3 - x = np.arange(0, size, 1, float) - y = x[:, np.newaxis] - x0, y0 = 3 * sigma + 1, 3 * sigma + 1 - self.g = np.exp(-((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma**2)) - - def __call__(self, panoptic, segments_info): - """Generates the training target. - reference: https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/preparation/createPanopticImgs.py # noqa - reference: https://github.com/facebookresearch/detectron2/blob/main/datasets/prepare_panoptic_fpn.py#L18 # noqa - - Args: - panoptic: numpy.array, panoptic label, we assume it is already - converted from rgb image by panopticapi.utils.rgb2id. - segments_info (list[dict]): see detectron2 documentation of "Use Custom Datasets". - - Returns: - A dictionary with fields: - - sem_seg: Tensor, semantic label, shape=(H, W). - - center: Tensor, center heatmap, shape=(H, W). - - center_points: List, center coordinates, with tuple - (y-coord, x-coord). - - offset: Tensor, offset, shape=(2, H, W), first dim is - (offset_y, offset_x). - - sem_seg_weights: Tensor, loss weight for semantic prediction, - shape=(H, W). - - center_weights: Tensor, ignore region of center prediction, - shape=(H, W), used as weights for center regression 0 is - ignore, 1 is has instance. Multiply this mask to loss. - - offset_weights: Tensor, ignore region of offset prediction, - shape=(H, W), used as weights for offset regression 0 is - ignore, 1 is has instance. Multiply this mask to loss. 
- """ - height, width = panoptic.shape[0], panoptic.shape[1] - semantic = np.zeros_like(panoptic, dtype=np.uint8) + self.ignore_label - center = np.zeros((height, width), dtype=np.float32) - center_pts = [] - offset = np.zeros((2, height, width), dtype=np.float32) - y_coord, x_coord = np.meshgrid( - np.arange(height, dtype=np.float32), np.arange(width, dtype=np.float32), indexing="ij" - ) - # Generate pixel-wise loss weights - semantic_weights = np.ones_like(panoptic, dtype=np.uint8) - # 0: ignore, 1: has instance - # three conditions for a region to be ignored for instance branches: - # (1) It is labeled as `ignore_label` - # (2) It is crowd region (iscrowd=1) - # (3) (Optional) It is stuff region (for offset branch) - center_weights = np.zeros_like(panoptic, dtype=np.uint8) - offset_weights = np.zeros_like(panoptic, dtype=np.uint8) - for seg in segments_info: - cat_id = seg["category_id"] - if not (self.ignore_crowd_in_semantic and seg["iscrowd"]): - semantic[panoptic == seg["id"]] = cat_id - if not seg["iscrowd"]: - # Ignored regions are not in `segments_info`. - # Handle crowd region. - center_weights[panoptic == seg["id"]] = 1 - if not self.ignore_stuff_in_offset or cat_id in self.thing_ids: - offset_weights[panoptic == seg["id"]] = 1 - if cat_id in self.thing_ids: - # find instance center - mask_index = np.where(panoptic == seg["id"]) - if len(mask_index[0]) == 0: - # the instance is completely cropped - continue - - # Find instance area - ins_area = len(mask_index[0]) - if ins_area < self.small_instance_area: - semantic_weights[panoptic == seg["id"]] = self.small_instance_weight - - center_y, center_x = np.mean(mask_index[0]), np.mean(mask_index[1]) - center_pts.append([center_y, center_x]) - - # generate center heatmap - y, x = int(round(center_y)), int(round(center_x)) - sigma = self.sigma - # upper left - ul = int(np.round(x - 3 * sigma - 1)), int(np.round(y - 3 * sigma - 1)) - # bottom right - br = int(np.round(x + 3 * sigma + 2)), int(np.round(y + 3 * sigma + 2)) - - # start and end indices in default Gaussian image - gaussian_x0, gaussian_x1 = max(0, -ul[0]), min(br[0], width) - ul[0] - gaussian_y0, gaussian_y1 = max(0, -ul[1]), min(br[1], height) - ul[1] - - # start and end indices in center heatmap image - center_x0, center_x1 = max(0, ul[0]), min(br[0], width) - center_y0, center_y1 = max(0, ul[1]), min(br[1], height) - center[center_y0:center_y1, center_x0:center_x1] = np.maximum( - center[center_y0:center_y1, center_x0:center_x1], - self.g[gaussian_y0:gaussian_y1, gaussian_x0:gaussian_x1], - ) - - # generate offset (2, h, w) -> (y-dir, x-dir) - offset[0][mask_index] = center_y - y_coord[mask_index] - offset[1][mask_index] = center_x - x_coord[mask_index] - - center_weights = center_weights[None] - offset_weights = offset_weights[None] - return dict( - sem_seg=torch.as_tensor(semantic.astype("long")), - center=torch.as_tensor(center.astype(np.float32)), - center_points=center_pts, - offset=torch.as_tensor(offset.astype(np.float32)), - sem_seg_weights=torch.as_tensor(semantic_weights.astype(np.float32)), - center_weights=torch.as_tensor(center_weights.astype(np.float32)), - offset_weights=torch.as_tensor(offset_weights.astype(np.float32)), - ) diff --git a/spaces/cadige/04-Gradio-SOTA/app (1).py b/spaces/cadige/04-Gradio-SOTA/app (1).py deleted file mode 100644 index c1cd92499cf1c7d2a91b4dc226bf2d558ff67661..0000000000000000000000000000000000000000 --- a/spaces/cadige/04-Gradio-SOTA/app (1).py +++ /dev/null @@ -1,51 +0,0 @@ -import gradio as gr -from qasrl_model_pipeline 
import QASRL_Pipeline - -models = ["kleinay/qanom-seq2seq-model-baseline", - "kleinay/qanom-seq2seq-model-joint"] -pipelines = {model: QASRL_Pipeline(model) for model in models} - - -description = f"""Using Seq2Seq T5 model which takes a sequence of items and outputs another sequence this model generates Questions and Answers (QA) with focus on Semantic Role Labeling (SRL)""" -title="Seq2Seq T5 Questions and Answers (QA) with Semantic Role Labeling (SRL)" -examples = [[models[0], "In March and April the patient
    had two falls. One was related to asthma, heart palpitations. The second was due to syncope and post covid vaccination dizziness during exercise. The patient is now getting an EKG. Former EKG had shown that there was a bundle branch block. Patient had some uncontrolled immune system reactions like anaphylaxis and shortness of breath.", True, "fall"], - [models[1], "In March and April the patient had two falls. One was related to asthma, heart palpitations. The second was due to syncope and post covid vaccination dizziness during exercise. The patient is now getting an EKG. Former EKG had shown that there was a bundle branch block. Patient had some uncontrolled immune system reactions
    like anaphylaxis and shortness of breath.", True, "reactions"], - [models[0], "In March and April the patient had two falls. One was related
    to asthma, heart palpitations. The second was due to syncope and post covid vaccination dizziness during exercise. The patient is now getting an EKG. Former EKG had shown that there was a bundle branch block. Patient had some uncontrolled immune system reactions like anaphylaxis and shortness of breath.", True, "relate"], - [models[1], "In March and April the patient
    had two falls. One was related to asthma, heart palpitations. The second was due to syncope and post covid vaccination dizziness during exercise. The patient is now getting an EKG. Former EKG had shown that there was a bundle branch block. Patient had some uncontrolled immune system reactions like anaphylaxis and shortness of breath.", False, "fall"]] - -input_sent_box_label = "Insert sentence here. Mark the predicate by adding the token '
    ' before it." -verb_form_inp_placeholder = "e.g. 'decide' for the nominalization 'decision', 'teach' for 'teacher', etc." -links = """
    -QASRL Website | Model Repo at Huggingface Hub -
    """ -def call(model_name, sentence, is_nominal, verb_form): - predicate_marker="
    " - if predicate_marker not in sentence: - raise ValueError("You must highlight one word of the sentence as a predicate using preceding '
    '.") - - if not verb_form: - if is_nominal: - raise ValueError("You should provide the verbal form of the nominalization") - - toks = sentence.split(" ") - pred_idx = toks.index(predicate_marker) - predicate = toks(pred_idx+1) - verb_form=predicate - pipeline = pipelines[model_name] - pipe_out = pipeline([sentence], - predicate_marker=predicate_marker, - predicate_type="nominal" if is_nominal else "verbal", - verb_form=verb_form)[0] - return pipe_out["QAs"], pipe_out["generated_text"] -iface = gr.Interface(fn=call, - inputs=[gr.inputs.Radio(choices=models, default=models[0], label="Model"), - gr.inputs.Textbox(placeholder=input_sent_box_label, label="Sentence", lines=4), - gr.inputs.Checkbox(default=True, label="Is Nominalization?"), - gr.inputs.Textbox(placeholder=verb_form_inp_placeholder, label="Verbal form (for nominalizations)", default='')], - outputs=[gr.outputs.JSON(label="Model Output - QASRL"), gr.outputs.Textbox(label="Raw output sequence")], - title=title, - description=description, - article=links, - examples=examples ) - -iface.launch() \ No newline at end of file diff --git a/spaces/chansung/zero2story/interfaces/plot_gen_ui.py b/spaces/chansung/zero2story/interfaces/plot_gen_ui.py deleted file mode 100644 index 41157d8c0492b9b76027e472b6ad53c72f5b2373..0000000000000000000000000000000000000000 --- a/spaces/chansung/zero2story/interfaces/plot_gen_ui.py +++ /dev/null @@ -1,113 +0,0 @@ -import re -import gradio as gr -from interfaces import utils -from modules import get_llm_factory - -async def plot_gen( - temperature, - genre, place, mood, - side_char_enable1, side_char_enable2, side_char_enable3, - main_char_name, main_char_age, main_char_personality, main_char_job, - side_char_name1, side_char_age1, side_char_personality1, side_char_job1, - side_char_name2, side_char_age2, side_char_personality2, side_char_job2, - side_char_name3, side_char_age3, side_char_personality3, side_char_job3, - llm_type="PaLM" -): - factory = get_llm_factory(llm_type) - prompts = factory.create_prompt_manager().prompts - llm_service = factory.create_llm_service() - - side_char_prompt = utils.add_side_character( - [side_char_enable1, side_char_enable2, side_char_enable3], - [side_char_name1, side_char_name2, side_char_name3], - [side_char_job1, side_char_job2, side_char_job3], - [side_char_age1, side_char_age2, side_char_age3], - [side_char_personality1, side_char_personality2, side_char_personality3], - ) - prompt = prompts['plot_gen']['main_plot_gen'].format( - genre=genre, place=place, mood=mood, - main_char_name=main_char_name, - main_char_job=main_char_job, - main_char_age=main_char_age, - main_char_personality=main_char_personality, - side_char_placeholder=side_char_prompt, - ) - - print(f"generated prompt:\n{prompt}") - parameters = llm_service.make_params(mode="text", temperature=temperature, top_k=40, top_p=1.0, max_output_tokens=4096) - response_json = await utils.retry_until_valid_json(prompt, parameters=parameters) - - return ( - response_json['title'], - f"## {response_json['title']}", - response_json['outline']['rising action'], - response_json['outline']['crisis'], - response_json['outline']['climax'], - response_json['outline']['falling action'], - response_json['outline']['denouement'], - ) - - -async def first_story_gen( - title, - rising_action, crisis, climax, falling_action, denouement, - genre, place, mood, - side_char_enable1, side_char_enable2, side_char_enable3, - main_char_name, main_char_age, main_char_personality, main_char_job, - side_char_name1, side_char_age1, 
side_char_personality1, side_char_job1, - side_char_name2, side_char_age2, side_char_personality2, side_char_job2, - side_char_name3, side_char_age3, side_char_personality3, side_char_job3, - cursors, cur_cursor, - llm_type="PaLM" -): - factory = get_llm_factory(llm_type) - prompts = factory.create_prompt_manager().prompts - llm_service = factory.create_llm_service() - - side_char_prompt = utils.add_side_character( - [side_char_enable1, side_char_enable2, side_char_enable3], - [side_char_name1, side_char_name2, side_char_name3], - [side_char_job1, side_char_job2, side_char_job3], - [side_char_age1, side_char_age2, side_char_age3], - [side_char_personality1, side_char_personality2, side_char_personality3], - ) - prompt = prompts['plot_gen']['first_story_gen'].format( - genre=genre, place=place, mood=mood, - main_char_name=main_char_name, - main_char_job=main_char_job, - main_char_age=main_char_age, - main_char_personality=main_char_personality, - side_char_placeholder=side_char_prompt, - title=title, - rising_action=rising_action, - crisis=crisis, - climax=climax, - falling_action=falling_action, - denouement=denouement, - ) - - print(f"generated prompt:\n{prompt}") - parameters = llm_service.make_params(mode="text", temperature=1.0, top_k=40, top_p=1.0, max_output_tokens=4096) - response_json = await utils.retry_until_valid_json(prompt, parameters=parameters) - - chapter_title = response_json["chapter_title"] - pattern = r"Chapter\s+\d+\s*[:.]" - chapter_title = re.sub(pattern, "", chapter_title) - - cursors.append({ - "title": chapter_title, - "plot_type": "rising action", - "story": "\n\n".join(response_json["paragraphs"]) - }) - - return ( - f"### {chapter_title} (\"rising action\")", - "\n\n".join(response_json["paragraphs"]), - cursors, - cur_cursor, - gr.update(interactive=True), - gr.update(interactive=True), - gr.update(value=response_json["actions"][0], interactive=True), - gr.update(value=response_json["actions"][1], interactive=True), - gr.update(value=response_json["actions"][2], interactive=True), - ) \ No newline at end of file diff --git a/spaces/chasetank/manual_assistant/InnovationHub/llm/chain.py b/spaces/chasetank/manual_assistant/InnovationHub/llm/chain.py deleted file mode 100644 index e3ef690848081e148942f9cc6a405fb9243d2b15..0000000000000000000000000000000000000000 --- a/spaces/chasetank/manual_assistant/InnovationHub/llm/chain.py +++ /dev/null @@ -1,127 +0,0 @@ -import gradio -from langchain.embeddings import HuggingFaceBgeEmbeddings -from langchain.vectorstores import FAISS -from langchain import OpenAI, ConversationChain, LLMChain, PromptTemplate -from langchain.chains.conversation.memory import ConversationBufferMemory - -def chat(question, vehicle, k=10, temperature=0.01): - chatgpt_chain = create_chatgpt_chain(temperature=temperature) - response = ask_question(question=question, vehicle=vehicle, k=k, embeddings=model_norm, chatgpt_chain=chatgpt_chain) - return response - -def create_chatgpt_chain(temperature): - template = """ - {chat_history} - Human: {question} - AI: - """ - prompt_template = PromptTemplate(input_variables=["chat_history", "question"], template=template) - return LLMChain(llm=OpenAI(temperature=temperature,model_name="gpt-3.5-turbo"),prompt=prompt_template,verbose=True,memory=ConversationBufferMemory(memory_key="chat_history")) - -def ask_question(question, vehicle, k, embeddings, chatgpt_chain): - index = FAISS.load_local(folder_path=db_paths[vehicle], embeddings=embeddings) - prompt = get_prompt(question=question, vehicle=vehicle, k=k) - 
response = chatgpt_chain.run(question=prompt) - return response - -def get_prompt(question, vehicle, k): - prompt = f""" - I need information from my {vehicle} manual. - I will provide an excerpt from the manual. Use the excerpt and nothing else to answer the question. - You must refer to the excerpt as "{vehicle} Manual" in your response. Here is the excerpt: - """ - - index = FAISS.load_local(folder_path=db_paths[vehicle], embeddings=model_norm) - similar_docs = index.similarity_search(query=question, k=k) - context = [] - for d in similar_docs: - content = d.page_content - context.append(content) - - user_input = prompt + '\n[EXCERPT]' + '\n'.join(context[:k]) + '\nQuestion: ' + question - return user_input - -db_paths = { - "2023 AMG C-Coupe-Cab": "data/amg_c_coupe_cab", - "2023 AMG C-Sedan": "data/amg_c_sedan", - "2023 AMG E-Coupe-Cab": "data/amg_e_coupe_cab", - "2023 AMG E-Sedan_wagon": "data/amg_e_sedan_wagon", - "2023 AMG_EQE-Sedan": "data/amg_eqe_sedan", - "2023 AMG_GLE-suv": "data/amg_gle_suv", - "2023 AMG_GLS SUV": "data/amg_gls_suv", - "2023 C-Cab": "data/c_cab", - "2023 C-Coupe": "data/c_coupe", - "2023 C-Sedan": "data/c_sedan", - "2023 CLA": "data/cla", - "2023 E-Cab": "data/e_cab", - "2023 E-Coupe": "data/e_coupe", - "2023 E-Sedan": "data/e_sedan", - "2023 E-wagon": "data/e_wagon", - "2023 eqb SUV": "data/eqb_suv", - "2023 EQE-Sedan": "data/eqe_sedan", - "2023 EQS_Sedan": "data/eqs_sedan", - "2023 EQS SUV": "data/eqs_suv", - "2023 GLA": "data/gla", - "2023 GLB": "data/glb", - "2023 GLC-Coupe": "data/glc_coupe", - "2023 GLE-Coupe": "data/gle_coupe", - "2023 GLE-suv": "data/gle_suv", - "2023 GLS SUV": "data/gls_suv" -} - -vehicle_options = [ - "2023 AMG C-Coupe-Cab", - "2023 AMG C-Sedan", - "2023 AMG E-Coupe-Cab", - "2023 AMG E-Sedan_wagon", - "2023 AMG_EQE-Sedan", - "2023 AMG_GLE-suv", - "2023 AMG_GLS SUV", - "2023 C-Cab", - "2023 C-Coupe", - "2023 C-Sedan", - "2023 CLA", - "2023 E-Cab", - "2023 E-Coupe", - "2023 E-Sedan", - "2023 E-wagon", - "2023 eqb SUV", - "2023 EQE-Sedan", - "2023 EQS SUV", - "2023 EQS_Sedan", - "2023 GLA", - "2023 GLB", - "2023 GLC-Coupe", - "2023 GLE-Coupe", - "2023 GLE-suv", - "2023 GLS SUV", -] - -model_name = "BAAI/bge-large-en" -model_kwargs = {'device': 'cpu'} -encode_kwargs = {'normalize_embeddings': True} # set True to compute cosine similarity -model_norm = HuggingFaceBgeEmbeddings( - model_name=model_name, - model_kwargs=model_kwargs, - encode_kwargs=encode_kwargs -) - -def start_ui(): - chatbot_interface = gradio.Interface( - fn=chat, - inputs=["text", - gradio.inputs.Dropdown(vehicle_options, label="Select Mercedes-Benz Owner's Manual") - #gradio.inputs.Slider(minimum=1, maximum=10, step=1, label="k") - ], - outputs="text", - title="Mercedes-Benz Owner's Manual", - description="Ask a question and get answers from Mercedes-Benz Owner's Manual.Disclaimer: THIS IS NOT OFFICIAL AND MAY NOT BE AVAILABLE ALL THE TIME. ALWAYS LOOK AT THE OFFICIAL DOCUMENTATION at https://www.mbusa.com/en/owners/manuals", - examples=[["What are the different features of the dashboard console?", "2023 S-Class", 10, 0.01], - ["What is flacon? Which page has that information? Show me all the exact content from that page", "2023 S-Class", 10, 0.01], - ["What is hyperscreen?", "2023 EQS", 10, 0.01], - ["Where can I find my vin?", "2023 EQS", 10, 0.01], - ["Does it take more than 30 minutes to charge? Which page has that information? Show me all the exact content from that page", "2023 EQE", 10, 0.01]], - article = '
    visitor badge
    ' - ) - - chatbot_interface.launch() \ No newline at end of file diff --git a/spaces/chendl/compositional_test/transformers/examples/research_projects/mm-imdb/README.md b/spaces/chendl/compositional_test/transformers/examples/research_projects/mm-imdb/README.md deleted file mode 100644 index 7cfc2a7487ba71fc2dc7c49c4b0380935b775d07..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/examples/research_projects/mm-imdb/README.md +++ /dev/null @@ -1,23 +0,0 @@ -## MM-IMDb - -Based on the script [`run_mmimdb.py`](https://github.com/huggingface/transformers/blob/main/examples/research_projects/mm-imdb/run_mmimdb.py). - -[MM-IMDb](http://lisi1.unal.edu.co/mmimdb/) is a Multimodal dataset with around 26,000 movies including images, plots and other metadata. - -### Training on MM-IMDb - -``` -python run_mmimdb.py \ - --data_dir /path/to/mmimdb/dataset/ \ - --model_type bert \ - --model_name_or_path bert-base-uncased \ - --output_dir /path/to/save/dir/ \ - --do_train \ - --do_eval \ - --max_seq_len 512 \ - --gradient_accumulation_steps 20 \ - --num_image_embeds 3 \ - --num_train_epochs 100 \ - --patience 5 -``` - diff --git a/spaces/chikoto/Umamusume-DeBERTa-VITS2-TTS-JP/text/english.py b/spaces/chikoto/Umamusume-DeBERTa-VITS2-TTS-JP/text/english.py deleted file mode 100644 index 0f9339c9ed771dab5136978eaaab194ec3fe2395..0000000000000000000000000000000000000000 --- a/spaces/chikoto/Umamusume-DeBERTa-VITS2-TTS-JP/text/english.py +++ /dev/null @@ -1,214 +0,0 @@ -import pickle -import os -import re -from g2p_en import G2p - -from text import symbols - -current_file_path = os.path.dirname(__file__) -CMU_DICT_PATH = os.path.join(current_file_path, "cmudict.rep") -CACHE_PATH = os.path.join(current_file_path, "cmudict_cache.pickle") -_g2p = G2p() - -arpa = { - "AH0", - "S", - "AH1", - "EY2", - "AE2", - "EH0", - "OW2", - "UH0", - "NG", - "B", - "G", - "AY0", - "M", - "AA0", - "F", - "AO0", - "ER2", - "UH1", - "IY1", - "AH2", - "DH", - "IY0", - "EY1", - "IH0", - "K", - "N", - "W", - "IY2", - "T", - "AA1", - "ER1", - "EH2", - "OY0", - "UH2", - "UW1", - "Z", - "AW2", - "AW1", - "V", - "UW2", - "AA2", - "ER", - "AW0", - "UW0", - "R", - "OW1", - "EH1", - "ZH", - "AE0", - "IH2", - "IH", - "Y", - "JH", - "P", - "AY1", - "EY0", - "OY2", - "TH", - "HH", - "D", - "ER0", - "CH", - "AO1", - "AE1", - "AO2", - "OY1", - "AY2", - "IH1", - "OW0", - "L", - "SH", -} - - -def post_replace_ph(ph): - rep_map = { - ":": ",", - ";": ",", - ",": ",", - "。": ".", - "!": "!", - "?": "?", - "\n": ".", - "·": ",", - "、": ",", - "...": "…", - "v": "V", - } - if ph in rep_map.keys(): - ph = rep_map[ph] - if ph in symbols: - return ph - if ph not in symbols: - ph = "UNK" - return ph - - -def read_dict(): - g2p_dict = {} - start_line = 49 - with open(CMU_DICT_PATH) as f: - line = f.readline() - line_index = 1 - while line: - if line_index >= start_line: - line = line.strip() - word_split = line.split(" ") - word = word_split[0] - - syllable_split = word_split[1].split(" - ") - g2p_dict[word] = [] - for syllable in syllable_split: - phone_split = syllable.split(" ") - g2p_dict[word].append(phone_split) - - line_index = line_index + 1 - line = f.readline() - - return g2p_dict - - -def cache_dict(g2p_dict, file_path): - with open(file_path, "wb") as pickle_file: - pickle.dump(g2p_dict, pickle_file) - - -def get_dict(): - if os.path.exists(CACHE_PATH): - with open(CACHE_PATH, "rb") as pickle_file: - g2p_dict = pickle.load(pickle_file) - else: - g2p_dict = read_dict() - cache_dict(g2p_dict, 
CACHE_PATH) - - return g2p_dict - - -eng_dict = get_dict() - - -def refine_ph(phn): - tone = 0 - if re.search(r"\d$", phn): - tone = int(phn[-1]) + 1 - phn = phn[:-1] - return phn.lower(), tone - - -def refine_syllables(syllables): - tones = [] - phonemes = [] - for phn_list in syllables: - for i in range(len(phn_list)): - phn = phn_list[i] - phn, tone = refine_ph(phn) - phonemes.append(phn) - tones.append(tone) - return phonemes, tones - - -def text_normalize(text): - # todo: eng text normalize - return text - - -def g2p(text): - phones = [] - tones = [] - words = re.split(r"([,;.\-\?\!\s+])", text) - for w in words: - if w.upper() in eng_dict: - phns, tns = refine_syllables(eng_dict[w.upper()]) - phones += phns - tones += tns - else: - phone_list = list(filter(lambda p: p != " ", _g2p(w))) - for ph in phone_list: - if ph in arpa: - ph, tn = refine_ph(ph) - phones.append(ph) - tones.append(tn) - else: - phones.append(ph) - tones.append(0) - # todo: implement word2ph - word2ph = [1 for i in phones] - - phones = [post_replace_ph(i) for i in phones] - return phones, tones, word2ph - - -if __name__ == "__main__": - # print(get_dict()) - # print(eng_word_to_phoneme("hello")) - print(g2p("In this paper, we propose 1 DSPGAN, a GAN-based universal vocoder.")) - # all_phones = set() - # for k, syllables in eng_dict.items(): - # for group in syllables: - # for ph in group: - # all_phones.add(ph) - # print(all_phones) diff --git a/spaces/chilge/taoli/mel_processing.py b/spaces/chilge/taoli/mel_processing.py deleted file mode 100644 index 99c5b35beb83f3b288af0fac5b49ebf2c69f062c..0000000000000000000000000000000000000000 --- a/spaces/chilge/taoli/mel_processing.py +++ /dev/null @@ -1,112 +0,0 @@ -import math -import os -import random -import torch -from torch import nn -import torch.nn.functional as F -import torch.utils.data -import numpy as np -import librosa -import librosa.util as librosa_util -from librosa.util import normalize, pad_center, tiny -from scipy.signal import get_window -from scipy.io.wavfile import read -from librosa.filters import mel as librosa_mel_fn - -MAX_WAV_VALUE = 32768.0 - - -def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): - """ - PARAMS - ------ - C: compression factor - """ - return torch.log(torch.clamp(x, min=clip_val) * C) - - -def dynamic_range_decompression_torch(x, C=1): - """ - PARAMS - ------ - C: compression factor used to compress - """ - return torch.exp(x) / C - - -def spectral_normalize_torch(magnitudes): - output = dynamic_range_compression_torch(magnitudes) - return output - - -def spectral_de_normalize_torch(magnitudes): - output = dynamic_range_decompression_torch(magnitudes) - return output - - -mel_basis = {} -hann_window = {} - - -def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global hann_window - dtype_device = str(y.dtype) + '_' + str(y.device) - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', normalized=False, onesided=True, 
return_complex=False) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - return spec - - -def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax): - global mel_basis - dtype_device = str(spec.dtype) + '_' + str(spec.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device) - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - return spec - - -def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global mel_basis, hann_window - dtype_device = str(y.dtype) + '_' + str(y.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device) - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - - return spec diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/anyio/_core/_typedattr.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/anyio/_core/_typedattr.py deleted file mode 100644 index bf9202eeab91d263f4badade4601efd111b91523..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/anyio/_core/_typedattr.py +++ /dev/null @@ -1,83 +0,0 @@ -from __future__ import annotations - -import sys -from typing import Any, Callable, Mapping, TypeVar, overload - -from ._exceptions import TypedAttributeLookupError - -if sys.version_info >= (3, 8): - from typing import final -else: - from typing_extensions import final - -T_Attr = TypeVar("T_Attr") -T_Default = TypeVar("T_Default") -undefined = object() - - -def typed_attribute() -> Any: - """Return a unique object, used to mark typed attributes.""" - return object() - - -class TypedAttributeSet: - """ - Superclass for typed attribute collections. - - Checks that every public attribute of every subclass has a type annotation. 
- """ - - def __init_subclass__(cls) -> None: - annotations: dict[str, Any] = getattr(cls, "__annotations__", {}) - for attrname in dir(cls): - if not attrname.startswith("_") and attrname not in annotations: - raise TypeError( - f"Attribute {attrname!r} is missing its type annotation" - ) - - super().__init_subclass__() - - -class TypedAttributeProvider: - """Base class for classes that wish to provide typed extra attributes.""" - - @property - def extra_attributes(self) -> Mapping[T_Attr, Callable[[], T_Attr]]: - """ - A mapping of the extra attributes to callables that return the corresponding values. - - If the provider wraps another provider, the attributes from that wrapper should also be - included in the returned mapping (but the wrapper may override the callables from the - wrapped instance). - - """ - return {} - - @overload - def extra(self, attribute: T_Attr) -> T_Attr: - ... - - @overload - def extra(self, attribute: T_Attr, default: T_Default) -> T_Attr | T_Default: - ... - - @final - def extra(self, attribute: Any, default: object = undefined) -> object: - """ - extra(attribute, default=undefined) - - Return the value of the given typed extra attribute. - - :param attribute: the attribute (member of a :class:`~TypedAttributeSet`) to look for - :param default: the value that should be returned if no value is found for the attribute - :raises ~anyio.TypedAttributeLookupError: if the search failed and no default value was - given - - """ - try: - return self.extra_attributes[attribute]() - except KeyError: - if default is undefined: - raise TypedAttributeLookupError("Attribute not found") from None - else: - return default diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/cffi/model.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/cffi/model.py deleted file mode 100644 index ad1c1764893d0257c0e75eeb61b0a359e89adf0f..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/cffi/model.py +++ /dev/null @@ -1,617 +0,0 @@ -import types -import weakref - -from .lock import allocate_lock -from .error import CDefError, VerificationError, VerificationMissing - -# type qualifiers -Q_CONST = 0x01 -Q_RESTRICT = 0x02 -Q_VOLATILE = 0x04 - -def qualify(quals, replace_with): - if quals & Q_CONST: - replace_with = ' const ' + replace_with.lstrip() - if quals & Q_VOLATILE: - replace_with = ' volatile ' + replace_with.lstrip() - if quals & Q_RESTRICT: - # It seems that __restrict is supported by gcc and msvc. - # If you hit some different compiler, add a #define in - # _cffi_include.h for it (and in its copies, documented there) - replace_with = ' __restrict ' + replace_with.lstrip() - return replace_with - - -class BaseTypeByIdentity(object): - is_array_type = False - is_raw_function = False - - def get_c_name(self, replace_with='', context='a C file', quals=0): - result = self.c_name_with_marker - assert result.count('&') == 1 - # some logic duplication with ffi.getctype()... 
:-( - replace_with = replace_with.strip() - if replace_with: - if replace_with.startswith('*') and '&[' in result: - replace_with = '(%s)' % replace_with - elif not replace_with[0] in '[(': - replace_with = ' ' + replace_with - replace_with = qualify(quals, replace_with) - result = result.replace('&', replace_with) - if '$' in result: - raise VerificationError( - "cannot generate '%s' in %s: unknown type name" - % (self._get_c_name(), context)) - return result - - def _get_c_name(self): - return self.c_name_with_marker.replace('&', '') - - def has_c_name(self): - return '$' not in self._get_c_name() - - def is_integer_type(self): - return False - - def get_cached_btype(self, ffi, finishlist, can_delay=False): - try: - BType = ffi._cached_btypes[self] - except KeyError: - BType = self.build_backend_type(ffi, finishlist) - BType2 = ffi._cached_btypes.setdefault(self, BType) - assert BType2 is BType - return BType - - def __repr__(self): - return '<%s>' % (self._get_c_name(),) - - def _get_items(self): - return [(name, getattr(self, name)) for name in self._attrs_] - - -class BaseType(BaseTypeByIdentity): - - def __eq__(self, other): - return (self.__class__ == other.__class__ and - self._get_items() == other._get_items()) - - def __ne__(self, other): - return not self == other - - def __hash__(self): - return hash((self.__class__, tuple(self._get_items()))) - - -class VoidType(BaseType): - _attrs_ = () - - def __init__(self): - self.c_name_with_marker = 'void&' - - def build_backend_type(self, ffi, finishlist): - return global_cache(self, ffi, 'new_void_type') - -void_type = VoidType() - - -class BasePrimitiveType(BaseType): - def is_complex_type(self): - return False - - -class PrimitiveType(BasePrimitiveType): - _attrs_ = ('name',) - - ALL_PRIMITIVE_TYPES = { - 'char': 'c', - 'short': 'i', - 'int': 'i', - 'long': 'i', - 'long long': 'i', - 'signed char': 'i', - 'unsigned char': 'i', - 'unsigned short': 'i', - 'unsigned int': 'i', - 'unsigned long': 'i', - 'unsigned long long': 'i', - 'float': 'f', - 'double': 'f', - 'long double': 'f', - 'float _Complex': 'j', - 'double _Complex': 'j', - '_Bool': 'i', - # the following types are not primitive in the C sense - 'wchar_t': 'c', - 'char16_t': 'c', - 'char32_t': 'c', - 'int8_t': 'i', - 'uint8_t': 'i', - 'int16_t': 'i', - 'uint16_t': 'i', - 'int32_t': 'i', - 'uint32_t': 'i', - 'int64_t': 'i', - 'uint64_t': 'i', - 'int_least8_t': 'i', - 'uint_least8_t': 'i', - 'int_least16_t': 'i', - 'uint_least16_t': 'i', - 'int_least32_t': 'i', - 'uint_least32_t': 'i', - 'int_least64_t': 'i', - 'uint_least64_t': 'i', - 'int_fast8_t': 'i', - 'uint_fast8_t': 'i', - 'int_fast16_t': 'i', - 'uint_fast16_t': 'i', - 'int_fast32_t': 'i', - 'uint_fast32_t': 'i', - 'int_fast64_t': 'i', - 'uint_fast64_t': 'i', - 'intptr_t': 'i', - 'uintptr_t': 'i', - 'intmax_t': 'i', - 'uintmax_t': 'i', - 'ptrdiff_t': 'i', - 'size_t': 'i', - 'ssize_t': 'i', - } - - def __init__(self, name): - assert name in self.ALL_PRIMITIVE_TYPES - self.name = name - self.c_name_with_marker = name + '&' - - def is_char_type(self): - return self.ALL_PRIMITIVE_TYPES[self.name] == 'c' - def is_integer_type(self): - return self.ALL_PRIMITIVE_TYPES[self.name] == 'i' - def is_float_type(self): - return self.ALL_PRIMITIVE_TYPES[self.name] == 'f' - def is_complex_type(self): - return self.ALL_PRIMITIVE_TYPES[self.name] == 'j' - - def build_backend_type(self, ffi, finishlist): - return global_cache(self, ffi, 'new_primitive_type', self.name) - - -class UnknownIntegerType(BasePrimitiveType): - _attrs_ = ('name',) 
- - def __init__(self, name): - self.name = name - self.c_name_with_marker = name + '&' - - def is_integer_type(self): - return True - - def build_backend_type(self, ffi, finishlist): - raise NotImplementedError("integer type '%s' can only be used after " - "compilation" % self.name) - -class UnknownFloatType(BasePrimitiveType): - _attrs_ = ('name', ) - - def __init__(self, name): - self.name = name - self.c_name_with_marker = name + '&' - - def build_backend_type(self, ffi, finishlist): - raise NotImplementedError("float type '%s' can only be used after " - "compilation" % self.name) - - -class BaseFunctionType(BaseType): - _attrs_ = ('args', 'result', 'ellipsis', 'abi') - - def __init__(self, args, result, ellipsis, abi=None): - self.args = args - self.result = result - self.ellipsis = ellipsis - self.abi = abi - # - reprargs = [arg._get_c_name() for arg in self.args] - if self.ellipsis: - reprargs.append('...') - reprargs = reprargs or ['void'] - replace_with = self._base_pattern % (', '.join(reprargs),) - if abi is not None: - replace_with = replace_with[:1] + abi + ' ' + replace_with[1:] - self.c_name_with_marker = ( - self.result.c_name_with_marker.replace('&', replace_with)) - - -class RawFunctionType(BaseFunctionType): - # Corresponds to a C type like 'int(int)', which is the C type of - # a function, but not a pointer-to-function. The backend has no - # notion of such a type; it's used temporarily by parsing. - _base_pattern = '(&)(%s)' - is_raw_function = True - - def build_backend_type(self, ffi, finishlist): - raise CDefError("cannot render the type %r: it is a function " - "type, not a pointer-to-function type" % (self,)) - - def as_function_pointer(self): - return FunctionPtrType(self.args, self.result, self.ellipsis, self.abi) - - -class FunctionPtrType(BaseFunctionType): - _base_pattern = '(*&)(%s)' - - def build_backend_type(self, ffi, finishlist): - result = self.result.get_cached_btype(ffi, finishlist) - args = [] - for tp in self.args: - args.append(tp.get_cached_btype(ffi, finishlist)) - abi_args = () - if self.abi == "__stdcall": - if not self.ellipsis: # __stdcall ignored for variadic funcs - try: - abi_args = (ffi._backend.FFI_STDCALL,) - except AttributeError: - pass - return global_cache(self, ffi, 'new_function_type', - tuple(args), result, self.ellipsis, *abi_args) - - def as_raw_function(self): - return RawFunctionType(self.args, self.result, self.ellipsis, self.abi) - - -class PointerType(BaseType): - _attrs_ = ('totype', 'quals') - - def __init__(self, totype, quals=0): - self.totype = totype - self.quals = quals - extra = qualify(quals, " *&") - if totype.is_array_type: - extra = "(%s)" % (extra.lstrip(),) - self.c_name_with_marker = totype.c_name_with_marker.replace('&', extra) - - def build_backend_type(self, ffi, finishlist): - BItem = self.totype.get_cached_btype(ffi, finishlist, can_delay=True) - return global_cache(self, ffi, 'new_pointer_type', BItem) - -voidp_type = PointerType(void_type) - -def ConstPointerType(totype): - return PointerType(totype, Q_CONST) - -const_voidp_type = ConstPointerType(void_type) - - -class NamedPointerType(PointerType): - _attrs_ = ('totype', 'name') - - def __init__(self, totype, name, quals=0): - PointerType.__init__(self, totype, quals) - self.name = name - self.c_name_with_marker = name + '&' - - -class ArrayType(BaseType): - _attrs_ = ('item', 'length') - is_array_type = True - - def __init__(self, item, length): - self.item = item - self.length = length - # - if length is None: - brackets = '&[]' - elif length == 
'...': - brackets = '&[/*...*/]' - else: - brackets = '&[%s]' % length - self.c_name_with_marker = ( - self.item.c_name_with_marker.replace('&', brackets)) - - def length_is_unknown(self): - return isinstance(self.length, str) - - def resolve_length(self, newlength): - return ArrayType(self.item, newlength) - - def build_backend_type(self, ffi, finishlist): - if self.length_is_unknown(): - raise CDefError("cannot render the type %r: unknown length" % - (self,)) - self.item.get_cached_btype(ffi, finishlist) # force the item BType - BPtrItem = PointerType(self.item).get_cached_btype(ffi, finishlist) - return global_cache(self, ffi, 'new_array_type', BPtrItem, self.length) - -char_array_type = ArrayType(PrimitiveType('char'), None) - - -class StructOrUnionOrEnum(BaseTypeByIdentity): - _attrs_ = ('name',) - forcename = None - - def build_c_name_with_marker(self): - name = self.forcename or '%s %s' % (self.kind, self.name) - self.c_name_with_marker = name + '&' - - def force_the_name(self, forcename): - self.forcename = forcename - self.build_c_name_with_marker() - - def get_official_name(self): - assert self.c_name_with_marker.endswith('&') - return self.c_name_with_marker[:-1] - - -class StructOrUnion(StructOrUnionOrEnum): - fixedlayout = None - completed = 0 - partial = False - packed = 0 - - def __init__(self, name, fldnames, fldtypes, fldbitsize, fldquals=None): - self.name = name - self.fldnames = fldnames - self.fldtypes = fldtypes - self.fldbitsize = fldbitsize - self.fldquals = fldquals - self.build_c_name_with_marker() - - def anonymous_struct_fields(self): - if self.fldtypes is not None: - for name, type in zip(self.fldnames, self.fldtypes): - if name == '' and isinstance(type, StructOrUnion): - yield type - - def enumfields(self, expand_anonymous_struct_union=True): - fldquals = self.fldquals - if fldquals is None: - fldquals = (0,) * len(self.fldnames) - for name, type, bitsize, quals in zip(self.fldnames, self.fldtypes, - self.fldbitsize, fldquals): - if (name == '' and isinstance(type, StructOrUnion) - and expand_anonymous_struct_union): - # nested anonymous struct/union - for result in type.enumfields(): - yield result - else: - yield (name, type, bitsize, quals) - - def force_flatten(self): - # force the struct or union to have a declaration that lists - # directly all fields returned by enumfields(), flattening - # nested anonymous structs/unions. 
- names = [] - types = [] - bitsizes = [] - fldquals = [] - for name, type, bitsize, quals in self.enumfields(): - names.append(name) - types.append(type) - bitsizes.append(bitsize) - fldquals.append(quals) - self.fldnames = tuple(names) - self.fldtypes = tuple(types) - self.fldbitsize = tuple(bitsizes) - self.fldquals = tuple(fldquals) - - def get_cached_btype(self, ffi, finishlist, can_delay=False): - BType = StructOrUnionOrEnum.get_cached_btype(self, ffi, finishlist, - can_delay) - if not can_delay: - self.finish_backend_type(ffi, finishlist) - return BType - - def finish_backend_type(self, ffi, finishlist): - if self.completed: - if self.completed != 2: - raise NotImplementedError("recursive structure declaration " - "for '%s'" % (self.name,)) - return - BType = ffi._cached_btypes[self] - # - self.completed = 1 - # - if self.fldtypes is None: - pass # not completing it: it's an opaque struct - # - elif self.fixedlayout is None: - fldtypes = [tp.get_cached_btype(ffi, finishlist) - for tp in self.fldtypes] - lst = list(zip(self.fldnames, fldtypes, self.fldbitsize)) - extra_flags = () - if self.packed: - if self.packed == 1: - extra_flags = (8,) # SF_PACKED - else: - extra_flags = (0, self.packed) - ffi._backend.complete_struct_or_union(BType, lst, self, - -1, -1, *extra_flags) - # - else: - fldtypes = [] - fieldofs, fieldsize, totalsize, totalalignment = self.fixedlayout - for i in range(len(self.fldnames)): - fsize = fieldsize[i] - ftype = self.fldtypes[i] - # - if isinstance(ftype, ArrayType) and ftype.length_is_unknown(): - # fix the length to match the total size - BItemType = ftype.item.get_cached_btype(ffi, finishlist) - nlen, nrest = divmod(fsize, ffi.sizeof(BItemType)) - if nrest != 0: - self._verification_error( - "field '%s.%s' has a bogus size?" 
% ( - self.name, self.fldnames[i] or '{}')) - ftype = ftype.resolve_length(nlen) - self.fldtypes = (self.fldtypes[:i] + (ftype,) + - self.fldtypes[i+1:]) - # - BFieldType = ftype.get_cached_btype(ffi, finishlist) - if isinstance(ftype, ArrayType) and ftype.length is None: - assert fsize == 0 - else: - bitemsize = ffi.sizeof(BFieldType) - if bitemsize != fsize: - self._verification_error( - "field '%s.%s' is declared as %d bytes, but is " - "really %d bytes" % (self.name, - self.fldnames[i] or '{}', - bitemsize, fsize)) - fldtypes.append(BFieldType) - # - lst = list(zip(self.fldnames, fldtypes, self.fldbitsize, fieldofs)) - ffi._backend.complete_struct_or_union(BType, lst, self, - totalsize, totalalignment) - self.completed = 2 - - def _verification_error(self, msg): - raise VerificationError(msg) - - def check_not_partial(self): - if self.partial and self.fixedlayout is None: - raise VerificationMissing(self._get_c_name()) - - def build_backend_type(self, ffi, finishlist): - self.check_not_partial() - finishlist.append(self) - # - return global_cache(self, ffi, 'new_%s_type' % self.kind, - self.get_official_name(), key=self) - - -class StructType(StructOrUnion): - kind = 'struct' - - -class UnionType(StructOrUnion): - kind = 'union' - - -class EnumType(StructOrUnionOrEnum): - kind = 'enum' - partial = False - partial_resolved = False - - def __init__(self, name, enumerators, enumvalues, baseinttype=None): - self.name = name - self.enumerators = enumerators - self.enumvalues = enumvalues - self.baseinttype = baseinttype - self.build_c_name_with_marker() - - def force_the_name(self, forcename): - StructOrUnionOrEnum.force_the_name(self, forcename) - if self.forcename is None: - name = self.get_official_name() - self.forcename = '$' + name.replace(' ', '_') - - def check_not_partial(self): - if self.partial and not self.partial_resolved: - raise VerificationMissing(self._get_c_name()) - - def build_backend_type(self, ffi, finishlist): - self.check_not_partial() - base_btype = self.build_baseinttype(ffi, finishlist) - return global_cache(self, ffi, 'new_enum_type', - self.get_official_name(), - self.enumerators, self.enumvalues, - base_btype, key=self) - - def build_baseinttype(self, ffi, finishlist): - if self.baseinttype is not None: - return self.baseinttype.get_cached_btype(ffi, finishlist) - # - if self.enumvalues: - smallest_value = min(self.enumvalues) - largest_value = max(self.enumvalues) - else: - import warnings - try: - # XXX! The goal is to ensure that the warnings.warn() - # will not suppress the warning. We want to get it - # several times if we reach this point several times. 
- __warningregistry__.clear() - except NameError: - pass - warnings.warn("%r has no values explicitly defined; " - "guessing that it is equivalent to 'unsigned int'" - % self._get_c_name()) - smallest_value = largest_value = 0 - if smallest_value < 0: # needs a signed type - sign = 1 - candidate1 = PrimitiveType("int") - candidate2 = PrimitiveType("long") - else: - sign = 0 - candidate1 = PrimitiveType("unsigned int") - candidate2 = PrimitiveType("unsigned long") - btype1 = candidate1.get_cached_btype(ffi, finishlist) - btype2 = candidate2.get_cached_btype(ffi, finishlist) - size1 = ffi.sizeof(btype1) - size2 = ffi.sizeof(btype2) - if (smallest_value >= ((-1) << (8*size1-1)) and - largest_value < (1 << (8*size1-sign))): - return btype1 - if (smallest_value >= ((-1) << (8*size2-1)) and - largest_value < (1 << (8*size2-sign))): - return btype2 - raise CDefError("%s values don't all fit into either 'long' " - "or 'unsigned long'" % self._get_c_name()) - -def unknown_type(name, structname=None): - if structname is None: - structname = '$%s' % name - tp = StructType(structname, None, None, None) - tp.force_the_name(name) - tp.origin = "unknown_type" - return tp - -def unknown_ptr_type(name, structname=None): - if structname is None: - structname = '$$%s' % name - tp = StructType(structname, None, None, None) - return NamedPointerType(tp, name) - - -global_lock = allocate_lock() -_typecache_cffi_backend = weakref.WeakValueDictionary() - -def get_typecache(backend): - # returns _typecache_cffi_backend if backend is the _cffi_backend - # module, or type(backend).__typecache if backend is an instance of - # CTypesBackend (or some FakeBackend class during tests) - if isinstance(backend, types.ModuleType): - return _typecache_cffi_backend - with global_lock: - if not hasattr(type(backend), '__typecache'): - type(backend).__typecache = weakref.WeakValueDictionary() - return type(backend).__typecache - -def global_cache(srctype, ffi, funcname, *args, **kwds): - key = kwds.pop('key', (funcname, args)) - assert not kwds - try: - return ffi._typecache[key] - except KeyError: - pass - try: - res = getattr(ffi._backend, funcname)(*args) - except NotImplementedError as e: - raise NotImplementedError("%s: %r: %s" % (funcname, srctype, e)) - # note that setdefault() on WeakValueDictionary is not atomic - # and contains a rare bug (http://bugs.python.org/issue19542); - # we have to use a lock and do it ourselves - cache = ffi._typecache - with global_lock: - res1 = cache.get(key) - if res1 is None: - cache[key] = res - return res - else: - return res1 - -def pointer_cache(ffi, BType): - return global_cache('?', ffi, 'new_pointer_type', BType) - -def attach_exception_info(e, name): - if e.args and type(e.args[0]) is str: - e.args = ('%s: %s' % (name, e.args[0]),) + e.args[1:] diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_t_r_a_k.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_t_r_a_k.py deleted file mode 100644 index 0d1b313eaef36bed86ab064e341d14a472a39625..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_t_r_a_k.py +++ /dev/null @@ -1,325 +0,0 @@ -from fontTools.misc import sstruct -from fontTools.misc.fixedTools import ( - fixedToFloat as fi2fl, - floatToFixed as fl2fi, - floatToFixedToStr as fl2str, - strToFixedToFloat as str2fl, -) -from fontTools.misc.textTools import bytesjoin, safeEval -from 
fontTools.ttLib import TTLibError -from . import DefaultTable -import struct -from collections.abc import MutableMapping - - -# Apple's documentation of 'trak': -# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6trak.html - -TRAK_HEADER_FORMAT = """ - > # big endian - version: 16.16F - format: H - horizOffset: H - vertOffset: H - reserved: H -""" - -TRAK_HEADER_FORMAT_SIZE = sstruct.calcsize(TRAK_HEADER_FORMAT) - - -TRACK_DATA_FORMAT = """ - > # big endian - nTracks: H - nSizes: H - sizeTableOffset: L -""" - -TRACK_DATA_FORMAT_SIZE = sstruct.calcsize(TRACK_DATA_FORMAT) - - -TRACK_TABLE_ENTRY_FORMAT = """ - > # big endian - track: 16.16F - nameIndex: H - offset: H -""" - -TRACK_TABLE_ENTRY_FORMAT_SIZE = sstruct.calcsize(TRACK_TABLE_ENTRY_FORMAT) - - -# size values are actually '16.16F' fixed-point values, but here I do the -# fixedToFloat conversion manually instead of relying on sstruct -SIZE_VALUE_FORMAT = ">l" -SIZE_VALUE_FORMAT_SIZE = struct.calcsize(SIZE_VALUE_FORMAT) - -# per-Size values are in 'FUnits', i.e. 16-bit signed integers -PER_SIZE_VALUE_FORMAT = ">h" -PER_SIZE_VALUE_FORMAT_SIZE = struct.calcsize(PER_SIZE_VALUE_FORMAT) - - -class table__t_r_a_k(DefaultTable.DefaultTable): - dependencies = ["name"] - - def compile(self, ttFont): - dataList = [] - offset = TRAK_HEADER_FORMAT_SIZE - for direction in ("horiz", "vert"): - trackData = getattr(self, direction + "Data", TrackData()) - offsetName = direction + "Offset" - # set offset to 0 if None or empty - if not trackData: - setattr(self, offsetName, 0) - continue - # TrackData table format must be longword aligned - alignedOffset = (offset + 3) & ~3 - padding, offset = b"\x00" * (alignedOffset - offset), alignedOffset - setattr(self, offsetName, offset) - - data = trackData.compile(offset) - offset += len(data) - dataList.append(padding + data) - - self.reserved = 0 - tableData = bytesjoin([sstruct.pack(TRAK_HEADER_FORMAT, self)] + dataList) - return tableData - - def decompile(self, data, ttFont): - sstruct.unpack(TRAK_HEADER_FORMAT, data[:TRAK_HEADER_FORMAT_SIZE], self) - for direction in ("horiz", "vert"): - trackData = TrackData() - offset = getattr(self, direction + "Offset") - if offset != 0: - trackData.decompile(data, offset) - setattr(self, direction + "Data", trackData) - - def toXML(self, writer, ttFont): - writer.simpletag("version", value=self.version) - writer.newline() - writer.simpletag("format", value=self.format) - writer.newline() - for direction in ("horiz", "vert"): - dataName = direction + "Data" - writer.begintag(dataName) - writer.newline() - trackData = getattr(self, dataName, TrackData()) - trackData.toXML(writer, ttFont) - writer.endtag(dataName) - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - if name == "version": - self.version = safeEval(attrs["value"]) - elif name == "format": - self.format = safeEval(attrs["value"]) - elif name in ("horizData", "vertData"): - trackData = TrackData() - setattr(self, name, trackData) - for element in content: - if not isinstance(element, tuple): - continue - name, attrs, content_ = element - trackData.fromXML(name, attrs, content_, ttFont) - - -class TrackData(MutableMapping): - def __init__(self, initialdata={}): - self._map = dict(initialdata) - - def compile(self, offset): - nTracks = len(self) - sizes = self.sizes() - nSizes = len(sizes) - - # offset to the start of the size subtable - offset += TRACK_DATA_FORMAT_SIZE + TRACK_TABLE_ENTRY_FORMAT_SIZE * nTracks - trackDataHeader = sstruct.pack( - TRACK_DATA_FORMAT, - 
{"nTracks": nTracks, "nSizes": nSizes, "sizeTableOffset": offset}, - ) - - entryDataList = [] - perSizeDataList = [] - # offset to per-size tracking values - offset += SIZE_VALUE_FORMAT_SIZE * nSizes - # sort track table entries by track value - for track, entry in sorted(self.items()): - assert entry.nameIndex is not None - entry.track = track - entry.offset = offset - entryDataList += [sstruct.pack(TRACK_TABLE_ENTRY_FORMAT, entry)] - # sort per-size values by size - for size, value in sorted(entry.items()): - perSizeDataList += [struct.pack(PER_SIZE_VALUE_FORMAT, value)] - offset += PER_SIZE_VALUE_FORMAT_SIZE * nSizes - # sort size values - sizeDataList = [ - struct.pack(SIZE_VALUE_FORMAT, fl2fi(sv, 16)) for sv in sorted(sizes) - ] - - data = bytesjoin( - [trackDataHeader] + entryDataList + sizeDataList + perSizeDataList - ) - return data - - def decompile(self, data, offset): - # initial offset is from the start of trak table to the current TrackData - trackDataHeader = data[offset : offset + TRACK_DATA_FORMAT_SIZE] - if len(trackDataHeader) != TRACK_DATA_FORMAT_SIZE: - raise TTLibError("not enough data to decompile TrackData header") - sstruct.unpack(TRACK_DATA_FORMAT, trackDataHeader, self) - offset += TRACK_DATA_FORMAT_SIZE - - nSizes = self.nSizes - sizeTableOffset = self.sizeTableOffset - sizeTable = [] - for i in range(nSizes): - sizeValueData = data[ - sizeTableOffset : sizeTableOffset + SIZE_VALUE_FORMAT_SIZE - ] - if len(sizeValueData) < SIZE_VALUE_FORMAT_SIZE: - raise TTLibError("not enough data to decompile TrackData size subtable") - (sizeValue,) = struct.unpack(SIZE_VALUE_FORMAT, sizeValueData) - sizeTable.append(fi2fl(sizeValue, 16)) - sizeTableOffset += SIZE_VALUE_FORMAT_SIZE - - for i in range(self.nTracks): - entry = TrackTableEntry() - entryData = data[offset : offset + TRACK_TABLE_ENTRY_FORMAT_SIZE] - if len(entryData) < TRACK_TABLE_ENTRY_FORMAT_SIZE: - raise TTLibError("not enough data to decompile TrackTableEntry record") - sstruct.unpack(TRACK_TABLE_ENTRY_FORMAT, entryData, entry) - perSizeOffset = entry.offset - for j in range(nSizes): - size = sizeTable[j] - perSizeValueData = data[ - perSizeOffset : perSizeOffset + PER_SIZE_VALUE_FORMAT_SIZE - ] - if len(perSizeValueData) < PER_SIZE_VALUE_FORMAT_SIZE: - raise TTLibError( - "not enough data to decompile per-size track values" - ) - (perSizeValue,) = struct.unpack(PER_SIZE_VALUE_FORMAT, perSizeValueData) - entry[size] = perSizeValue - perSizeOffset += PER_SIZE_VALUE_FORMAT_SIZE - self[entry.track] = entry - offset += TRACK_TABLE_ENTRY_FORMAT_SIZE - - def toXML(self, writer, ttFont): - nTracks = len(self) - nSizes = len(self.sizes()) - writer.comment("nTracks=%d, nSizes=%d" % (nTracks, nSizes)) - writer.newline() - for track, entry in sorted(self.items()): - assert entry.nameIndex is not None - entry.track = track - entry.toXML(writer, ttFont) - - def fromXML(self, name, attrs, content, ttFont): - if name != "trackEntry": - return - entry = TrackTableEntry() - entry.fromXML(name, attrs, content, ttFont) - self[entry.track] = entry - - def sizes(self): - if not self: - return frozenset() - tracks = list(self.tracks()) - sizes = self[tracks.pop(0)].sizes() - for track in tracks: - entrySizes = self[track].sizes() - if sizes != entrySizes: - raise TTLibError( - "'trak' table entries must specify the same sizes: " - "%s != %s" % (sorted(sizes), sorted(entrySizes)) - ) - return frozenset(sizes) - - def __getitem__(self, track): - return self._map[track] - - def __delitem__(self, track): - del self._map[track] - - def 
__setitem__(self, track, entry): - self._map[track] = entry - - def __len__(self): - return len(self._map) - - def __iter__(self): - return iter(self._map) - - def keys(self): - return self._map.keys() - - tracks = keys - - def __repr__(self): - return "TrackData({})".format(self._map if self else "") - - -class TrackTableEntry(MutableMapping): - def __init__(self, values={}, nameIndex=None): - self.nameIndex = nameIndex - self._map = dict(values) - - def toXML(self, writer, ttFont): - name = ttFont["name"].getDebugName(self.nameIndex) - writer.begintag( - "trackEntry", - (("value", fl2str(self.track, 16)), ("nameIndex", self.nameIndex)), - ) - writer.newline() - if name: - writer.comment(name) - writer.newline() - for size, perSizeValue in sorted(self.items()): - writer.simpletag("track", size=fl2str(size, 16), value=perSizeValue) - writer.newline() - writer.endtag("trackEntry") - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - self.track = str2fl(attrs["value"], 16) - self.nameIndex = safeEval(attrs["nameIndex"]) - for element in content: - if not isinstance(element, tuple): - continue - name, attrs, _ = element - if name != "track": - continue - size = str2fl(attrs["size"], 16) - self[size] = safeEval(attrs["value"]) - - def __getitem__(self, size): - return self._map[size] - - def __delitem__(self, size): - del self._map[size] - - def __setitem__(self, size, value): - self._map[size] = value - - def __len__(self): - return len(self._map) - - def __iter__(self): - return iter(self._map) - - def keys(self): - return self._map.keys() - - sizes = keys - - def __repr__(self): - return "TrackTableEntry({}, nameIndex={})".format(self._map, self.nameIndex) - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return NotImplemented - return self.nameIndex == other.nameIndex and dict(self) == dict(other) - - def __ne__(self, other): - result = self.__eq__(other) - return result if result is NotImplemented else not result diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/google/protobuf/descriptor_pb2.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/google/protobuf/descriptor_pb2.py deleted file mode 100644 index c42e7b6d1a28117a614728cc49146a60cb6e0949..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/google/protobuf/descriptor_pb2.py +++ /dev/null @@ -1,2227 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: google/protobuf/descriptor.proto -"""Generated protocol buffer code.""" -from google.protobuf import descriptor as _descriptor -from google.protobuf import descriptor_pool as _descriptor_pool -from google.protobuf import symbol_database as _symbol_database -from google.protobuf.internal import builder as _builder -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - - - -if _descriptor._USE_C_DESCRIPTORS == False: - DESCRIPTOR = _descriptor.FileDescriptor( - name='google/protobuf/descriptor.proto', - package='google.protobuf', - syntax='proto2', - serialized_options=None, - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n google/protobuf/descriptor.proto\x12\x0fgoogle.protobuf\"M\n\x11\x46ileDescriptorSet\x12\x38\n\x04\x66ile\x18\x01 \x03(\x0b\x32$.google.protobuf.FileDescriptorProtoR\x04\x66ile\"\xfe\x04\n\x13\x46ileDescriptorProto\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\x12\x18\n\x07package\x18\x02 \x01(\tR\x07package\x12\x1e\n\ndependency\x18\x03 \x03(\tR\ndependency\x12+\n\x11public_dependency\x18\n \x03(\x05R\x10publicDependency\x12\'\n\x0fweak_dependency\x18\x0b \x03(\x05R\x0eweakDependency\x12\x43\n\x0cmessage_type\x18\x04 \x03(\x0b\x32 .google.protobuf.DescriptorProtoR\x0bmessageType\x12\x41\n\tenum_type\x18\x05 \x03(\x0b\x32$.google.protobuf.EnumDescriptorProtoR\x08\x65numType\x12\x41\n\x07service\x18\x06 \x03(\x0b\x32\'.google.protobuf.ServiceDescriptorProtoR\x07service\x12\x43\n\textension\x18\x07 \x03(\x0b\x32%.google.protobuf.FieldDescriptorProtoR\textension\x12\x36\n\x07options\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.FileOptionsR\x07options\x12I\n\x10source_code_info\x18\t \x01(\x0b\x32\x1f.google.protobuf.SourceCodeInfoR\x0esourceCodeInfo\x12\x16\n\x06syntax\x18\x0c \x01(\tR\x06syntax\x12\x18\n\x07\x65\x64ition\x18\r \x01(\tR\x07\x65\x64ition\"\xb9\x06\n\x0f\x44\x65scriptorProto\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\x12;\n\x05\x66ield\x18\x02 \x03(\x0b\x32%.google.protobuf.FieldDescriptorProtoR\x05\x66ield\x12\x43\n\textension\x18\x06 \x03(\x0b\x32%.google.protobuf.FieldDescriptorProtoR\textension\x12\x41\n\x0bnested_type\x18\x03 \x03(\x0b\x32 .google.protobuf.DescriptorProtoR\nnestedType\x12\x41\n\tenum_type\x18\x04 \x03(\x0b\x32$.google.protobuf.EnumDescriptorProtoR\x08\x65numType\x12X\n\x0f\x65xtension_range\x18\x05 \x03(\x0b\x32/.google.protobuf.DescriptorProto.ExtensionRangeR\x0e\x65xtensionRange\x12\x44\n\noneof_decl\x18\x08 \x03(\x0b\x32%.google.protobuf.OneofDescriptorProtoR\toneofDecl\x12\x39\n\x07options\x18\x07 \x01(\x0b\x32\x1f.google.protobuf.MessageOptionsR\x07options\x12U\n\x0ereserved_range\x18\t \x03(\x0b\x32..google.protobuf.DescriptorProto.ReservedRangeR\rreservedRange\x12#\n\rreserved_name\x18\n \x03(\tR\x0creservedName\x1az\n\x0e\x45xtensionRange\x12\x14\n\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n\x03\x65nd\x18\x02 \x01(\x05R\x03\x65nd\x12@\n\x07options\x18\x03 \x01(\x0b\x32&.google.protobuf.ExtensionRangeOptionsR\x07options\x1a\x37\n\rReservedRange\x12\x14\n\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n\x03\x65nd\x18\x02 \x01(\x05R\x03\x65nd\"\xad\x04\n\x15\x45xtensionRangeOptions\x12X\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\x12Y\n\x0b\x64\x65\x63laration\x18\x02 \x03(\x0b\x32\x32.google.protobuf.ExtensionRangeOptions.DeclarationB\x03\x88\x01\x02R\x0b\x64\x65\x63laration\x12h\n\x0cverification\x18\x03 
\x01(\x0e\x32\x38.google.protobuf.ExtensionRangeOptions.VerificationState:\nUNVERIFIEDR\x0cverification\x1a\xb3\x01\n\x0b\x44\x65\x63laration\x12\x16\n\x06number\x18\x01 \x01(\x05R\x06number\x12\x1b\n\tfull_name\x18\x02 \x01(\tR\x08\x66ullName\x12\x12\n\x04type\x18\x03 \x01(\tR\x04type\x12#\n\x0bis_repeated\x18\x04 \x01(\x08\x42\x02\x18\x01R\nisRepeated\x12\x1a\n\x08reserved\x18\x05 \x01(\x08R\x08reserved\x12\x1a\n\x08repeated\x18\x06 \x01(\x08R\x08repeated\"4\n\x11VerificationState\x12\x0f\n\x0b\x44\x45\x43LARATION\x10\x00\x12\x0e\n\nUNVERIFIED\x10\x01*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\xc1\x06\n\x14\x46ieldDescriptorProto\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\x12\x16\n\x06number\x18\x03 \x01(\x05R\x06number\x12\x41\n\x05label\x18\x04 \x01(\x0e\x32+.google.protobuf.FieldDescriptorProto.LabelR\x05label\x12>\n\x04type\x18\x05 \x01(\x0e\x32*.google.protobuf.FieldDescriptorProto.TypeR\x04type\x12\x1b\n\ttype_name\x18\x06 \x01(\tR\x08typeName\x12\x1a\n\x08\x65xtendee\x18\x02 \x01(\tR\x08\x65xtendee\x12#\n\rdefault_value\x18\x07 \x01(\tR\x0c\x64\x65\x66\x61ultValue\x12\x1f\n\x0boneof_index\x18\t \x01(\x05R\noneofIndex\x12\x1b\n\tjson_name\x18\n \x01(\tR\x08jsonName\x12\x37\n\x07options\x18\x08 \x01(\x0b\x32\x1d.google.protobuf.FieldOptionsR\x07options\x12\'\n\x0fproto3_optional\x18\x11 \x01(\x08R\x0eproto3Optional\"\xb6\x02\n\x04Type\x12\x0f\n\x0bTYPE_DOUBLE\x10\x01\x12\x0e\n\nTYPE_FLOAT\x10\x02\x12\x0e\n\nTYPE_INT64\x10\x03\x12\x0f\n\x0bTYPE_UINT64\x10\x04\x12\x0e\n\nTYPE_INT32\x10\x05\x12\x10\n\x0cTYPE_FIXED64\x10\x06\x12\x10\n\x0cTYPE_FIXED32\x10\x07\x12\r\n\tTYPE_BOOL\x10\x08\x12\x0f\n\x0bTYPE_STRING\x10\t\x12\x0e\n\nTYPE_GROUP\x10\n\x12\x10\n\x0cTYPE_MESSAGE\x10\x0b\x12\x0e\n\nTYPE_BYTES\x10\x0c\x12\x0f\n\x0bTYPE_UINT32\x10\r\x12\r\n\tTYPE_ENUM\x10\x0e\x12\x11\n\rTYPE_SFIXED32\x10\x0f\x12\x11\n\rTYPE_SFIXED64\x10\x10\x12\x0f\n\x0bTYPE_SINT32\x10\x11\x12\x0f\n\x0bTYPE_SINT64\x10\x12\"C\n\x05Label\x12\x12\n\x0eLABEL_OPTIONAL\x10\x01\x12\x12\n\x0eLABEL_REQUIRED\x10\x02\x12\x12\n\x0eLABEL_REPEATED\x10\x03\"c\n\x14OneofDescriptorProto\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\x12\x37\n\x07options\x18\x02 \x01(\x0b\x32\x1d.google.protobuf.OneofOptionsR\x07options\"\xe3\x02\n\x13\x45numDescriptorProto\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\x12?\n\x05value\x18\x02 \x03(\x0b\x32).google.protobuf.EnumValueDescriptorProtoR\x05value\x12\x36\n\x07options\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.EnumOptionsR\x07options\x12]\n\x0ereserved_range\x18\x04 \x03(\x0b\x32\x36.google.protobuf.EnumDescriptorProto.EnumReservedRangeR\rreservedRange\x12#\n\rreserved_name\x18\x05 \x03(\tR\x0creservedName\x1a;\n\x11\x45numReservedRange\x12\x14\n\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n\x03\x65nd\x18\x02 \x01(\x05R\x03\x65nd\"\x83\x01\n\x18\x45numValueDescriptorProto\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\x12\x16\n\x06number\x18\x02 \x01(\x05R\x06number\x12;\n\x07options\x18\x03 \x01(\x0b\x32!.google.protobuf.EnumValueOptionsR\x07options\"\xa7\x01\n\x16ServiceDescriptorProto\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\x12>\n\x06method\x18\x02 \x03(\x0b\x32&.google.protobuf.MethodDescriptorProtoR\x06method\x12\x39\n\x07options\x18\x03 \x01(\x0b\x32\x1f.google.protobuf.ServiceOptionsR\x07options\"\x89\x02\n\x15MethodDescriptorProto\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\x12\x1d\n\ninput_type\x18\x02 \x01(\tR\tinputType\x12\x1f\n\x0boutput_type\x18\x03 \x01(\tR\noutputType\x12\x38\n\x07options\x18\x04 
\x01(\x0b\x32\x1e.google.protobuf.MethodOptionsR\x07options\x12\x30\n\x10\x63lient_streaming\x18\x05 \x01(\x08:\x05\x66\x61lseR\x0f\x63lientStreaming\x12\x30\n\x10server_streaming\x18\x06 \x01(\x08:\x05\x66\x61lseR\x0fserverStreaming\"\x91\t\n\x0b\x46ileOptions\x12!\n\x0cjava_package\x18\x01 \x01(\tR\x0bjavaPackage\x12\x30\n\x14java_outer_classname\x18\x08 \x01(\tR\x12javaOuterClassname\x12\x35\n\x13java_multiple_files\x18\n \x01(\x08:\x05\x66\x61lseR\x11javaMultipleFiles\x12\x44\n\x1djava_generate_equals_and_hash\x18\x14 \x01(\x08\x42\x02\x18\x01R\x19javaGenerateEqualsAndHash\x12:\n\x16java_string_check_utf8\x18\x1b \x01(\x08:\x05\x66\x61lseR\x13javaStringCheckUtf8\x12S\n\x0coptimize_for\x18\t \x01(\x0e\x32).google.protobuf.FileOptions.OptimizeMode:\x05SPEEDR\x0boptimizeFor\x12\x1d\n\ngo_package\x18\x0b \x01(\tR\tgoPackage\x12\x35\n\x13\x63\x63_generic_services\x18\x10 \x01(\x08:\x05\x66\x61lseR\x11\x63\x63GenericServices\x12\x39\n\x15java_generic_services\x18\x11 \x01(\x08:\x05\x66\x61lseR\x13javaGenericServices\x12\x35\n\x13py_generic_services\x18\x12 \x01(\x08:\x05\x66\x61lseR\x11pyGenericServices\x12\x37\n\x14php_generic_services\x18* \x01(\x08:\x05\x66\x61lseR\x12phpGenericServices\x12%\n\ndeprecated\x18\x17 \x01(\x08:\x05\x66\x61lseR\ndeprecated\x12.\n\x10\x63\x63_enable_arenas\x18\x1f \x01(\x08:\x04trueR\x0e\x63\x63\x45nableArenas\x12*\n\x11objc_class_prefix\x18$ \x01(\tR\x0fobjcClassPrefix\x12)\n\x10\x63sharp_namespace\x18% \x01(\tR\x0f\x63sharpNamespace\x12!\n\x0cswift_prefix\x18\' \x01(\tR\x0bswiftPrefix\x12(\n\x10php_class_prefix\x18( \x01(\tR\x0ephpClassPrefix\x12#\n\rphp_namespace\x18) \x01(\tR\x0cphpNamespace\x12\x34\n\x16php_metadata_namespace\x18, \x01(\tR\x14phpMetadataNamespace\x12!\n\x0cruby_package\x18- \x01(\tR\x0brubyPackage\x12X\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\":\n\x0cOptimizeMode\x12\t\n\x05SPEED\x10\x01\x12\r\n\tCODE_SIZE\x10\x02\x12\x10\n\x0cLITE_RUNTIME\x10\x03*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02J\x04\x08&\x10\'\"\xbb\x03\n\x0eMessageOptions\x12<\n\x17message_set_wire_format\x18\x01 \x01(\x08:\x05\x66\x61lseR\x14messageSetWireFormat\x12L\n\x1fno_standard_descriptor_accessor\x18\x02 \x01(\x08:\x05\x66\x61lseR\x1cnoStandardDescriptorAccessor\x12%\n\ndeprecated\x18\x03 \x01(\x08:\x05\x66\x61lseR\ndeprecated\x12\x1b\n\tmap_entry\x18\x07 \x01(\x08R\x08mapEntry\x12V\n&deprecated_legacy_json_field_conflicts\x18\x0b \x01(\x08\x42\x02\x18\x01R\"deprecatedLegacyJsonFieldConflicts\x12X\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02J\x04\x08\x04\x10\x05J\x04\x08\x05\x10\x06J\x04\x08\x06\x10\x07J\x04\x08\x08\x10\tJ\x04\x08\t\x10\n\"\x85\t\n\x0c\x46ieldOptions\x12\x41\n\x05\x63type\x18\x01 \x01(\x0e\x32#.google.protobuf.FieldOptions.CType:\x06STRINGR\x05\x63type\x12\x16\n\x06packed\x18\x02 \x01(\x08R\x06packed\x12G\n\x06jstype\x18\x06 \x01(\x0e\x32$.google.protobuf.FieldOptions.JSType:\tJS_NORMALR\x06jstype\x12\x19\n\x04lazy\x18\x05 \x01(\x08:\x05\x66\x61lseR\x04lazy\x12.\n\x0funverified_lazy\x18\x0f \x01(\x08:\x05\x66\x61lseR\x0eunverifiedLazy\x12%\n\ndeprecated\x18\x03 \x01(\x08:\x05\x66\x61lseR\ndeprecated\x12\x19\n\x04weak\x18\n \x01(\x08:\x05\x66\x61lseR\x04weak\x12(\n\x0c\x64\x65\x62ug_redact\x18\x10 \x01(\x08:\x05\x66\x61lseR\x0b\x64\x65\x62ugRedact\x12K\n\tretention\x18\x11 \x01(\x0e\x32-.google.protobuf.FieldOptions.OptionRetentionR\tretention\x12J\n\x06target\x18\x12 
\x01(\x0e\x32..google.protobuf.FieldOptions.OptionTargetTypeB\x02\x18\x01R\x06target\x12H\n\x07targets\x18\x13 \x03(\x0e\x32..google.protobuf.FieldOptions.OptionTargetTypeR\x07targets\x12X\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\"/\n\x05\x43Type\x12\n\n\x06STRING\x10\x00\x12\x08\n\x04\x43ORD\x10\x01\x12\x10\n\x0cSTRING_PIECE\x10\x02\"5\n\x06JSType\x12\r\n\tJS_NORMAL\x10\x00\x12\r\n\tJS_STRING\x10\x01\x12\r\n\tJS_NUMBER\x10\x02\"U\n\x0fOptionRetention\x12\x15\n\x11RETENTION_UNKNOWN\x10\x00\x12\x15\n\x11RETENTION_RUNTIME\x10\x01\x12\x14\n\x10RETENTION_SOURCE\x10\x02\"\x8c\x02\n\x10OptionTargetType\x12\x17\n\x13TARGET_TYPE_UNKNOWN\x10\x00\x12\x14\n\x10TARGET_TYPE_FILE\x10\x01\x12\x1f\n\x1bTARGET_TYPE_EXTENSION_RANGE\x10\x02\x12\x17\n\x13TARGET_TYPE_MESSAGE\x10\x03\x12\x15\n\x11TARGET_TYPE_FIELD\x10\x04\x12\x15\n\x11TARGET_TYPE_ONEOF\x10\x05\x12\x14\n\x10TARGET_TYPE_ENUM\x10\x06\x12\x1a\n\x16TARGET_TYPE_ENUM_ENTRY\x10\x07\x12\x17\n\x13TARGET_TYPE_SERVICE\x10\x08\x12\x16\n\x12TARGET_TYPE_METHOD\x10\t*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02J\x04\x08\x04\x10\x05\"s\n\x0cOneofOptions\x12X\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\x98\x02\n\x0b\x45numOptions\x12\x1f\n\x0b\x61llow_alias\x18\x02 \x01(\x08R\nallowAlias\x12%\n\ndeprecated\x18\x03 \x01(\x08:\x05\x66\x61lseR\ndeprecated\x12V\n&deprecated_legacy_json_field_conflicts\x18\x06 \x01(\x08\x42\x02\x18\x01R\"deprecatedLegacyJsonFieldConflicts\x12X\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02J\x04\x08\x05\x10\x06\"\x9e\x01\n\x10\x45numValueOptions\x12%\n\ndeprecated\x18\x01 \x01(\x08:\x05\x66\x61lseR\ndeprecated\x12X\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\x9c\x01\n\x0eServiceOptions\x12%\n\ndeprecated\x18! \x01(\x08:\x05\x66\x61lseR\ndeprecated\x12X\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\xe0\x02\n\rMethodOptions\x12%\n\ndeprecated\x18! 
\x01(\x08:\x05\x66\x61lseR\ndeprecated\x12q\n\x11idempotency_level\x18\" \x01(\x0e\x32/.google.protobuf.MethodOptions.IdempotencyLevel:\x13IDEMPOTENCY_UNKNOWNR\x10idempotencyLevel\x12X\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\"P\n\x10IdempotencyLevel\x12\x17\n\x13IDEMPOTENCY_UNKNOWN\x10\x00\x12\x13\n\x0fNO_SIDE_EFFECTS\x10\x01\x12\x0e\n\nIDEMPOTENT\x10\x02*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\x9a\x03\n\x13UninterpretedOption\x12\x41\n\x04name\x18\x02 \x03(\x0b\x32-.google.protobuf.UninterpretedOption.NamePartR\x04name\x12)\n\x10identifier_value\x18\x03 \x01(\tR\x0fidentifierValue\x12,\n\x12positive_int_value\x18\x04 \x01(\x04R\x10positiveIntValue\x12,\n\x12negative_int_value\x18\x05 \x01(\x03R\x10negativeIntValue\x12!\n\x0c\x64ouble_value\x18\x06 \x01(\x01R\x0b\x64oubleValue\x12!\n\x0cstring_value\x18\x07 \x01(\x0cR\x0bstringValue\x12\'\n\x0f\x61ggregate_value\x18\x08 \x01(\tR\x0e\x61ggregateValue\x1aJ\n\x08NamePart\x12\x1b\n\tname_part\x18\x01 \x02(\tR\x08namePart\x12!\n\x0cis_extension\x18\x02 \x02(\x08R\x0bisExtension\"\xa7\x02\n\x0eSourceCodeInfo\x12\x44\n\x08location\x18\x01 \x03(\x0b\x32(.google.protobuf.SourceCodeInfo.LocationR\x08location\x1a\xce\x01\n\x08Location\x12\x16\n\x04path\x18\x01 \x03(\x05\x42\x02\x10\x01R\x04path\x12\x16\n\x04span\x18\x02 \x03(\x05\x42\x02\x10\x01R\x04span\x12)\n\x10leading_comments\x18\x03 \x01(\tR\x0fleadingComments\x12+\n\x11trailing_comments\x18\x04 \x01(\tR\x10trailingComments\x12:\n\x19leading_detached_comments\x18\x06 \x03(\tR\x17leadingDetachedComments\"\xd0\x02\n\x11GeneratedCodeInfo\x12M\n\nannotation\x18\x01 \x03(\x0b\x32-.google.protobuf.GeneratedCodeInfo.AnnotationR\nannotation\x1a\xeb\x01\n\nAnnotation\x12\x16\n\x04path\x18\x01 \x03(\x05\x42\x02\x10\x01R\x04path\x12\x1f\n\x0bsource_file\x18\x02 \x01(\tR\nsourceFile\x12\x14\n\x05\x62\x65gin\x18\x03 \x01(\x05R\x05\x62\x65gin\x12\x10\n\x03\x65nd\x18\x04 \x01(\x05R\x03\x65nd\x12R\n\x08semantic\x18\x05 \x01(\x0e\x32\x36.google.protobuf.GeneratedCodeInfo.Annotation.SemanticR\x08semantic\"(\n\x08Semantic\x12\x08\n\x04NONE\x10\x00\x12\x07\n\x03SET\x10\x01\x12\t\n\x05\x41LIAS\x10\x02\x42~\n\x13\x63om.google.protobufB\x10\x44\x65scriptorProtosH\x01Z-google.golang.org/protobuf/types/descriptorpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1aGoogle.Protobuf.Reflection' - ) -else: - DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n google/protobuf/descriptor.proto\x12\x0fgoogle.protobuf\"M\n\x11\x46ileDescriptorSet\x12\x38\n\x04\x66ile\x18\x01 \x03(\x0b\x32$.google.protobuf.FileDescriptorProtoR\x04\x66ile\"\xfe\x04\n\x13\x46ileDescriptorProto\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\x12\x18\n\x07package\x18\x02 \x01(\tR\x07package\x12\x1e\n\ndependency\x18\x03 \x03(\tR\ndependency\x12+\n\x11public_dependency\x18\n \x03(\x05R\x10publicDependency\x12\'\n\x0fweak_dependency\x18\x0b \x03(\x05R\x0eweakDependency\x12\x43\n\x0cmessage_type\x18\x04 \x03(\x0b\x32 .google.protobuf.DescriptorProtoR\x0bmessageType\x12\x41\n\tenum_type\x18\x05 \x03(\x0b\x32$.google.protobuf.EnumDescriptorProtoR\x08\x65numType\x12\x41\n\x07service\x18\x06 \x03(\x0b\x32\'.google.protobuf.ServiceDescriptorProtoR\x07service\x12\x43\n\textension\x18\x07 \x03(\x0b\x32%.google.protobuf.FieldDescriptorProtoR\textension\x12\x36\n\x07options\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.FileOptionsR\x07options\x12I\n\x10source_code_info\x18\t \x01(\x0b\x32\x1f.google.protobuf.SourceCodeInfoR\x0esourceCodeInfo\x12\x16\n\x06syntax\x18\x0c 
\x01(\tR\x06syntax\x12\x18\n\x07\x65\x64ition\x18\r \x01(\tR\x07\x65\x64ition\"\xb9\x06\n\x0f\x44\x65scriptorProto\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\x12;\n\x05\x66ield\x18\x02 \x03(\x0b\x32%.google.protobuf.FieldDescriptorProtoR\x05\x66ield\x12\x43\n\textension\x18\x06 \x03(\x0b\x32%.google.protobuf.FieldDescriptorProtoR\textension\x12\x41\n\x0bnested_type\x18\x03 \x03(\x0b\x32 .google.protobuf.DescriptorProtoR\nnestedType\x12\x41\n\tenum_type\x18\x04 \x03(\x0b\x32$.google.protobuf.EnumDescriptorProtoR\x08\x65numType\x12X\n\x0f\x65xtension_range\x18\x05 \x03(\x0b\x32/.google.protobuf.DescriptorProto.ExtensionRangeR\x0e\x65xtensionRange\x12\x44\n\noneof_decl\x18\x08 \x03(\x0b\x32%.google.protobuf.OneofDescriptorProtoR\toneofDecl\x12\x39\n\x07options\x18\x07 \x01(\x0b\x32\x1f.google.protobuf.MessageOptionsR\x07options\x12U\n\x0ereserved_range\x18\t \x03(\x0b\x32..google.protobuf.DescriptorProto.ReservedRangeR\rreservedRange\x12#\n\rreserved_name\x18\n \x03(\tR\x0creservedName\x1az\n\x0e\x45xtensionRange\x12\x14\n\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n\x03\x65nd\x18\x02 \x01(\x05R\x03\x65nd\x12@\n\x07options\x18\x03 \x01(\x0b\x32&.google.protobuf.ExtensionRangeOptionsR\x07options\x1a\x37\n\rReservedRange\x12\x14\n\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n\x03\x65nd\x18\x02 \x01(\x05R\x03\x65nd\"\xad\x04\n\x15\x45xtensionRangeOptions\x12X\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\x12Y\n\x0b\x64\x65\x63laration\x18\x02 \x03(\x0b\x32\x32.google.protobuf.ExtensionRangeOptions.DeclarationB\x03\x88\x01\x02R\x0b\x64\x65\x63laration\x12h\n\x0cverification\x18\x03 \x01(\x0e\x32\x38.google.protobuf.ExtensionRangeOptions.VerificationState:\nUNVERIFIEDR\x0cverification\x1a\xb3\x01\n\x0b\x44\x65\x63laration\x12\x16\n\x06number\x18\x01 \x01(\x05R\x06number\x12\x1b\n\tfull_name\x18\x02 \x01(\tR\x08\x66ullName\x12\x12\n\x04type\x18\x03 \x01(\tR\x04type\x12#\n\x0bis_repeated\x18\x04 \x01(\x08\x42\x02\x18\x01R\nisRepeated\x12\x1a\n\x08reserved\x18\x05 \x01(\x08R\x08reserved\x12\x1a\n\x08repeated\x18\x06 \x01(\x08R\x08repeated\"4\n\x11VerificationState\x12\x0f\n\x0b\x44\x45\x43LARATION\x10\x00\x12\x0e\n\nUNVERIFIED\x10\x01*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\xc1\x06\n\x14\x46ieldDescriptorProto\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\x12\x16\n\x06number\x18\x03 \x01(\x05R\x06number\x12\x41\n\x05label\x18\x04 \x01(\x0e\x32+.google.protobuf.FieldDescriptorProto.LabelR\x05label\x12>\n\x04type\x18\x05 \x01(\x0e\x32*.google.protobuf.FieldDescriptorProto.TypeR\x04type\x12\x1b\n\ttype_name\x18\x06 \x01(\tR\x08typeName\x12\x1a\n\x08\x65xtendee\x18\x02 \x01(\tR\x08\x65xtendee\x12#\n\rdefault_value\x18\x07 \x01(\tR\x0c\x64\x65\x66\x61ultValue\x12\x1f\n\x0boneof_index\x18\t \x01(\x05R\noneofIndex\x12\x1b\n\tjson_name\x18\n \x01(\tR\x08jsonName\x12\x37\n\x07options\x18\x08 \x01(\x0b\x32\x1d.google.protobuf.FieldOptionsR\x07options\x12\'\n\x0fproto3_optional\x18\x11 
\x01(\x08R\x0eproto3Optional\"\xb6\x02\n\x04Type\x12\x0f\n\x0bTYPE_DOUBLE\x10\x01\x12\x0e\n\nTYPE_FLOAT\x10\x02\x12\x0e\n\nTYPE_INT64\x10\x03\x12\x0f\n\x0bTYPE_UINT64\x10\x04\x12\x0e\n\nTYPE_INT32\x10\x05\x12\x10\n\x0cTYPE_FIXED64\x10\x06\x12\x10\n\x0cTYPE_FIXED32\x10\x07\x12\r\n\tTYPE_BOOL\x10\x08\x12\x0f\n\x0bTYPE_STRING\x10\t\x12\x0e\n\nTYPE_GROUP\x10\n\x12\x10\n\x0cTYPE_MESSAGE\x10\x0b\x12\x0e\n\nTYPE_BYTES\x10\x0c\x12\x0f\n\x0bTYPE_UINT32\x10\r\x12\r\n\tTYPE_ENUM\x10\x0e\x12\x11\n\rTYPE_SFIXED32\x10\x0f\x12\x11\n\rTYPE_SFIXED64\x10\x10\x12\x0f\n\x0bTYPE_SINT32\x10\x11\x12\x0f\n\x0bTYPE_SINT64\x10\x12\"C\n\x05Label\x12\x12\n\x0eLABEL_OPTIONAL\x10\x01\x12\x12\n\x0eLABEL_REQUIRED\x10\x02\x12\x12\n\x0eLABEL_REPEATED\x10\x03\"c\n\x14OneofDescriptorProto\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\x12\x37\n\x07options\x18\x02 \x01(\x0b\x32\x1d.google.protobuf.OneofOptionsR\x07options\"\xe3\x02\n\x13\x45numDescriptorProto\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\x12?\n\x05value\x18\x02 \x03(\x0b\x32).google.protobuf.EnumValueDescriptorProtoR\x05value\x12\x36\n\x07options\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.EnumOptionsR\x07options\x12]\n\x0ereserved_range\x18\x04 \x03(\x0b\x32\x36.google.protobuf.EnumDescriptorProto.EnumReservedRangeR\rreservedRange\x12#\n\rreserved_name\x18\x05 \x03(\tR\x0creservedName\x1a;\n\x11\x45numReservedRange\x12\x14\n\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n\x03\x65nd\x18\x02 \x01(\x05R\x03\x65nd\"\x83\x01\n\x18\x45numValueDescriptorProto\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\x12\x16\n\x06number\x18\x02 \x01(\x05R\x06number\x12;\n\x07options\x18\x03 \x01(\x0b\x32!.google.protobuf.EnumValueOptionsR\x07options\"\xa7\x01\n\x16ServiceDescriptorProto\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\x12>\n\x06method\x18\x02 \x03(\x0b\x32&.google.protobuf.MethodDescriptorProtoR\x06method\x12\x39\n\x07options\x18\x03 \x01(\x0b\x32\x1f.google.protobuf.ServiceOptionsR\x07options\"\x89\x02\n\x15MethodDescriptorProto\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\x12\x1d\n\ninput_type\x18\x02 \x01(\tR\tinputType\x12\x1f\n\x0boutput_type\x18\x03 \x01(\tR\noutputType\x12\x38\n\x07options\x18\x04 \x01(\x0b\x32\x1e.google.protobuf.MethodOptionsR\x07options\x12\x30\n\x10\x63lient_streaming\x18\x05 \x01(\x08:\x05\x66\x61lseR\x0f\x63lientStreaming\x12\x30\n\x10server_streaming\x18\x06 \x01(\x08:\x05\x66\x61lseR\x0fserverStreaming\"\x91\t\n\x0b\x46ileOptions\x12!\n\x0cjava_package\x18\x01 \x01(\tR\x0bjavaPackage\x12\x30\n\x14java_outer_classname\x18\x08 \x01(\tR\x12javaOuterClassname\x12\x35\n\x13java_multiple_files\x18\n \x01(\x08:\x05\x66\x61lseR\x11javaMultipleFiles\x12\x44\n\x1djava_generate_equals_and_hash\x18\x14 \x01(\x08\x42\x02\x18\x01R\x19javaGenerateEqualsAndHash\x12:\n\x16java_string_check_utf8\x18\x1b \x01(\x08:\x05\x66\x61lseR\x13javaStringCheckUtf8\x12S\n\x0coptimize_for\x18\t \x01(\x0e\x32).google.protobuf.FileOptions.OptimizeMode:\x05SPEEDR\x0boptimizeFor\x12\x1d\n\ngo_package\x18\x0b \x01(\tR\tgoPackage\x12\x35\n\x13\x63\x63_generic_services\x18\x10 \x01(\x08:\x05\x66\x61lseR\x11\x63\x63GenericServices\x12\x39\n\x15java_generic_services\x18\x11 \x01(\x08:\x05\x66\x61lseR\x13javaGenericServices\x12\x35\n\x13py_generic_services\x18\x12 \x01(\x08:\x05\x66\x61lseR\x11pyGenericServices\x12\x37\n\x14php_generic_services\x18* \x01(\x08:\x05\x66\x61lseR\x12phpGenericServices\x12%\n\ndeprecated\x18\x17 \x01(\x08:\x05\x66\x61lseR\ndeprecated\x12.\n\x10\x63\x63_enable_arenas\x18\x1f \x01(\x08:\x04trueR\x0e\x63\x63\x45nableArenas\x12*\n\x11objc_class_prefix\x18$ 
\x01(\tR\x0fobjcClassPrefix\x12)\n\x10\x63sharp_namespace\x18% \x01(\tR\x0f\x63sharpNamespace\x12!\n\x0cswift_prefix\x18\' \x01(\tR\x0bswiftPrefix\x12(\n\x10php_class_prefix\x18( \x01(\tR\x0ephpClassPrefix\x12#\n\rphp_namespace\x18) \x01(\tR\x0cphpNamespace\x12\x34\n\x16php_metadata_namespace\x18, \x01(\tR\x14phpMetadataNamespace\x12!\n\x0cruby_package\x18- \x01(\tR\x0brubyPackage\x12X\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\":\n\x0cOptimizeMode\x12\t\n\x05SPEED\x10\x01\x12\r\n\tCODE_SIZE\x10\x02\x12\x10\n\x0cLITE_RUNTIME\x10\x03*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02J\x04\x08&\x10\'\"\xbb\x03\n\x0eMessageOptions\x12<\n\x17message_set_wire_format\x18\x01 \x01(\x08:\x05\x66\x61lseR\x14messageSetWireFormat\x12L\n\x1fno_standard_descriptor_accessor\x18\x02 \x01(\x08:\x05\x66\x61lseR\x1cnoStandardDescriptorAccessor\x12%\n\ndeprecated\x18\x03 \x01(\x08:\x05\x66\x61lseR\ndeprecated\x12\x1b\n\tmap_entry\x18\x07 \x01(\x08R\x08mapEntry\x12V\n&deprecated_legacy_json_field_conflicts\x18\x0b \x01(\x08\x42\x02\x18\x01R\"deprecatedLegacyJsonFieldConflicts\x12X\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02J\x04\x08\x04\x10\x05J\x04\x08\x05\x10\x06J\x04\x08\x06\x10\x07J\x04\x08\x08\x10\tJ\x04\x08\t\x10\n\"\x85\t\n\x0c\x46ieldOptions\x12\x41\n\x05\x63type\x18\x01 \x01(\x0e\x32#.google.protobuf.FieldOptions.CType:\x06STRINGR\x05\x63type\x12\x16\n\x06packed\x18\x02 \x01(\x08R\x06packed\x12G\n\x06jstype\x18\x06 \x01(\x0e\x32$.google.protobuf.FieldOptions.JSType:\tJS_NORMALR\x06jstype\x12\x19\n\x04lazy\x18\x05 \x01(\x08:\x05\x66\x61lseR\x04lazy\x12.\n\x0funverified_lazy\x18\x0f \x01(\x08:\x05\x66\x61lseR\x0eunverifiedLazy\x12%\n\ndeprecated\x18\x03 \x01(\x08:\x05\x66\x61lseR\ndeprecated\x12\x19\n\x04weak\x18\n \x01(\x08:\x05\x66\x61lseR\x04weak\x12(\n\x0c\x64\x65\x62ug_redact\x18\x10 \x01(\x08:\x05\x66\x61lseR\x0b\x64\x65\x62ugRedact\x12K\n\tretention\x18\x11 \x01(\x0e\x32-.google.protobuf.FieldOptions.OptionRetentionR\tretention\x12J\n\x06target\x18\x12 \x01(\x0e\x32..google.protobuf.FieldOptions.OptionTargetTypeB\x02\x18\x01R\x06target\x12H\n\x07targets\x18\x13 \x03(\x0e\x32..google.protobuf.FieldOptions.OptionTargetTypeR\x07targets\x12X\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\"/\n\x05\x43Type\x12\n\n\x06STRING\x10\x00\x12\x08\n\x04\x43ORD\x10\x01\x12\x10\n\x0cSTRING_PIECE\x10\x02\"5\n\x06JSType\x12\r\n\tJS_NORMAL\x10\x00\x12\r\n\tJS_STRING\x10\x01\x12\r\n\tJS_NUMBER\x10\x02\"U\n\x0fOptionRetention\x12\x15\n\x11RETENTION_UNKNOWN\x10\x00\x12\x15\n\x11RETENTION_RUNTIME\x10\x01\x12\x14\n\x10RETENTION_SOURCE\x10\x02\"\x8c\x02\n\x10OptionTargetType\x12\x17\n\x13TARGET_TYPE_UNKNOWN\x10\x00\x12\x14\n\x10TARGET_TYPE_FILE\x10\x01\x12\x1f\n\x1bTARGET_TYPE_EXTENSION_RANGE\x10\x02\x12\x17\n\x13TARGET_TYPE_MESSAGE\x10\x03\x12\x15\n\x11TARGET_TYPE_FIELD\x10\x04\x12\x15\n\x11TARGET_TYPE_ONEOF\x10\x05\x12\x14\n\x10TARGET_TYPE_ENUM\x10\x06\x12\x1a\n\x16TARGET_TYPE_ENUM_ENTRY\x10\x07\x12\x17\n\x13TARGET_TYPE_SERVICE\x10\x08\x12\x16\n\x12TARGET_TYPE_METHOD\x10\t*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02J\x04\x08\x04\x10\x05\"s\n\x0cOneofOptions\x12X\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\x98\x02\n\x0b\x45numOptions\x12\x1f\n\x0b\x61llow_alias\x18\x02 
\x01(\x08R\nallowAlias\x12%\n\ndeprecated\x18\x03 \x01(\x08:\x05\x66\x61lseR\ndeprecated\x12V\n&deprecated_legacy_json_field_conflicts\x18\x06 \x01(\x08\x42\x02\x18\x01R\"deprecatedLegacyJsonFieldConflicts\x12X\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02J\x04\x08\x05\x10\x06\"\x9e\x01\n\x10\x45numValueOptions\x12%\n\ndeprecated\x18\x01 \x01(\x08:\x05\x66\x61lseR\ndeprecated\x12X\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\x9c\x01\n\x0eServiceOptions\x12%\n\ndeprecated\x18! \x01(\x08:\x05\x66\x61lseR\ndeprecated\x12X\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\xe0\x02\n\rMethodOptions\x12%\n\ndeprecated\x18! \x01(\x08:\x05\x66\x61lseR\ndeprecated\x12q\n\x11idempotency_level\x18\" \x01(\x0e\x32/.google.protobuf.MethodOptions.IdempotencyLevel:\x13IDEMPOTENCY_UNKNOWNR\x10idempotencyLevel\x12X\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\"P\n\x10IdempotencyLevel\x12\x17\n\x13IDEMPOTENCY_UNKNOWN\x10\x00\x12\x13\n\x0fNO_SIDE_EFFECTS\x10\x01\x12\x0e\n\nIDEMPOTENT\x10\x02*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\x9a\x03\n\x13UninterpretedOption\x12\x41\n\x04name\x18\x02 \x03(\x0b\x32-.google.protobuf.UninterpretedOption.NamePartR\x04name\x12)\n\x10identifier_value\x18\x03 \x01(\tR\x0fidentifierValue\x12,\n\x12positive_int_value\x18\x04 \x01(\x04R\x10positiveIntValue\x12,\n\x12negative_int_value\x18\x05 \x01(\x03R\x10negativeIntValue\x12!\n\x0c\x64ouble_value\x18\x06 \x01(\x01R\x0b\x64oubleValue\x12!\n\x0cstring_value\x18\x07 \x01(\x0cR\x0bstringValue\x12\'\n\x0f\x61ggregate_value\x18\x08 \x01(\tR\x0e\x61ggregateValue\x1aJ\n\x08NamePart\x12\x1b\n\tname_part\x18\x01 \x02(\tR\x08namePart\x12!\n\x0cis_extension\x18\x02 \x02(\x08R\x0bisExtension\"\xa7\x02\n\x0eSourceCodeInfo\x12\x44\n\x08location\x18\x01 \x03(\x0b\x32(.google.protobuf.SourceCodeInfo.LocationR\x08location\x1a\xce\x01\n\x08Location\x12\x16\n\x04path\x18\x01 \x03(\x05\x42\x02\x10\x01R\x04path\x12\x16\n\x04span\x18\x02 \x03(\x05\x42\x02\x10\x01R\x04span\x12)\n\x10leading_comments\x18\x03 \x01(\tR\x0fleadingComments\x12+\n\x11trailing_comments\x18\x04 \x01(\tR\x10trailingComments\x12:\n\x19leading_detached_comments\x18\x06 \x03(\tR\x17leadingDetachedComments\"\xd0\x02\n\x11GeneratedCodeInfo\x12M\n\nannotation\x18\x01 \x03(\x0b\x32-.google.protobuf.GeneratedCodeInfo.AnnotationR\nannotation\x1a\xeb\x01\n\nAnnotation\x12\x16\n\x04path\x18\x01 \x03(\x05\x42\x02\x10\x01R\x04path\x12\x1f\n\x0bsource_file\x18\x02 \x01(\tR\nsourceFile\x12\x14\n\x05\x62\x65gin\x18\x03 \x01(\x05R\x05\x62\x65gin\x12\x10\n\x03\x65nd\x18\x04 \x01(\x05R\x03\x65nd\x12R\n\x08semantic\x18\x05 \x01(\x0e\x32\x36.google.protobuf.GeneratedCodeInfo.Annotation.SemanticR\x08semantic\"(\n\x08Semantic\x12\x08\n\x04NONE\x10\x00\x12\x07\n\x03SET\x10\x01\x12\t\n\x05\x41LIAS\x10\x02\x42~\n\x13\x63om.google.protobufB\x10\x44\x65scriptorProtosH\x01Z-google.golang.org/protobuf/types/descriptorpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1aGoogle.Protobuf.Reflection') - -_globals = globals() -if _descriptor._USE_C_DESCRIPTORS == False: - _EXTENSIONRANGEOPTIONS_VERIFICATIONSTATE = _descriptor.EnumDescriptor( - name='VerificationState', - full_name='google.protobuf.ExtensionRangeOptions.VerificationState', - 
filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name='DECLARATION', index=0, number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='UNVERIFIED', index=1, number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - ], - containing_type=None, - serialized_options=None, - ) - _sym_db.RegisterEnumDescriptor(_EXTENSIONRANGEOPTIONS_VERIFICATIONSTATE) - - _FIELDDESCRIPTORPROTO_TYPE = _descriptor.EnumDescriptor( - name='Type', - full_name='google.protobuf.FieldDescriptorProto.Type', - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name='TYPE_DOUBLE', index=0, number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='TYPE_FLOAT', index=1, number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='TYPE_INT64', index=2, number=3, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='TYPE_UINT64', index=3, number=4, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='TYPE_INT32', index=4, number=5, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='TYPE_FIXED64', index=5, number=6, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='TYPE_FIXED32', index=6, number=7, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='TYPE_BOOL', index=7, number=8, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='TYPE_STRING', index=8, number=9, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='TYPE_GROUP', index=9, number=10, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='TYPE_MESSAGE', index=10, number=11, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='TYPE_BYTES', index=11, number=12, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='TYPE_UINT32', index=12, number=13, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='TYPE_ENUM', index=13, number=14, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='TYPE_SFIXED32', index=14, number=15, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='TYPE_SFIXED64', index=15, number=16, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='TYPE_SINT32', index=16, number=17, - serialized_options=None, - type=None, - 
create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='TYPE_SINT64', index=17, number=18, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - ], - containing_type=None, - serialized_options=None, - ) - _sym_db.RegisterEnumDescriptor(_FIELDDESCRIPTORPROTO_TYPE) - - _FIELDDESCRIPTORPROTO_LABEL = _descriptor.EnumDescriptor( - name='Label', - full_name='google.protobuf.FieldDescriptorProto.Label', - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name='LABEL_OPTIONAL', index=0, number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='LABEL_REQUIRED', index=1, number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='LABEL_REPEATED', index=2, number=3, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - ], - containing_type=None, - serialized_options=None, - ) - _sym_db.RegisterEnumDescriptor(_FIELDDESCRIPTORPROTO_LABEL) - - _FILEOPTIONS_OPTIMIZEMODE = _descriptor.EnumDescriptor( - name='OptimizeMode', - full_name='google.protobuf.FileOptions.OptimizeMode', - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name='SPEED', index=0, number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='CODE_SIZE', index=1, number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='LITE_RUNTIME', index=2, number=3, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - ], - containing_type=None, - serialized_options=None, - ) - _sym_db.RegisterEnumDescriptor(_FILEOPTIONS_OPTIMIZEMODE) - - _FIELDOPTIONS_CTYPE = _descriptor.EnumDescriptor( - name='CType', - full_name='google.protobuf.FieldOptions.CType', - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name='STRING', index=0, number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='CORD', index=1, number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='STRING_PIECE', index=2, number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - ], - containing_type=None, - serialized_options=None, - ) - _sym_db.RegisterEnumDescriptor(_FIELDOPTIONS_CTYPE) - - _FIELDOPTIONS_JSTYPE = _descriptor.EnumDescriptor( - name='JSType', - full_name='google.protobuf.FieldOptions.JSType', - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name='JS_NORMAL', index=0, number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='JS_STRING', index=1, number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='JS_NUMBER', index=2, number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - ], - 
containing_type=None, - serialized_options=None, - ) - _sym_db.RegisterEnumDescriptor(_FIELDOPTIONS_JSTYPE) - - _FIELDOPTIONS_OPTIONRETENTION = _descriptor.EnumDescriptor( - name='OptionRetention', - full_name='google.protobuf.FieldOptions.OptionRetention', - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name='RETENTION_UNKNOWN', index=0, number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='RETENTION_RUNTIME', index=1, number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='RETENTION_SOURCE', index=2, number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - ], - containing_type=None, - serialized_options=None, - ) - _sym_db.RegisterEnumDescriptor(_FIELDOPTIONS_OPTIONRETENTION) - - _FIELDOPTIONS_OPTIONTARGETTYPE = _descriptor.EnumDescriptor( - name='OptionTargetType', - full_name='google.protobuf.FieldOptions.OptionTargetType', - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name='TARGET_TYPE_UNKNOWN', index=0, number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='TARGET_TYPE_FILE', index=1, number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='TARGET_TYPE_EXTENSION_RANGE', index=2, number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='TARGET_TYPE_MESSAGE', index=3, number=3, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='TARGET_TYPE_FIELD', index=4, number=4, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='TARGET_TYPE_ONEOF', index=5, number=5, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='TARGET_TYPE_ENUM', index=6, number=6, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='TARGET_TYPE_ENUM_ENTRY', index=7, number=7, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='TARGET_TYPE_SERVICE', index=8, number=8, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='TARGET_TYPE_METHOD', index=9, number=9, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - ], - containing_type=None, - serialized_options=None, - ) - _sym_db.RegisterEnumDescriptor(_FIELDOPTIONS_OPTIONTARGETTYPE) - - _METHODOPTIONS_IDEMPOTENCYLEVEL = _descriptor.EnumDescriptor( - name='IdempotencyLevel', - full_name='google.protobuf.MethodOptions.IdempotencyLevel', - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name='IDEMPOTENCY_UNKNOWN', index=0, number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - 
name='NO_SIDE_EFFECTS', index=1, number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='IDEMPOTENT', index=2, number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - ], - containing_type=None, - serialized_options=None, - ) - _sym_db.RegisterEnumDescriptor(_METHODOPTIONS_IDEMPOTENCYLEVEL) - - _GENERATEDCODEINFO_ANNOTATION_SEMANTIC = _descriptor.EnumDescriptor( - name='Semantic', - full_name='google.protobuf.GeneratedCodeInfo.Annotation.Semantic', - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name='NONE', index=0, number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='SET', index=1, number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='ALIAS', index=2, number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - ], - containing_type=None, - serialized_options=None, - ) - _sym_db.RegisterEnumDescriptor(_GENERATEDCODEINFO_ANNOTATION_SEMANTIC) - - - _FILEDESCRIPTORSET = _descriptor.Descriptor( - name='FileDescriptorSet', - full_name='google.protobuf.FileDescriptorSet', - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name='file', full_name='google.protobuf.FileDescriptorSet.file', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='file', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - ) - - - _FILEDESCRIPTORPROTO = _descriptor.Descriptor( - name='FileDescriptorProto', - full_name='google.protobuf.FileDescriptorProto', - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.protobuf.FileDescriptorProto.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='name', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='package', full_name='google.protobuf.FileDescriptorProto.package', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='package', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='dependency', full_name='google.protobuf.FileDescriptorProto.dependency', index=2, - number=3, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, 
json_name='dependency', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='public_dependency', full_name='google.protobuf.FileDescriptorProto.public_dependency', index=3, - number=10, type=5, cpp_type=1, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='publicDependency', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='weak_dependency', full_name='google.protobuf.FileDescriptorProto.weak_dependency', index=4, - number=11, type=5, cpp_type=1, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='weakDependency', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='message_type', full_name='google.protobuf.FileDescriptorProto.message_type', index=5, - number=4, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='messageType', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='enum_type', full_name='google.protobuf.FileDescriptorProto.enum_type', index=6, - number=5, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='enumType', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='service', full_name='google.protobuf.FileDescriptorProto.service', index=7, - number=6, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='service', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='extension', full_name='google.protobuf.FileDescriptorProto.extension', index=8, - number=7, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='extension', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='options', full_name='google.protobuf.FileDescriptorProto.options', index=9, - number=8, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='options', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='source_code_info', full_name='google.protobuf.FileDescriptorProto.source_code_info', index=10, - number=9, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='sourceCodeInfo', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - 
_descriptor.FieldDescriptor( - name='syntax', full_name='google.protobuf.FileDescriptorProto.syntax', index=11, - number=12, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='syntax', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='edition', full_name='google.protobuf.FileDescriptorProto.edition', index=12, - number=13, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='edition', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - ) - - - _DESCRIPTORPROTO_EXTENSIONRANGE = _descriptor.Descriptor( - name='ExtensionRange', - full_name='google.protobuf.DescriptorProto.ExtensionRange', - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name='start', full_name='google.protobuf.DescriptorProto.ExtensionRange.start', index=0, - number=1, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='start', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='end', full_name='google.protobuf.DescriptorProto.ExtensionRange.end', index=1, - number=2, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='end', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='options', full_name='google.protobuf.DescriptorProto.ExtensionRange.options', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='options', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - ) - - _DESCRIPTORPROTO_RESERVEDRANGE = _descriptor.Descriptor( - name='ReservedRange', - full_name='google.protobuf.DescriptorProto.ReservedRange', - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name='start', full_name='google.protobuf.DescriptorProto.ReservedRange.start', index=0, - number=1, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='start', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='end', full_name='google.protobuf.DescriptorProto.ReservedRange.end', index=1, - 
number=2, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='end', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - ) - - _DESCRIPTORPROTO = _descriptor.Descriptor( - name='DescriptorProto', - full_name='google.protobuf.DescriptorProto', - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.protobuf.DescriptorProto.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='name', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='field', full_name='google.protobuf.DescriptorProto.field', index=1, - number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='field', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='extension', full_name='google.protobuf.DescriptorProto.extension', index=2, - number=6, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='extension', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='nested_type', full_name='google.protobuf.DescriptorProto.nested_type', index=3, - number=3, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='nestedType', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='enum_type', full_name='google.protobuf.DescriptorProto.enum_type', index=4, - number=4, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='enumType', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='extension_range', full_name='google.protobuf.DescriptorProto.extension_range', index=5, - number=5, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='extensionRange', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='oneof_decl', full_name='google.protobuf.DescriptorProto.oneof_decl', index=6, - number=8, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - 
serialized_options=None, json_name='oneofDecl', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='options', full_name='google.protobuf.DescriptorProto.options', index=7, - number=7, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='options', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='reserved_range', full_name='google.protobuf.DescriptorProto.reserved_range', index=8, - number=9, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='reservedRange', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='reserved_name', full_name='google.protobuf.DescriptorProto.reserved_name', index=9, - number=10, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='reservedName', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - ], - extensions=[ - ], - nested_types=[_DESCRIPTORPROTO_EXTENSIONRANGE, _DESCRIPTORPROTO_RESERVEDRANGE, ], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - ) - - - _EXTENSIONRANGEOPTIONS_DECLARATION = _descriptor.Descriptor( - name='Declaration', - full_name='google.protobuf.ExtensionRangeOptions.Declaration', - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name='number', full_name='google.protobuf.ExtensionRangeOptions.Declaration.number', index=0, - number=1, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='number', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='full_name', full_name='google.protobuf.ExtensionRangeOptions.Declaration.full_name', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='fullName', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='type', full_name='google.protobuf.ExtensionRangeOptions.Declaration.type', index=2, - number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='type', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='is_repeated', full_name='google.protobuf.ExtensionRangeOptions.Declaration.is_repeated', index=3, - number=4, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - 
serialized_options=None, json_name='isRepeated', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='reserved', full_name='google.protobuf.ExtensionRangeOptions.Declaration.reserved', index=4, - number=5, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='reserved', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='repeated', full_name='google.protobuf.ExtensionRangeOptions.Declaration.repeated', index=5, - number=6, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='repeated', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - ) - - _EXTENSIONRANGEOPTIONS = _descriptor.Descriptor( - name='ExtensionRangeOptions', - full_name='google.protobuf.ExtensionRangeOptions', - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name='uninterpreted_option', full_name='google.protobuf.ExtensionRangeOptions.uninterpreted_option', index=0, - number=999, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='uninterpretedOption', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='declaration', full_name='google.protobuf.ExtensionRangeOptions.declaration', index=1, - number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='declaration', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='verification', full_name='google.protobuf.ExtensionRangeOptions.verification', index=2, - number=3, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='verification', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - ], - extensions=[ - ], - nested_types=[_EXTENSIONRANGEOPTIONS_DECLARATION, ], - enum_types=[ - _EXTENSIONRANGEOPTIONS_VERIFICATIONSTATE, - ], - serialized_options=None, - is_extendable=True, - syntax='proto2', - extension_ranges=[(1000, 536870912), ], - oneofs=[ - ], - ) - - - _FIELDDESCRIPTORPROTO = _descriptor.Descriptor( - name='FieldDescriptorProto', - full_name='google.protobuf.FieldDescriptorProto', - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.protobuf.FieldDescriptorProto.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - 
is_extension=False, extension_scope=None, - serialized_options=None, json_name='name', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='number', full_name='google.protobuf.FieldDescriptorProto.number', index=1, - number=3, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='number', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='label', full_name='google.protobuf.FieldDescriptorProto.label', index=2, - number=4, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='label', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='type', full_name='google.protobuf.FieldDescriptorProto.type', index=3, - number=5, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='type', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='type_name', full_name='google.protobuf.FieldDescriptorProto.type_name', index=4, - number=6, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='typeName', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='extendee', full_name='google.protobuf.FieldDescriptorProto.extendee', index=5, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='extendee', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='default_value', full_name='google.protobuf.FieldDescriptorProto.default_value', index=6, - number=7, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='defaultValue', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='oneof_index', full_name='google.protobuf.FieldDescriptorProto.oneof_index', index=7, - number=9, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='oneofIndex', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='json_name', full_name='google.protobuf.FieldDescriptorProto.json_name', index=8, - number=10, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='jsonName', file=DESCRIPTOR, 
create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='options', full_name='google.protobuf.FieldDescriptorProto.options', index=9, - number=8, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='options', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='proto3_optional', full_name='google.protobuf.FieldDescriptorProto.proto3_optional', index=10, - number=17, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='proto3Optional', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _FIELDDESCRIPTORPROTO_TYPE, - _FIELDDESCRIPTORPROTO_LABEL, - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - ) - - - _ONEOFDESCRIPTORPROTO = _descriptor.Descriptor( - name='OneofDescriptorProto', - full_name='google.protobuf.OneofDescriptorProto', - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.protobuf.OneofDescriptorProto.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='name', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='options', full_name='google.protobuf.OneofDescriptorProto.options', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='options', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - ) - - - _ENUMDESCRIPTORPROTO_ENUMRESERVEDRANGE = _descriptor.Descriptor( - name='EnumReservedRange', - full_name='google.protobuf.EnumDescriptorProto.EnumReservedRange', - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name='start', full_name='google.protobuf.EnumDescriptorProto.EnumReservedRange.start', index=0, - number=1, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='start', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='end', full_name='google.protobuf.EnumDescriptorProto.EnumReservedRange.end', index=1, - number=2, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='end', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), 
- ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - ) - - _ENUMDESCRIPTORPROTO = _descriptor.Descriptor( - name='EnumDescriptorProto', - full_name='google.protobuf.EnumDescriptorProto', - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.protobuf.EnumDescriptorProto.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='name', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='value', full_name='google.protobuf.EnumDescriptorProto.value', index=1, - number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='value', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='options', full_name='google.protobuf.EnumDescriptorProto.options', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='options', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='reserved_range', full_name='google.protobuf.EnumDescriptorProto.reserved_range', index=3, - number=4, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='reservedRange', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='reserved_name', full_name='google.protobuf.EnumDescriptorProto.reserved_name', index=4, - number=5, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='reservedName', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - ], - extensions=[ - ], - nested_types=[_ENUMDESCRIPTORPROTO_ENUMRESERVEDRANGE, ], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - ) - - - _ENUMVALUEDESCRIPTORPROTO = _descriptor.Descriptor( - name='EnumValueDescriptorProto', - full_name='google.protobuf.EnumValueDescriptorProto', - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.protobuf.EnumValueDescriptorProto.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='name', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='number', 
full_name='google.protobuf.EnumValueDescriptorProto.number', index=1, - number=2, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='number', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='options', full_name='google.protobuf.EnumValueDescriptorProto.options', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='options', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - ) - - - _SERVICEDESCRIPTORPROTO = _descriptor.Descriptor( - name='ServiceDescriptorProto', - full_name='google.protobuf.ServiceDescriptorProto', - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.protobuf.ServiceDescriptorProto.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='name', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='method', full_name='google.protobuf.ServiceDescriptorProto.method', index=1, - number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='method', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='options', full_name='google.protobuf.ServiceDescriptorProto.options', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='options', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - ) - - - _METHODDESCRIPTORPROTO = _descriptor.Descriptor( - name='MethodDescriptorProto', - full_name='google.protobuf.MethodDescriptorProto', - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.protobuf.MethodDescriptorProto.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='name', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='input_type', full_name='google.protobuf.MethodDescriptorProto.input_type', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, 
default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='inputType', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='output_type', full_name='google.protobuf.MethodDescriptorProto.output_type', index=2, - number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='outputType', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='options', full_name='google.protobuf.MethodDescriptorProto.options', index=3, - number=4, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='options', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='client_streaming', full_name='google.protobuf.MethodDescriptorProto.client_streaming', index=4, - number=5, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='clientStreaming', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='server_streaming', full_name='google.protobuf.MethodDescriptorProto.server_streaming', index=5, - number=6, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='serverStreaming', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - ) - - - _FILEOPTIONS = _descriptor.Descriptor( - name='FileOptions', - full_name='google.protobuf.FileOptions', - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name='java_package', full_name='google.protobuf.FileOptions.java_package', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='javaPackage', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='java_outer_classname', full_name='google.protobuf.FileOptions.java_outer_classname', index=1, - number=8, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='javaOuterClassname', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='java_multiple_files', full_name='google.protobuf.FileOptions.java_multiple_files', index=2, - number=10, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, 
enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='javaMultipleFiles', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='java_generate_equals_and_hash', full_name='google.protobuf.FileOptions.java_generate_equals_and_hash', index=3, - number=20, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='javaGenerateEqualsAndHash', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='java_string_check_utf8', full_name='google.protobuf.FileOptions.java_string_check_utf8', index=4, - number=27, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='javaStringCheckUtf8', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='optimize_for', full_name='google.protobuf.FileOptions.optimize_for', index=5, - number=9, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='optimizeFor', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='go_package', full_name='google.protobuf.FileOptions.go_package', index=6, - number=11, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='goPackage', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='cc_generic_services', full_name='google.protobuf.FileOptions.cc_generic_services', index=7, - number=16, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='ccGenericServices', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='java_generic_services', full_name='google.protobuf.FileOptions.java_generic_services', index=8, - number=17, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='javaGenericServices', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='py_generic_services', full_name='google.protobuf.FileOptions.py_generic_services', index=9, - number=18, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='pyGenericServices', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='php_generic_services', full_name='google.protobuf.FileOptions.php_generic_services', index=10, - number=42, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - 
message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='phpGenericServices', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='deprecated', full_name='google.protobuf.FileOptions.deprecated', index=11, - number=23, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='deprecated', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='cc_enable_arenas', full_name='google.protobuf.FileOptions.cc_enable_arenas', index=12, - number=31, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=True, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='ccEnableArenas', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='objc_class_prefix', full_name='google.protobuf.FileOptions.objc_class_prefix', index=13, - number=36, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='objcClassPrefix', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='csharp_namespace', full_name='google.protobuf.FileOptions.csharp_namespace', index=14, - number=37, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='csharpNamespace', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='swift_prefix', full_name='google.protobuf.FileOptions.swift_prefix', index=15, - number=39, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='swiftPrefix', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='php_class_prefix', full_name='google.protobuf.FileOptions.php_class_prefix', index=16, - number=40, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='phpClassPrefix', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='php_namespace', full_name='google.protobuf.FileOptions.php_namespace', index=17, - number=41, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='phpNamespace', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='php_metadata_namespace', full_name='google.protobuf.FileOptions.php_metadata_namespace', index=18, - number=44, type=9, cpp_type=9, label=1, - has_default_value=False, 
default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='phpMetadataNamespace', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='ruby_package', full_name='google.protobuf.FileOptions.ruby_package', index=19, - number=45, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='rubyPackage', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='uninterpreted_option', full_name='google.protobuf.FileOptions.uninterpreted_option', index=20, - number=999, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='uninterpretedOption', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _FILEOPTIONS_OPTIMIZEMODE, - ], - serialized_options=None, - is_extendable=True, - syntax='proto2', - extension_ranges=[(1000, 536870912), ], - oneofs=[ - ], - ) - - - _MESSAGEOPTIONS = _descriptor.Descriptor( - name='MessageOptions', - full_name='google.protobuf.MessageOptions', - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name='message_set_wire_format', full_name='google.protobuf.MessageOptions.message_set_wire_format', index=0, - number=1, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='messageSetWireFormat', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='no_standard_descriptor_accessor', full_name='google.protobuf.MessageOptions.no_standard_descriptor_accessor', index=1, - number=2, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='noStandardDescriptorAccessor', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='deprecated', full_name='google.protobuf.MessageOptions.deprecated', index=2, - number=3, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='deprecated', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='map_entry', full_name='google.protobuf.MessageOptions.map_entry', index=3, - number=7, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='mapEntry', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='deprecated_legacy_json_field_conflicts', full_name='google.protobuf.MessageOptions.deprecated_legacy_json_field_conflicts', 
index=4, - number=11, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='deprecatedLegacyJsonFieldConflicts', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='uninterpreted_option', full_name='google.protobuf.MessageOptions.uninterpreted_option', index=5, - number=999, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='uninterpretedOption', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=True, - syntax='proto2', - extension_ranges=[(1000, 536870912), ], - oneofs=[ - ], - ) - - - _FIELDOPTIONS = _descriptor.Descriptor( - name='FieldOptions', - full_name='google.protobuf.FieldOptions', - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name='ctype', full_name='google.protobuf.FieldOptions.ctype', index=0, - number=1, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='ctype', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='packed', full_name='google.protobuf.FieldOptions.packed', index=1, - number=2, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='packed', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='jstype', full_name='google.protobuf.FieldOptions.jstype', index=2, - number=6, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='jstype', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='lazy', full_name='google.protobuf.FieldOptions.lazy', index=3, - number=5, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='lazy', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='unverified_lazy', full_name='google.protobuf.FieldOptions.unverified_lazy', index=4, - number=15, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='unverifiedLazy', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='deprecated', full_name='google.protobuf.FieldOptions.deprecated', index=5, - number=3, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, 
extension_scope=None, - serialized_options=None, json_name='deprecated', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='weak', full_name='google.protobuf.FieldOptions.weak', index=6, - number=10, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='weak', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='debug_redact', full_name='google.protobuf.FieldOptions.debug_redact', index=7, - number=16, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='debugRedact', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='retention', full_name='google.protobuf.FieldOptions.retention', index=8, - number=17, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='retention', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='target', full_name='google.protobuf.FieldOptions.target', index=9, - number=18, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='target', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='targets', full_name='google.protobuf.FieldOptions.targets', index=10, - number=19, type=14, cpp_type=8, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='targets', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='uninterpreted_option', full_name='google.protobuf.FieldOptions.uninterpreted_option', index=11, - number=999, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='uninterpretedOption', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _FIELDOPTIONS_CTYPE, - _FIELDOPTIONS_JSTYPE, - _FIELDOPTIONS_OPTIONRETENTION, - _FIELDOPTIONS_OPTIONTARGETTYPE, - ], - serialized_options=None, - is_extendable=True, - syntax='proto2', - extension_ranges=[(1000, 536870912), ], - oneofs=[ - ], - ) - - - _ONEOFOPTIONS = _descriptor.Descriptor( - name='OneofOptions', - full_name='google.protobuf.OneofOptions', - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name='uninterpreted_option', full_name='google.protobuf.OneofOptions.uninterpreted_option', index=0, - number=999, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, 
json_name='uninterpretedOption', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=True, - syntax='proto2', - extension_ranges=[(1000, 536870912), ], - oneofs=[ - ], - ) - - - _ENUMOPTIONS = _descriptor.Descriptor( - name='EnumOptions', - full_name='google.protobuf.EnumOptions', - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name='allow_alias', full_name='google.protobuf.EnumOptions.allow_alias', index=0, - number=2, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='allowAlias', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='deprecated', full_name='google.protobuf.EnumOptions.deprecated', index=1, - number=3, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='deprecated', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='deprecated_legacy_json_field_conflicts', full_name='google.protobuf.EnumOptions.deprecated_legacy_json_field_conflicts', index=2, - number=6, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='deprecatedLegacyJsonFieldConflicts', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='uninterpreted_option', full_name='google.protobuf.EnumOptions.uninterpreted_option', index=3, - number=999, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='uninterpretedOption', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=True, - syntax='proto2', - extension_ranges=[(1000, 536870912), ], - oneofs=[ - ], - ) - - - _ENUMVALUEOPTIONS = _descriptor.Descriptor( - name='EnumValueOptions', - full_name='google.protobuf.EnumValueOptions', - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name='deprecated', full_name='google.protobuf.EnumValueOptions.deprecated', index=0, - number=1, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='deprecated', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='uninterpreted_option', full_name='google.protobuf.EnumValueOptions.uninterpreted_option', index=1, - number=999, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, 
json_name='uninterpretedOption', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=True, - syntax='proto2', - extension_ranges=[(1000, 536870912), ], - oneofs=[ - ], - ) - - - _SERVICEOPTIONS = _descriptor.Descriptor( - name='ServiceOptions', - full_name='google.protobuf.ServiceOptions', - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name='deprecated', full_name='google.protobuf.ServiceOptions.deprecated', index=0, - number=33, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='deprecated', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='uninterpreted_option', full_name='google.protobuf.ServiceOptions.uninterpreted_option', index=1, - number=999, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='uninterpretedOption', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=True, - syntax='proto2', - extension_ranges=[(1000, 536870912), ], - oneofs=[ - ], - ) - - - _METHODOPTIONS = _descriptor.Descriptor( - name='MethodOptions', - full_name='google.protobuf.MethodOptions', - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name='deprecated', full_name='google.protobuf.MethodOptions.deprecated', index=0, - number=33, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='deprecated', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='idempotency_level', full_name='google.protobuf.MethodOptions.idempotency_level', index=1, - number=34, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='idempotencyLevel', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='uninterpreted_option', full_name='google.protobuf.MethodOptions.uninterpreted_option', index=2, - number=999, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='uninterpretedOption', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _METHODOPTIONS_IDEMPOTENCYLEVEL, - ], - serialized_options=None, - is_extendable=True, - syntax='proto2', - extension_ranges=[(1000, 536870912), ], - oneofs=[ - ], - ) - - - _UNINTERPRETEDOPTION_NAMEPART = _descriptor.Descriptor( - name='NamePart', - full_name='google.protobuf.UninterpretedOption.NamePart', - filename=None, - file=DESCRIPTOR, - 
containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name='name_part', full_name='google.protobuf.UninterpretedOption.NamePart.name_part', index=0, - number=1, type=9, cpp_type=9, label=2, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='namePart', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='is_extension', full_name='google.protobuf.UninterpretedOption.NamePart.is_extension', index=1, - number=2, type=8, cpp_type=7, label=2, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='isExtension', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - ) - - _UNINTERPRETEDOPTION = _descriptor.Descriptor( - name='UninterpretedOption', - full_name='google.protobuf.UninterpretedOption', - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.protobuf.UninterpretedOption.name', index=0, - number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='name', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='identifier_value', full_name='google.protobuf.UninterpretedOption.identifier_value', index=1, - number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='identifierValue', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='positive_int_value', full_name='google.protobuf.UninterpretedOption.positive_int_value', index=2, - number=4, type=4, cpp_type=4, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='positiveIntValue', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='negative_int_value', full_name='google.protobuf.UninterpretedOption.negative_int_value', index=3, - number=5, type=3, cpp_type=2, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='negativeIntValue', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='double_value', full_name='google.protobuf.UninterpretedOption.double_value', index=4, - number=6, type=1, cpp_type=5, label=1, - has_default_value=False, default_value=float(0), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='doubleValue', file=DESCRIPTOR, 
create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='string_value', full_name='google.protobuf.UninterpretedOption.string_value', index=5, - number=7, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='stringValue', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='aggregate_value', full_name='google.protobuf.UninterpretedOption.aggregate_value', index=6, - number=8, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='aggregateValue', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - ], - extensions=[ - ], - nested_types=[_UNINTERPRETEDOPTION_NAMEPART, ], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - ) - - - _SOURCECODEINFO_LOCATION = _descriptor.Descriptor( - name='Location', - full_name='google.protobuf.SourceCodeInfo.Location', - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name='path', full_name='google.protobuf.SourceCodeInfo.Location.path', index=0, - number=1, type=5, cpp_type=1, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='path', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='span', full_name='google.protobuf.SourceCodeInfo.Location.span', index=1, - number=2, type=5, cpp_type=1, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='span', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='leading_comments', full_name='google.protobuf.SourceCodeInfo.Location.leading_comments', index=2, - number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='leadingComments', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='trailing_comments', full_name='google.protobuf.SourceCodeInfo.Location.trailing_comments', index=3, - number=4, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='trailingComments', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='leading_detached_comments', full_name='google.protobuf.SourceCodeInfo.Location.leading_detached_comments', index=4, - number=6, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='leadingDetachedComments', 
file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - ) - - _SOURCECODEINFO = _descriptor.Descriptor( - name='SourceCodeInfo', - full_name='google.protobuf.SourceCodeInfo', - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name='location', full_name='google.protobuf.SourceCodeInfo.location', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='location', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - ], - extensions=[ - ], - nested_types=[_SOURCECODEINFO_LOCATION, ], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - ) - - - _GENERATEDCODEINFO_ANNOTATION = _descriptor.Descriptor( - name='Annotation', - full_name='google.protobuf.GeneratedCodeInfo.Annotation', - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name='path', full_name='google.protobuf.GeneratedCodeInfo.Annotation.path', index=0, - number=1, type=5, cpp_type=1, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='path', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='source_file', full_name='google.protobuf.GeneratedCodeInfo.Annotation.source_file', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='sourceFile', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='begin', full_name='google.protobuf.GeneratedCodeInfo.Annotation.begin', index=2, - number=3, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='begin', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='end', full_name='google.protobuf.GeneratedCodeInfo.Annotation.end', index=3, - number=4, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='end', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='semantic', full_name='google.protobuf.GeneratedCodeInfo.Annotation.semantic', index=4, - number=5, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='semantic', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - 
_GENERATEDCODEINFO_ANNOTATION_SEMANTIC, - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - ) - - _GENERATEDCODEINFO = _descriptor.Descriptor( - name='GeneratedCodeInfo', - full_name='google.protobuf.GeneratedCodeInfo', - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name='annotation', full_name='google.protobuf.GeneratedCodeInfo.annotation', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, json_name='annotation', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - ], - extensions=[ - ], - nested_types=[_GENERATEDCODEINFO_ANNOTATION, ], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - ) - - _FILEDESCRIPTORSET.fields_by_name['file'].message_type = _FILEDESCRIPTORPROTO - _FILEDESCRIPTORPROTO.fields_by_name['message_type'].message_type = _DESCRIPTORPROTO - _FILEDESCRIPTORPROTO.fields_by_name['enum_type'].message_type = _ENUMDESCRIPTORPROTO - _FILEDESCRIPTORPROTO.fields_by_name['service'].message_type = _SERVICEDESCRIPTORPROTO - _FILEDESCRIPTORPROTO.fields_by_name['extension'].message_type = _FIELDDESCRIPTORPROTO - _FILEDESCRIPTORPROTO.fields_by_name['options'].message_type = _FILEOPTIONS - _FILEDESCRIPTORPROTO.fields_by_name['source_code_info'].message_type = _SOURCECODEINFO - _DESCRIPTORPROTO_EXTENSIONRANGE.fields_by_name['options'].message_type = _EXTENSIONRANGEOPTIONS - _DESCRIPTORPROTO_EXTENSIONRANGE.containing_type = _DESCRIPTORPROTO - _DESCRIPTORPROTO_RESERVEDRANGE.containing_type = _DESCRIPTORPROTO - _DESCRIPTORPROTO.fields_by_name['field'].message_type = _FIELDDESCRIPTORPROTO - _DESCRIPTORPROTO.fields_by_name['extension'].message_type = _FIELDDESCRIPTORPROTO - _DESCRIPTORPROTO.fields_by_name['nested_type'].message_type = _DESCRIPTORPROTO - _DESCRIPTORPROTO.fields_by_name['enum_type'].message_type = _ENUMDESCRIPTORPROTO - _DESCRIPTORPROTO.fields_by_name['extension_range'].message_type = _DESCRIPTORPROTO_EXTENSIONRANGE - _DESCRIPTORPROTO.fields_by_name['oneof_decl'].message_type = _ONEOFDESCRIPTORPROTO - _DESCRIPTORPROTO.fields_by_name['options'].message_type = _MESSAGEOPTIONS - _DESCRIPTORPROTO.fields_by_name['reserved_range'].message_type = _DESCRIPTORPROTO_RESERVEDRANGE - _EXTENSIONRANGEOPTIONS_DECLARATION.containing_type = _EXTENSIONRANGEOPTIONS - _EXTENSIONRANGEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION - _EXTENSIONRANGEOPTIONS.fields_by_name['declaration'].message_type = _EXTENSIONRANGEOPTIONS_DECLARATION - _EXTENSIONRANGEOPTIONS.fields_by_name['verification'].enum_type = _EXTENSIONRANGEOPTIONS_VERIFICATIONSTATE - _EXTENSIONRANGEOPTIONS_VERIFICATIONSTATE.containing_type = _EXTENSIONRANGEOPTIONS - _FIELDDESCRIPTORPROTO.fields_by_name['label'].enum_type = _FIELDDESCRIPTORPROTO_LABEL - _FIELDDESCRIPTORPROTO.fields_by_name['type'].enum_type = _FIELDDESCRIPTORPROTO_TYPE - _FIELDDESCRIPTORPROTO.fields_by_name['options'].message_type = _FIELDOPTIONS - _FIELDDESCRIPTORPROTO_TYPE.containing_type = _FIELDDESCRIPTORPROTO - _FIELDDESCRIPTORPROTO_LABEL.containing_type = _FIELDDESCRIPTORPROTO - _ONEOFDESCRIPTORPROTO.fields_by_name['options'].message_type = _ONEOFOPTIONS - _ENUMDESCRIPTORPROTO_ENUMRESERVEDRANGE.containing_type 
= _ENUMDESCRIPTORPROTO - _ENUMDESCRIPTORPROTO.fields_by_name['value'].message_type = _ENUMVALUEDESCRIPTORPROTO - _ENUMDESCRIPTORPROTO.fields_by_name['options'].message_type = _ENUMOPTIONS - _ENUMDESCRIPTORPROTO.fields_by_name['reserved_range'].message_type = _ENUMDESCRIPTORPROTO_ENUMRESERVEDRANGE - _ENUMVALUEDESCRIPTORPROTO.fields_by_name['options'].message_type = _ENUMVALUEOPTIONS - _SERVICEDESCRIPTORPROTO.fields_by_name['method'].message_type = _METHODDESCRIPTORPROTO - _SERVICEDESCRIPTORPROTO.fields_by_name['options'].message_type = _SERVICEOPTIONS - _METHODDESCRIPTORPROTO.fields_by_name['options'].message_type = _METHODOPTIONS - _FILEOPTIONS.fields_by_name['optimize_for'].enum_type = _FILEOPTIONS_OPTIMIZEMODE - _FILEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION - _FILEOPTIONS_OPTIMIZEMODE.containing_type = _FILEOPTIONS - _MESSAGEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION - _FIELDOPTIONS.fields_by_name['ctype'].enum_type = _FIELDOPTIONS_CTYPE - _FIELDOPTIONS.fields_by_name['jstype'].enum_type = _FIELDOPTIONS_JSTYPE - _FIELDOPTIONS.fields_by_name['retention'].enum_type = _FIELDOPTIONS_OPTIONRETENTION - _FIELDOPTIONS.fields_by_name['target'].enum_type = _FIELDOPTIONS_OPTIONTARGETTYPE - _FIELDOPTIONS.fields_by_name['targets'].enum_type = _FIELDOPTIONS_OPTIONTARGETTYPE - _FIELDOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION - _FIELDOPTIONS_CTYPE.containing_type = _FIELDOPTIONS - _FIELDOPTIONS_JSTYPE.containing_type = _FIELDOPTIONS - _FIELDOPTIONS_OPTIONRETENTION.containing_type = _FIELDOPTIONS - _FIELDOPTIONS_OPTIONTARGETTYPE.containing_type = _FIELDOPTIONS - _ONEOFOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION - _ENUMOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION - _ENUMVALUEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION - _SERVICEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION - _METHODOPTIONS.fields_by_name['idempotency_level'].enum_type = _METHODOPTIONS_IDEMPOTENCYLEVEL - _METHODOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION - _METHODOPTIONS_IDEMPOTENCYLEVEL.containing_type = _METHODOPTIONS - _UNINTERPRETEDOPTION_NAMEPART.containing_type = _UNINTERPRETEDOPTION - _UNINTERPRETEDOPTION.fields_by_name['name'].message_type = _UNINTERPRETEDOPTION_NAMEPART - _SOURCECODEINFO_LOCATION.containing_type = _SOURCECODEINFO - _SOURCECODEINFO.fields_by_name['location'].message_type = _SOURCECODEINFO_LOCATION - _GENERATEDCODEINFO_ANNOTATION.fields_by_name['semantic'].enum_type = _GENERATEDCODEINFO_ANNOTATION_SEMANTIC - _GENERATEDCODEINFO_ANNOTATION.containing_type = _GENERATEDCODEINFO - _GENERATEDCODEINFO_ANNOTATION_SEMANTIC.containing_type = _GENERATEDCODEINFO_ANNOTATION - _GENERATEDCODEINFO.fields_by_name['annotation'].message_type = _GENERATEDCODEINFO_ANNOTATION - DESCRIPTOR.message_types_by_name['FileDescriptorSet'] = _FILEDESCRIPTORSET - DESCRIPTOR.message_types_by_name['FileDescriptorProto'] = _FILEDESCRIPTORPROTO - DESCRIPTOR.message_types_by_name['DescriptorProto'] = _DESCRIPTORPROTO - DESCRIPTOR.message_types_by_name['ExtensionRangeOptions'] = _EXTENSIONRANGEOPTIONS - DESCRIPTOR.message_types_by_name['FieldDescriptorProto'] = _FIELDDESCRIPTORPROTO - DESCRIPTOR.message_types_by_name['OneofDescriptorProto'] = _ONEOFDESCRIPTORPROTO - DESCRIPTOR.message_types_by_name['EnumDescriptorProto'] = _ENUMDESCRIPTORPROTO - 
DESCRIPTOR.message_types_by_name['EnumValueDescriptorProto'] = _ENUMVALUEDESCRIPTORPROTO - DESCRIPTOR.message_types_by_name['ServiceDescriptorProto'] = _SERVICEDESCRIPTORPROTO - DESCRIPTOR.message_types_by_name['MethodDescriptorProto'] = _METHODDESCRIPTORPROTO - DESCRIPTOR.message_types_by_name['FileOptions'] = _FILEOPTIONS - DESCRIPTOR.message_types_by_name['MessageOptions'] = _MESSAGEOPTIONS - DESCRIPTOR.message_types_by_name['FieldOptions'] = _FIELDOPTIONS - DESCRIPTOR.message_types_by_name['OneofOptions'] = _ONEOFOPTIONS - DESCRIPTOR.message_types_by_name['EnumOptions'] = _ENUMOPTIONS - DESCRIPTOR.message_types_by_name['EnumValueOptions'] = _ENUMVALUEOPTIONS - DESCRIPTOR.message_types_by_name['ServiceOptions'] = _SERVICEOPTIONS - DESCRIPTOR.message_types_by_name['MethodOptions'] = _METHODOPTIONS - DESCRIPTOR.message_types_by_name['UninterpretedOption'] = _UNINTERPRETEDOPTION - DESCRIPTOR.message_types_by_name['SourceCodeInfo'] = _SOURCECODEINFO - DESCRIPTOR.message_types_by_name['GeneratedCodeInfo'] = _GENERATEDCODEINFO - _sym_db.RegisterFileDescriptor(DESCRIPTOR) - -else: - _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) -_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.descriptor_pb2', _globals) -if _descriptor._USE_C_DESCRIPTORS == False: - - DESCRIPTOR._options = None - _globals['_FILEDESCRIPTORSET']._serialized_start=53 - _globals['_FILEDESCRIPTORSET']._serialized_end=130 - _globals['_FILEDESCRIPTORPROTO']._serialized_start=133 - _globals['_FILEDESCRIPTORPROTO']._serialized_end=771 - _globals['_DESCRIPTORPROTO']._serialized_start=774 - _globals['_DESCRIPTORPROTO']._serialized_end=1599 - _globals['_DESCRIPTORPROTO_EXTENSIONRANGE']._serialized_start=1420 - _globals['_DESCRIPTORPROTO_EXTENSIONRANGE']._serialized_end=1542 - _globals['_DESCRIPTORPROTO_RESERVEDRANGE']._serialized_start=1544 - _globals['_DESCRIPTORPROTO_RESERVEDRANGE']._serialized_end=1599 - _globals['_EXTENSIONRANGEOPTIONS']._serialized_start=1602 - _globals['_EXTENSIONRANGEOPTIONS']._serialized_end=2159 - _globals['_EXTENSIONRANGEOPTIONS_DECLARATION']._serialized_start=1915 - _globals['_EXTENSIONRANGEOPTIONS_DECLARATION']._serialized_end=2094 - _globals['_EXTENSIONRANGEOPTIONS_VERIFICATIONSTATE']._serialized_start=2096 - _globals['_EXTENSIONRANGEOPTIONS_VERIFICATIONSTATE']._serialized_end=2148 - _globals['_FIELDDESCRIPTORPROTO']._serialized_start=2162 - _globals['_FIELDDESCRIPTORPROTO']._serialized_end=2995 - _globals['_FIELDDESCRIPTORPROTO_TYPE']._serialized_start=2616 - _globals['_FIELDDESCRIPTORPROTO_TYPE']._serialized_end=2926 - _globals['_FIELDDESCRIPTORPROTO_LABEL']._serialized_start=2928 - _globals['_FIELDDESCRIPTORPROTO_LABEL']._serialized_end=2995 - _globals['_ONEOFDESCRIPTORPROTO']._serialized_start=2997 - _globals['_ONEOFDESCRIPTORPROTO']._serialized_end=3096 - _globals['_ENUMDESCRIPTORPROTO']._serialized_start=3099 - _globals['_ENUMDESCRIPTORPROTO']._serialized_end=3454 - _globals['_ENUMDESCRIPTORPROTO_ENUMRESERVEDRANGE']._serialized_start=3395 - _globals['_ENUMDESCRIPTORPROTO_ENUMRESERVEDRANGE']._serialized_end=3454 - _globals['_ENUMVALUEDESCRIPTORPROTO']._serialized_start=3457 - _globals['_ENUMVALUEDESCRIPTORPROTO']._serialized_end=3588 - _globals['_SERVICEDESCRIPTORPROTO']._serialized_start=3591 - _globals['_SERVICEDESCRIPTORPROTO']._serialized_end=3758 - _globals['_METHODDESCRIPTORPROTO']._serialized_start=3761 - _globals['_METHODDESCRIPTORPROTO']._serialized_end=4026 - _globals['_FILEOPTIONS']._serialized_start=4029 - _globals['_FILEOPTIONS']._serialized_end=5198 - 
_globals['_FILEOPTIONS_OPTIMIZEMODE']._serialized_start=5123 - _globals['_FILEOPTIONS_OPTIMIZEMODE']._serialized_end=5181 - _globals['_MESSAGEOPTIONS']._serialized_start=5201 - _globals['_MESSAGEOPTIONS']._serialized_end=5644 - _globals['_FIELDOPTIONS']._serialized_start=5647 - _globals['_FIELDOPTIONS']._serialized_end=6804 - _globals['_FIELDOPTIONS_CTYPE']._serialized_start=6327 - _globals['_FIELDOPTIONS_CTYPE']._serialized_end=6374 - _globals['_FIELDOPTIONS_JSTYPE']._serialized_start=6376 - _globals['_FIELDOPTIONS_JSTYPE']._serialized_end=6429 - _globals['_FIELDOPTIONS_OPTIONRETENTION']._serialized_start=6431 - _globals['_FIELDOPTIONS_OPTIONRETENTION']._serialized_end=6516 - _globals['_FIELDOPTIONS_OPTIONTARGETTYPE']._serialized_start=6519 - _globals['_FIELDOPTIONS_OPTIONTARGETTYPE']._serialized_end=6787 - _globals['_ONEOFOPTIONS']._serialized_start=6806 - _globals['_ONEOFOPTIONS']._serialized_end=6921 - _globals['_ENUMOPTIONS']._serialized_start=6924 - _globals['_ENUMOPTIONS']._serialized_end=7204 - _globals['_ENUMVALUEOPTIONS']._serialized_start=7207 - _globals['_ENUMVALUEOPTIONS']._serialized_end=7365 - _globals['_SERVICEOPTIONS']._serialized_start=7368 - _globals['_SERVICEOPTIONS']._serialized_end=7524 - _globals['_METHODOPTIONS']._serialized_start=7527 - _globals['_METHODOPTIONS']._serialized_end=7879 - _globals['_METHODOPTIONS_IDEMPOTENCYLEVEL']._serialized_start=7788 - _globals['_METHODOPTIONS_IDEMPOTENCYLEVEL']._serialized_end=7868 - _globals['_UNINTERPRETEDOPTION']._serialized_start=7882 - _globals['_UNINTERPRETEDOPTION']._serialized_end=8292 - _globals['_UNINTERPRETEDOPTION_NAMEPART']._serialized_start=8218 - _globals['_UNINTERPRETEDOPTION_NAMEPART']._serialized_end=8292 - _globals['_SOURCECODEINFO']._serialized_start=8295 - _globals['_SOURCECODEINFO']._serialized_end=8590 - _globals['_SOURCECODEINFO_LOCATION']._serialized_start=8384 - _globals['_SOURCECODEINFO_LOCATION']._serialized_end=8590 - _globals['_GENERATEDCODEINFO']._serialized_start=8593 - _globals['_GENERATEDCODEINFO']._serialized_end=8929 - _globals['_GENERATEDCODEINFO_ANNOTATION']._serialized_start=8694 - _globals['_GENERATEDCODEINFO_ANNOTATION']._serialized_end=8929 - _globals['_GENERATEDCODEINFO_ANNOTATION_SEMANTIC']._serialized_start=8889 - _globals['_GENERATEDCODEINFO_ANNOTATION_SEMANTIC']._serialized_end=8929 -# @@protoc_insertion_point(module_scope) diff --git a/spaces/cihyFjudo/fairness-paper-search/Djay App To Sonos The Ultimate Guide for iPad DJs.md b/spaces/cihyFjudo/fairness-paper-search/Djay App To Sonos The Ultimate Guide for iPad DJs.md deleted file mode 100644 index 210eb57118fbf694d1e7332f08e4dc7ab0df835e..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Djay App To Sonos The Ultimate Guide for iPad DJs.md +++ /dev/null @@ -1,14 +0,0 @@ - -

    I had just discovered the djay app and was going to use it for my wedding reception, as we have very ... individual music tastes that a paid DJ doesn't like! Very disappointed that it will no longer work!

    -

    Djay App To Sonos


    Download File ····· https://tinurli.com/2uwhC0



    -

    The new djay Pro AI performs live separation of audio tracks into vocals, beats, and instruments, enabling real-time mixing of selected components of streamed or local tracks. The new feature is branded as Neural Mix, highlighting its use of Apple's Core ML framework to take advantage of dedicated Neural Engine hardware on modern mobile devices, including the new A12Z Bionic-powered iPad Pro.

    -

    Karim Morsy, Algoriddim's chief executive, demonstrated djay Pro AI's ability to isolate individual layers of standard music tracks, and then layer or swap components such as just the vocals, the beats, or the instrumental elements into a new mix. The dedicated hardware that accelerates this music processing is fast enough to also support adding contextual visual effects to music videos played back at the same time.

    -

    "From the analog to the digital age, transformations in DJ technology have had a profound impact on music styles and genres as a whole," Morsy stated. "Neural Mix is a major paradigm shift. Inspired by the auditory system of the human brain, it changes the way we conceive and mix music. It allows artists of all skill levels to experience and interact with music at an unprecedented depth. We're incredibly excited to introduce Neural Mix to the world and we can't wait to see what artists and music enthusiasts create with djay Pro AI."

    -

    -

    Algoriddim's djay Pro has been a recipient of an Apple Design Award, and the developer has frequently worked to take full advantage of new features of Apple's operating systems and platforms. Last fall, the company added support for iPadOS 13 file browsing using USB devices and cloud services, and enhanced accessibility with support for the new Voice Control in iOS 13 as well as Core Haptics for inaudible feedback for performers using metronome timing during their mixing.

    -

    The djay app is a free download in the iOS App Store, and the features of djay Pro AI along with a library of sounds, loops, and visuals can be purchased as an In-App subscription for $4.99 per month, with a free Pro trial available to try out.

    -

    On top of support for the Pioneer DDJ-WeGO and DDJ-ERGO controllers through the Pioneer DJC-WeCAi iPad Connection Cable for the iPad version of the app, both the iPhone and iPad djay apps get the following new features and enhancements:

    -

    If you download via Deezer, that is for offline listening, so it isn't saved as a file. As far as I know you can use a Hercules controller with djay; there you can link Spotify or Deezer and only have to search for songs on Deezer or Spotify.

    -

    I now also have djay Pro on my Windows laptop, but it doesn't play Spotify tracks all the way through: at about three quarters of a track it starts over again. I can't figure out what causes this; the countdown keeps running normally and you can't tell that it has jumped back. Really frustrating =(

    -
    -
    \ No newline at end of file diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/aiofiles/tempfile/temptypes.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/aiofiles/tempfile/temptypes.py deleted file mode 100644 index dccee6ce53c9ee4aa134d0f9d6a76af3f9846099..0000000000000000000000000000000000000000 --- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/aiofiles/tempfile/temptypes.py +++ /dev/null @@ -1,69 +0,0 @@ -"""Async wrappers for spooled temp files and temp directory objects""" -from functools import partial - -from ..base import AsyncBase -from ..threadpool.utils import ( - cond_delegate_to_executor, - delegate_to_executor, - proxy_property_directly, -) - - -@delegate_to_executor("fileno", "rollover") -@cond_delegate_to_executor( - "close", - "flush", - "isatty", - "read", - "readline", - "readlines", - "seek", - "tell", - "truncate", -) -@proxy_property_directly("closed", "encoding", "mode", "name", "newlines") -class AsyncSpooledTemporaryFile(AsyncBase): - """Async wrapper for SpooledTemporaryFile class""" - - async def _check(self): - if self._file._rolled: - return - max_size = self._file._max_size - if max_size and self._file.tell() > max_size: - await self.rollover() - - async def write(self, s): - """Implementation to anticipate rollover""" - if self._file._rolled: - cb = partial(self._file.write, s) - return await self._loop.run_in_executor(self._executor, cb) - else: - file = self._file._file # reference underlying base IO object - rv = file.write(s) - await self._check() - return rv - - async def writelines(self, iterable): - """Implementation to anticipate rollover""" - if self._file._rolled: - cb = partial(self._file.writelines, iterable) - return await self._loop.run_in_executor(self._executor, cb) - else: - file = self._file._file # reference underlying base IO object - rv = file.writelines(iterable) - await self._check() - return rv - - -@delegate_to_executor("cleanup") -@proxy_property_directly("name") -class AsyncTemporaryDirectory: - """Async wrapper for TemporaryDirectory class""" - - def __init__(self, file, loop, executor): - self._file = file - self._loop = loop - self._executor = executor - - async def close(self): - await self.cleanup() diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/ttLib/tables/S_I_N_G_.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/ttLib/tables/S_I_N_G_.py deleted file mode 100644 index 7420da7e5dcec81b835ab0e8e2c775dbce860cbd..0000000000000000000000000000000000000000 --- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/ttLib/tables/S_I_N_G_.py +++ /dev/null @@ -1,93 +0,0 @@ -from fontTools.misc import sstruct -from fontTools.misc.textTools import bytechr, byteord, tobytes, tostr, safeEval -from . import DefaultTable - -SINGFormat = """ - > # big endian - tableVersionMajor: H - tableVersionMinor: H - glyphletVersion: H - permissions: h - mainGID: H - unitsPerEm: H - vertAdvance: h - vertOrigin: h - uniqueName: 28s - METAMD5: 16s - nameLength: 1s -""" -# baseGlyphName is a byte string which follows the record above. 
- - -class table_S_I_N_G_(DefaultTable.DefaultTable): - - dependencies = [] - - def decompile(self, data, ttFont): - dummy, rest = sstruct.unpack2(SINGFormat, data, self) - self.uniqueName = self.decompileUniqueName(self.uniqueName) - self.nameLength = byteord(self.nameLength) - assert len(rest) == self.nameLength - self.baseGlyphName = tostr(rest) - - rawMETAMD5 = self.METAMD5 - self.METAMD5 = "[" + hex(byteord(self.METAMD5[0])) - for char in rawMETAMD5[1:]: - self.METAMD5 = self.METAMD5 + ", " + hex(byteord(char)) - self.METAMD5 = self.METAMD5 + "]" - - def decompileUniqueName(self, data): - name = "" - for char in data: - val = byteord(char) - if val == 0: - break - if (val > 31) or (val < 128): - name += chr(val) - else: - octString = oct(val) - if len(octString) > 3: - octString = octString[1:] # chop off that leading zero. - elif len(octString) < 3: - octString.zfill(3) - name += "\\" + octString - return name - - def compile(self, ttFont): - d = self.__dict__.copy() - d["nameLength"] = bytechr(len(self.baseGlyphName)) - d["uniqueName"] = self.compilecompileUniqueName(self.uniqueName, 28) - METAMD5List = eval(self.METAMD5) - d["METAMD5"] = b"" - for val in METAMD5List: - d["METAMD5"] += bytechr(val) - assert len(d["METAMD5"]) == 16, "Failed to pack 16 byte MD5 hash in SING table" - data = sstruct.pack(SINGFormat, d) - data = data + tobytes(self.baseGlyphName) - return data - - def compilecompileUniqueName(self, name, length): - nameLen = len(name) - if length <= nameLen: - name = name[: length - 1] + "\000" - else: - name += (nameLen - length) * "\000" - return name - - def toXML(self, writer, ttFont): - writer.comment("Most of this table will be recalculated by the compiler") - writer.newline() - formatstring, names, fixes = sstruct.getformat(SINGFormat) - for name in names: - value = getattr(self, name) - writer.simpletag(name, value=value) - writer.newline() - writer.simpletag("baseGlyphName", value=self.baseGlyphName) - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - value = attrs["value"] - if name in ["uniqueName", "METAMD5", "baseGlyphName"]: - setattr(self, name, value) - else: - setattr(self, name, safeEval(value)) diff --git a/spaces/codedog-ai/edu-assistant/webui/ui.py b/spaces/codedog-ai/edu-assistant/webui/ui.py deleted file mode 100644 index 90fdfca2b07061099c5124cc439f537de4ca3c8c..0000000000000000000000000000000000000000 --- a/spaces/codedog-ai/edu-assistant/webui/ui.py +++ /dev/null @@ -1,107 +0,0 @@ -import gradio as gr -from fastapi import FastAPI - -from edu_assistant import version -from webui.coding_problem import CodingProblemUI -from webui.qa import QaUI - -app = FastAPI() -demo = gr.Blocks(title="Codedog Edu Assistant", theme="gradio/soft") -qa_ui = QaUI() -cp_ui = CodingProblemUI() - - -def apply_cfg( - gpt4_flags: list[int], - qa_instruction: str, - cp_instruction: str, - cp_first_question: str, - qa_knowledge: str, - cp_knowledge: str, -): - qa_ui.ui_reload( - instruction=qa_instruction, - knowledge_name=qa_knowledge, - enable_gpt4=0 in gpt4_flags, - ) - cp_ui.ui_reload( - instruction=cp_instruction, - first_question=cp_first_question, - knowledge_name=cp_knowledge, - enable_gpt4=1 in gpt4_flags, - ) - demo.render() - gr.update() - gr.Info("更新配置成功") - - -def default_cfg(): - qa_ui.ui_reload() - cp_ui.ui_reload() - demo.render() - gr.update() - gr.Info("恢复默认配置成功") - - -def get_gpt4_flags(): - result = [] - if qa_ui.enable_gpt4: - result.append("答疑") - if cp_ui.enable_gpt4: - result.append("做题") - return result - - -with demo: - with 
gr.Row(): - gr.Markdown(f"# Codedog Edu Assistant v{version.VERSION}") - - with gr.Tab(label="答疑"): - qa_ui.ui_render() - - with gr.Tab(label="做题"): - cp_ui.ui_render() - - with gr.Tab(label="设置"): - with gr.Row(): - gr.Markdown("## Prompt 设置") - with gr.Row(): - qa_instruction = gr.Textbox( - label="答疑指示Prompt", lines=5, max_lines=20, value=qa_ui.get_instruction, interactive=True - ) - with gr.Row(): - cp_instruction = gr.Textbox( - label="做题指示Prompt", lines=5, max_lines=20, value=cp_ui.get_instruction, interactive=True - ) - with gr.Row(): - cp_first_question = gr.Textbox( - label="判题Prompt", lines=5, max_lines=20, value=cp_ui.get_first_question, interactive=True - ) - with gr.Row(): - with gr.Column(scale=1): - gr.Markdown("## Open AI 设置") - with gr.Column(scale=2): - gpt4_flags = gr.CheckboxGroup( - value=get_gpt4_flags, choices=["答疑", "做题"], label="启用GPT4", type="index", interactive=True - ) - - with gr.Row(): - gr.Markdown("## 知识库设置") - qa_knowledge = gr.Textbox(value=qa_ui.knowledge, label="答疑知识库", interactive=True) - cp_knowledge = gr.Textbox(value=cp_ui.knowledge, label="做题知识库", interactive=True) - - with gr.Row(): - default_btn = gr.Button(value="恢复默认配置", interactive=True, scale=1) - apply_btn = gr.Button(value="更新配置", interactive=True, variant="primary", scale=1) - - default_btn.click(default_cfg, [], []) - apply_btn.click( - apply_cfg, [gpt4_flags, qa_instruction, cp_instruction, cp_first_question, qa_knowledge, cp_knowledge], [] - ) - -demo.queue() -# app = gr.mount_gradio_app(app, demo, path="/") - -if __name__ == "__main__": - demo.launch() - # uvicorn.run(app, port=7860) diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/atrac3plusdsp.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/atrac3plusdsp.c deleted file mode 100644 index 802f12aec574214be4077f4c00876eaa37a3553a..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/atrac3plusdsp.c +++ /dev/null @@ -1,640 +0,0 @@ -/* - * ATRAC3+ compatible decoder - * - * Copyright (c) 2010-2013 Maxim Poliakovski - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file - * DSP functions for ATRAC3+ decoder. - */ - -#include - -#include "libavutil/float_dsp.h" -#include "libavutil/libm.h" -#include "libavutil/mem_internal.h" - -#include "sinewin.h" -#include "atrac3plus.h" - -/** - * Map quant unit number to its position in the spectrum. - * To get the number of spectral lines in each quant unit do the following: - * num_specs = qu_to_spec_pos[i+1] - qu_to_spec_pos[i] - */ -const uint16_t ff_atrac3p_qu_to_spec_pos[33] = { - 0, 16, 32, 48, 64, 80, 96, 112, - 128, 160, 192, 224, 256, 288, 320, 352, - 384, 448, 512, 576, 640, 704, 768, 896, - 1024, 1152, 1280, 1408, 1536, 1664, 1792, 1920, - 2048 -}; - -/* Scalefactors table. 
*/ -/* Approx. Equ: pow(2.0, (i - 16.0 + 0.501783948) / 3.0) */ -const float ff_atrac3p_sf_tab[64] = { - 0.027852058, 0.0350914, 0.044212341, 0.055704117, 0.0701828, - 0.088424683, 0.11140823, 0.1403656, 0.17684937, 0.22281647, 0.2807312, 0.35369873, - 0.44563293, 0.5614624, 0.70739746, 0.89126587, 1.1229248, 1.4147949, 1.7825317, - 2.2458496, 2.8295898, 3.5650635, 4.4916992, 5.6591797, 7.130127, 8.9833984, - 11.318359, 14.260254, 17.966797, 22.636719, 28.520508, 35.933594, 45.273438, - 57.041016, 71.867188, 90.546875, 114.08203, 143.73438, 181.09375, 228.16406, - 287.46875, 362.1875, 456.32812, 574.9375, 724.375, 912.65625, 1149.875, - 1448.75, 1825.3125, 2299.75, 2897.5, 3650.625, 4599.5, 5795.0, - 7301.25, 9199.0, 11590.0, 14602.5, 18398.0, 23180.0, 29205.0, - 36796.0, 46360.0, 58410.0 -}; - -/* Mantissa table. */ -/* pow(10, x * log10(2) + 0.05) / 2 / ([1,2,3,5,7,15,31] + 0.5) */ -const float ff_atrac3p_mant_tab[8] = { - 0.0, - 0.74801636, - 0.44882202, - 0.32058716, - 0.20400238, - 0.1496048, - 0.07239151, - 0.035619736 -}; - -#define ATRAC3P_MDCT_SIZE (ATRAC3P_SUBBAND_SAMPLES * 2) - -#define TWOPI (2 * M_PI) - -#define DEQUANT_PHASE(ph) (((ph) & 0x1F) << 6) - -static DECLARE_ALIGNED(32, float, sine_table)[2048]; ///< wave table -static DECLARE_ALIGNED(32, float, hann_window)[256]; ///< Hann windowing function -static float amp_sf_tab[64]; ///< scalefactors for quantized amplitudes - -av_cold void ff_atrac3p_init_dsp_static(void) -{ - int i; - - /* generate sine wave table */ - for (i = 0; i < 2048; i++) - sine_table[i] = sin(TWOPI * i / 2048); - - /* generate Hann window */ - for (i = 0; i < 256; i++) - hann_window[i] = (1.0f - cos(TWOPI * i / 256.0f)) * 0.5f; - - /* generate amplitude scalefactors table */ - for (i = 0; i < 64; i++) - amp_sf_tab[i] = exp2f((i - 3) / 4.0f); - - ff_init_ff_sine_windows(7); - ff_init_ff_sine_windows(6); -} - -/** - * Synthesize sine waves according to given parameters. - * - * @param[in] synth_param ptr to common synthesis parameters - * @param[in] waves_info parameters for each sine wave - * @param[in] envelope envelope data for all waves in a group - * @param[in] fdsp ptr to floating-point DSP context - * @param[in] invert_phase flag indicating 180° phase shift - * @param[in] reg_offset region offset for trimming envelope data - * @param[out] out receives sythesized data - */ -static void waves_synth(Atrac3pWaveSynthParams *synth_param, - Atrac3pWavesData *waves_info, - Atrac3pWaveEnvelope *envelope, - AVFloatDSPContext *fdsp, - int invert_phase, int reg_offset, float *out) -{ - int i, wn, inc, pos; - double amp; - Atrac3pWaveParam *wave_param = &synth_param->waves[waves_info->start_index]; - - for (wn = 0; wn < waves_info->num_wavs; wn++, wave_param++) { - /* amplitude dequantization */ - amp = amp_sf_tab[wave_param->amp_sf] * - (!synth_param->amplitude_mode - ? 
(wave_param->amp_index + 1) / 15.13f - : 1.0f); - - inc = wave_param->freq_index; - pos = DEQUANT_PHASE(wave_param->phase_index) - (reg_offset ^ 128) * inc & 2047; - - /* waveform generation */ - for (i = 0; i < 128; i++) { - out[i] += sine_table[pos] * amp; - pos = (pos + inc) & 2047; - } - } - - /* invert phase if requested */ - if (invert_phase) - fdsp->vector_fmul_scalar(out, out, -1.0f, 128); - - /* fade in with steep Hann window if requested */ - if (envelope->has_start_point) { - pos = (envelope->start_pos << 2) - reg_offset; - if (pos > 0 && pos <= 128) { - memset(out, 0, pos * sizeof(*out)); - if (!envelope->has_stop_point || - envelope->start_pos != envelope->stop_pos) { - out[pos + 0] *= hann_window[0]; - out[pos + 1] *= hann_window[32]; - out[pos + 2] *= hann_window[64]; - out[pos + 3] *= hann_window[96]; - } - } - } - - /* fade out with steep Hann window if requested */ - if (envelope->has_stop_point) { - pos = (envelope->stop_pos + 1 << 2) - reg_offset; - if (pos > 0 && pos <= 128) { - out[pos - 4] *= hann_window[96]; - out[pos - 3] *= hann_window[64]; - out[pos - 2] *= hann_window[32]; - out[pos - 1] *= hann_window[0]; - memset(&out[pos], 0, (128 - pos) * sizeof(out[pos])); - } - } -} - -void ff_atrac3p_generate_tones(Atrac3pChanUnitCtx *ch_unit, AVFloatDSPContext *fdsp, - int ch_num, int sb, float *out) -{ - DECLARE_ALIGNED(32, float, wavreg1)[128] = { 0 }; - DECLARE_ALIGNED(32, float, wavreg2)[128] = { 0 }; - int i, reg1_env_nonzero, reg2_env_nonzero; - Atrac3pWavesData *tones_now = &ch_unit->channels[ch_num].tones_info_prev[sb]; - Atrac3pWavesData *tones_next = &ch_unit->channels[ch_num].tones_info[sb]; - - /* reconstruct full envelopes for both overlapping regions - * from truncated bitstream data */ - if (tones_next->pend_env.has_start_point && - tones_next->pend_env.start_pos < tones_next->pend_env.stop_pos) { - tones_next->curr_env.has_start_point = 1; - tones_next->curr_env.start_pos = tones_next->pend_env.start_pos + 32; - } else if (tones_now->pend_env.has_start_point) { - tones_next->curr_env.has_start_point = 1; - tones_next->curr_env.start_pos = tones_now->pend_env.start_pos; - } else { - tones_next->curr_env.has_start_point = 0; - tones_next->curr_env.start_pos = 0; - } - - if (tones_now->pend_env.has_stop_point && - tones_now->pend_env.stop_pos >= tones_next->curr_env.start_pos) { - tones_next->curr_env.has_stop_point = 1; - tones_next->curr_env.stop_pos = tones_now->pend_env.stop_pos; - } else if (tones_next->pend_env.has_stop_point) { - tones_next->curr_env.has_stop_point = 1; - tones_next->curr_env.stop_pos = tones_next->pend_env.stop_pos + 32; - } else { - tones_next->curr_env.has_stop_point = 0; - tones_next->curr_env.stop_pos = 64; - } - - /* is the visible part of the envelope non-zero? */ - reg1_env_nonzero = (tones_now->curr_env.stop_pos < 32) ? 0 : 1; - reg2_env_nonzero = (tones_next->curr_env.start_pos >= 32) ? 
0 : 1; - - /* synthesize waves for both overlapping regions */ - if (tones_now->num_wavs && reg1_env_nonzero) - waves_synth(ch_unit->waves_info_prev, tones_now, &tones_now->curr_env, - fdsp, ch_unit->waves_info_prev->invert_phase[sb] & ch_num, - 128, wavreg1); - - if (tones_next->num_wavs && reg2_env_nonzero) - waves_synth(ch_unit->waves_info, tones_next, &tones_next->curr_env, fdsp, - ch_unit->waves_info->invert_phase[sb] & ch_num, 0, wavreg2); - - /* Hann windowing for non-faded wave signals */ - if (tones_now->num_wavs && tones_next->num_wavs && - reg1_env_nonzero && reg2_env_nonzero) { - fdsp->vector_fmul(wavreg1, wavreg1, &hann_window[128], 128); - fdsp->vector_fmul(wavreg2, wavreg2, hann_window, 128); - } else { - if (tones_now->num_wavs && !tones_now->curr_env.has_stop_point) - fdsp->vector_fmul(wavreg1, wavreg1, &hann_window[128], 128); - - if (tones_next->num_wavs && !tones_next->curr_env.has_start_point) - fdsp->vector_fmul(wavreg2, wavreg2, hann_window, 128); - } - - /* Overlap and add to residual */ - for (i = 0; i < 128; i++) - out[i] += wavreg1[i] + wavreg2[i]; -} - -static const uint8_t subband_to_powgrp[ATRAC3P_SUBBANDS] = { - 0, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4 -}; - -/* noise table for power compensation */ -static const float noise_tab[1024] = { - -0.01358032, -0.05593872, 0.01696777, -0.14871216, -0.26412964, -0.09893799, 0.25723267, - 0.02008057, -0.72235107, -0.44351196, -0.22985840, 0.16833496, 0.46902466, 0.05917358, - -0.15179443, 0.41299438, -0.01287842, 0.13360596, 0.43557739, -0.09530640, -0.58422852, - 0.39266968, -0.08343506, -0.25604248, 0.22848511, 0.26013184, -0.65588379, 0.17288208, - -0.08673096, -0.05203247, 0.07299805, -0.28665161, -0.35806274, 0.06552124, -0.09387207, - 0.21099854, -0.28347778, -0.72402954, 0.05050659, -0.10635376, -0.18853760, 0.29724121, - 0.20703125, -0.29791260, -0.37634277, 0.47970581, -0.09976196, 0.32641602, -0.29248047, - -0.28237915, 0.26028442, -0.36157227, 0.22042847, -0.03222656, -0.37268066, -0.03759766, - 0.09909058, 0.23284912, 0.19320679, 0.14453125, -0.02139282, -0.19702148, 0.31533813, - -0.16741943, 0.35031128, -0.35656738, -0.66128540, -0.00701904, 0.20898438, 0.26837158, - -0.33706665, -0.04568481, 0.12600708, 0.10284424, 0.07321167, -0.18280029, 0.38101196, - 0.21301270, 0.04541016, 0.01156616, -0.26391602, -0.02346802, -0.22125244, 0.29760742, - -0.36233521, -0.31314087, -0.13967896, -0.11276245, -0.19433594, 0.34490967, 0.02343750, - 0.21963501, -0.02777100, -0.67678833, -0.08999634, 0.14233398, -0.27697754, 0.51422119, - -0.05047607, 0.48327637, 0.37167358, -0.60806274, 0.18728638, -0.15191650, 0.00637817, - 0.02832031, -0.15618896, 0.60644531, 0.21826172, 0.06384277, -0.31863403, 0.08816528, - 0.15447998, -0.07015991, -0.08154297, -0.40966797, -0.39785767, -0.11709595, 0.22052002, - 0.18466187, -0.17257690, 0.03759766, -0.06195068, 0.00433350, 0.12176514, 0.34011841, - 0.25610352, -0.05294800, 0.41033936, 0.16854858, -0.76187134, 0.13845825, -0.19418335, - -0.21524048, -0.44412231, -0.08160400, -0.28195190, -0.01873779, 0.15524292, -0.37438965, - -0.44860840, 0.43096924, -0.24746704, 0.49856567, 0.14859009, 0.38159180, 0.20541382, - -0.39175415, -0.65850830, -0.43716431, 0.13037109, -0.05111694, 0.39956665, 0.21447754, - -0.04861450, 0.33654785, 0.10589600, -0.88085938, -0.30822754, 0.38577271, 0.30047607, - 0.38836670, 0.09118652, -0.36477661, -0.01641846, -0.23031616, 0.26058960, 0.18859863, - -0.21868896, -0.17861938, -0.29754639, 0.09777832, 0.10806274, -0.51605225, 0.00076294, - 0.13259888, 
0.11090088, -0.24084473, 0.24957275, 0.01379395, -0.04141235, -0.04937744, - 0.57394409, 0.27410889, 0.27587891, 0.45013428, -0.32592773, 0.11160278, -0.00970459, - 0.29092407, 0.03356934, -0.70925903, 0.04882812, 0.43499756, 0.07720947, -0.27554321, - -0.01742554, -0.08413696, -0.04028320, -0.52850342, -0.07330322, 0.05181885, 0.21362305, - -0.18765259, 0.07058716, -0.03009033, 0.32662964, 0.27023315, -0.28002930, 0.17568970, - 0.03338623, 0.30242920, -0.03921509, 0.32174683, -0.23733521, 0.08575439, -0.38269043, - 0.09194946, -0.07238770, 0.17941284, -0.51278687, -0.25146484, 0.19790649, -0.19195557, - 0.16549683, 0.42456055, 0.39129639, -0.02868652, 0.17980957, 0.24902344, -0.76583862, - -0.20959473, 0.61013794, 0.37011719, 0.36859131, -0.04486084, 0.10678101, -0.15994263, - -0.05328369, 0.28463745, -0.06420898, -0.36987305, -0.28009033, -0.11764526, 0.04312134, - -0.08038330, 0.04885864, -0.03067017, -0.00042725, 0.34289551, -0.00988770, 0.34838867, - 0.32516479, -0.16271973, 0.38269043, 0.03240967, 0.12417603, -0.14331055, -0.34902954, - -0.18325806, 0.29421997, 0.44284058, 0.75170898, -0.67245483, -0.12176514, 0.27914429, - -0.29806519, 0.19863892, 0.30087280, 0.22680664, -0.36633301, -0.32534790, -0.57553101, - -0.16641235, 0.43811035, 0.08331299, 0.15942383, 0.26516724, -0.24240112, -0.11761475, - -0.16827393, -0.14260864, 0.46343994, 0.11804199, -0.55514526, -0.02520752, -0.14309692, - 0.00448608, 0.02749634, -0.30545044, 0.70965576, 0.45108032, 0.66439819, -0.68255615, - -0.12496948, 0.09146118, -0.21109009, -0.23791504, 0.79943848, -0.35205078, -0.24963379, - 0.18719482, -0.19079590, 0.07458496, 0.07623291, -0.28781128, -0.37121582, -0.19580078, - -0.01773071, -0.16717529, 0.13040161, 0.14672852, 0.42379761, 0.03582764, 0.11431885, - 0.05145264, 0.44702148, 0.08963013, 0.01367188, -0.54519653, -0.12692261, 0.21176147, - 0.04925537, 0.30670166, -0.11029053, 0.19555664, -0.27740479, 0.23043823, 0.15554810, - -0.19299316, -0.25729370, 0.17800903, -0.03579712, -0.05065918, -0.06933594, -0.09500122, - -0.07821655, 0.23889160, -0.31900024, 0.03073120, -0.00415039, 0.61315918, 0.37176514, - -0.13442993, -0.15536499, -0.19216919, -0.37899780, 0.19992065, 0.02630615, -0.12573242, - 0.25927734, -0.02447510, 0.29629517, -0.40731812, -0.17333984, 0.24310303, -0.10607910, - 0.14828491, 0.08792114, -0.18743896, -0.05572510, -0.04833984, 0.10473633, -0.29028320, - -0.67687988, -0.28170776, -0.41687012, 0.05413818, -0.23284912, 0.09555054, -0.08969116, - -0.15112305, 0.12738037, 0.35986328, 0.28948975, 0.30691528, 0.23956299, 0.06973267, - -0.31198120, -0.18450928, 0.22280884, -0.21600342, 0.23522949, -0.61840820, -0.13012695, - 0.26412964, 0.47320557, -0.26440430, 0.38757324, 0.17352295, -0.26104736, -0.25866699, - -0.12274170, -0.29733276, 0.07687378, 0.18588257, -0.08880615, 0.31185913, 0.05313110, - -0.10885620, -0.14901733, -0.22323608, -0.08538818, 0.19812012, 0.19732666, -0.18927002, - 0.29058838, 0.25555420, -0.48599243, 0.18768311, 0.01345825, 0.34887695, 0.21530151, - 0.19857788, 0.18661499, -0.01394653, -0.09063721, -0.38781738, 0.27160645, -0.20379639, - -0.32119751, -0.23889160, 0.27096558, 0.24951172, 0.07922363, 0.07479858, -0.50946045, - 0.10220337, 0.58364868, -0.19503784, -0.18560791, -0.01165771, 0.47195435, 0.22430420, - -0.38635254, -0.03732300, -0.09179688, 0.06991577, 0.15106201, 0.20605469, -0.05969238, - -0.41821289, 0.12231445, -0.04672241, -0.05117798, -0.11523438, -0.51849365, -0.04077148, - 0.44284058, -0.64086914, 0.17019653, 0.02236938, 0.22848511, -0.23214722, 
-0.32354736, - -0.14068604, -0.29690552, -0.19891357, 0.02774048, -0.20965576, -0.52191162, -0.19299316, - -0.07290649, 0.49053955, -0.22302246, 0.05642700, 0.13122559, -0.20819092, -0.83590698, - -0.08181763, 0.26797485, -0.00091553, -0.09457397, 0.17089844, -0.27020264, 0.30270386, - 0.05496216, 0.09564209, -0.08590698, 0.02130127, 0.35931396, 0.21728516, -0.15396118, - -0.05053711, 0.02719116, 0.16302490, 0.43212891, 0.10229492, -0.40820312, 0.21646118, - 0.08435059, -0.11145020, -0.39962769, -0.05618286, -0.10223389, -0.60839844, 0.33724976, - -0.06341553, -0.47369385, -0.32852173, 0.05242920, 0.19635010, -0.19137573, -0.67901611, - 0.16180420, 0.05133057, -0.22283936, 0.09646606, 0.24288940, -0.45007324, 0.08804321, - 0.14053345, 0.22619629, -0.01000977, 0.36355591, -0.19863892, -0.30364990, -0.24118042, - -0.57461548, 0.26498413, 0.04345703, -0.09796143, -0.47714233, -0.23739624, 0.18737793, - 0.08926392, -0.02795410, 0.00305176, -0.08700562, -0.38711548, 0.03222656, 0.10940552, - -0.41906738, -0.01620483, -0.47061157, 0.37985229, -0.21624756, 0.47976685, -0.20046997, - -0.62533569, -0.26907349, -0.02877808, 0.00671387, -0.29071045, -0.24685669, -0.15722656, - -0.26055908, 0.29968262, 0.28225708, -0.08990479, -0.16748047, -0.46759033, -0.25067139, - -0.25183105, -0.45932007, 0.05828857, 0.29006958, 0.23840332, -0.17974854, 0.26931763, - 0.10696411, -0.06848145, -0.17126465, -0.10522461, -0.55386353, -0.42306519, -0.07608032, - 0.24380493, 0.38586426, 0.16882324, 0.26751709, 0.17303467, 0.35809326, -0.22094727, - -0.30703735, -0.28497314, -0.04321289, 0.15219116, -0.17071533, -0.39334106, 0.03439331, - -0.10809326, -0.30590820, 0.26449585, -0.07412720, 0.13638306, -0.01062012, 0.27996826, - 0.04397583, -0.05557251, -0.56933594, 0.03363037, -0.00949097, 0.52642822, -0.44329834, - 0.28308105, -0.05499268, -0.23312378, -0.29870605, -0.05123901, 0.26831055, -0.35238647, - -0.30993652, 0.34646606, -0.19775391, 0.44595337, 0.13769531, 0.45358276, 0.19961548, - 0.42681885, 0.15722656, 0.00128174, 0.23757935, 0.40988159, 0.25164795, -0.00732422, - -0.12405396, -0.43420410, -0.00402832, 0.34243774, 0.36264038, 0.18807983, -0.09301758, - -0.10296631, 0.05532837, -0.31652832, 0.14337158, 0.35040283, 0.32540894, 0.05728149, - -0.12030029, -0.25942993, -0.20312500, -0.16491699, -0.46051025, -0.08004761, 0.50772095, - 0.16168213, 0.28439331, 0.08105469, -0.19104004, 0.38589478, -0.16400146, -0.25454712, - 0.20281982, -0.20730591, -0.06311035, 0.32937622, 0.15032959, -0.05340576, 0.30487061, - -0.11648560, 0.38009644, -0.20062256, 0.43466187, 0.01150513, 0.35754395, -0.13146973, - 0.67489624, 0.05212402, 0.27914429, -0.39431763, 0.75308228, -0.13366699, 0.24453735, - 0.42248535, -0.65905762, -0.00546265, -0.03491211, -0.13659668, -0.08294678, -0.45666504, - 0.27188110, 0.12731934, 0.61148071, 0.10449219, -0.28836060, 0.00091553, 0.24618530, - 0.13119507, 0.05685425, 0.17355347, 0.42034912, 0.08514404, 0.24536133, 0.18951416, - -0.19107056, -0.15036011, 0.02334595, 0.54986572, 0.32321167, -0.16104126, -0.03054810, - 0.43594360, 0.17309570, 0.61053467, 0.24731445, 0.33334351, 0.15240479, 0.15588379, - 0.36425781, -0.30407715, -0.13302612, 0.00427246, 0.04171753, -0.33178711, 0.34216309, - -0.12463379, -0.02764893, 0.05905151, -0.31436157, 0.16531372, 0.34542847, -0.03292847, - 0.12527466, -0.12313843, -0.13171387, 0.04757690, -0.45095825, -0.19085693, 0.35342407, - -0.23239136, -0.34387207, 0.11264038, -0.15740967, 0.05273438, 0.74942017, 0.21505737, - 0.08514404, -0.42391968, -0.19531250, 0.35293579, 
0.25305176, 0.15731812, -0.70324707, - -0.21591187, 0.35604858, 0.14132690, 0.11724854, 0.15853882, -0.24597168, 0.07019043, - 0.02127075, 0.12658691, 0.06390381, -0.12292480, 0.15441895, -0.47640991, 0.06195068, - 0.58981323, -0.15151978, -0.03604126, -0.45059204, -0.01672363, -0.46997070, 0.25750732, - 0.18084717, 0.06661987, 0.13253784, 0.67828369, 0.11370850, 0.11325073, -0.04611206, - -0.07791138, -0.36544800, -0.06747437, -0.31594849, 0.16131592, 0.41983032, 0.11071777, - -0.36889648, 0.30963135, -0.37875366, 0.58508301, 0.00393677, 0.12338257, 0.03424072, - -0.21728516, -0.12838745, -0.46981812, 0.05868530, -0.25015259, 0.27407837, 0.65240479, - -0.34429932, -0.15179443, 0.14056396, 0.33505249, 0.28826904, 0.09921265, 0.34390259, - 0.13656616, -0.23608398, 0.00863647, 0.02627563, -0.19119263, 0.19775391, -0.07214355, - 0.07809448, 0.03454590, -0.03417969, 0.00033569, -0.23095703, 0.18673706, 0.05798340, - 0.03814697, -0.04318237, 0.05487061, 0.08633423, 0.55950928, -0.06347656, 0.10333252, - 0.25305176, 0.05853271, 0.12246704, -0.25543213, -0.34262085, -0.36437988, -0.21304321, - -0.05093384, 0.02777100, 0.07620239, -0.21215820, -0.09326172, 0.19021606, -0.40579224, - -0.01193237, 0.19845581, -0.35336304, -0.07397461, 0.20104980, 0.08615112, -0.44375610, - 0.11419678, 0.24453735, -0.16555786, -0.05081177, -0.01406860, 0.27893066, -0.18692017, - 0.07473755, 0.03451538, -0.39733887, 0.21548462, -0.22534180, -0.39651489, -0.04989624, - -0.57662964, 0.06390381, 0.62020874, -0.13470459, 0.04345703, -0.21862793, -0.02789307, - 0.51696777, -0.27587891, 0.39004517, 0.09857178, -0.00738525, 0.31317139, 0.00048828, - -0.46572876, 0.29531860, -0.10009766, -0.27856445, 0.03594971, 0.25048828, -0.74584961, - -0.25350952, -0.03302002, 0.31188965, 0.01571655, 0.46710205, 0.21591187, 0.07260132, - -0.42132568, -0.53900146, -0.13674927, -0.16571045, -0.34454346, 0.12359619, -0.11184692, - 0.00967407, 0.34576416, -0.05761719, 0.34848022, 0.17645264, -0.39395142, 0.10339355, - 0.18215942, 0.20697021, 0.59109497, -0.11560059, -0.07385254, 0.10397339, 0.35437012, - -0.22863770, 0.01794434, 0.17559814, -0.17495728, 0.12142944, 0.10928345, -1.00000000, - -0.01379395, 0.21237183, -0.27035522, 0.27319336, -0.37066650, 0.41354370, -0.40054321, - 0.00689697, 0.26321411, 0.39266968, 0.65298462, 0.41625977, -0.13909912, 0.78375244, - -0.30941772, 0.20169067, -0.39367676, 0.94021606, -0.24066162, 0.05557251, -0.24533081, - -0.05444336, -0.76754761, -0.19375610, -0.11041260, -0.17532349, 0.16006470, 0.02188110, - 0.17465210, -0.04342651, -0.56777954, -0.40988159, 0.26687622, 0.11700439, -0.00344849, - -0.05395508, 0.37426758, -0.40719604, -0.15032959, -0.01660156, 0.04196167, -0.04559326, - -0.12969971, 0.12011719, 0.08419800, -0.11199951, 0.35174561, 0.10275269, -0.25686646, - 0.48446655, 0.03225708, 0.28408813, -0.18701172, 0.36282349, -0.03280640, 0.32302856, - 0.17233276, 0.48269653, 0.31112671, -0.04946899, 0.12774658, 0.52685547, 0.10211182, - 0.05953979, 0.05999756, 0.20144653, 0.00744629, 0.27316284, 0.24377441, 0.39672852, - 0.01702881, -0.35513306, 0.11364746, -0.13555908, 0.48880005, -0.15417480, -0.09149170, - -0.02615356, 0.46246338, -0.72250366, 0.22332764, 0.23849487, -0.25686646, -0.08514404, - -0.02062988, -0.34494019, -0.02297974, -0.80386353, -0.08074951, -0.12689209, -0.06896973, - 0.24099731, -0.35650635, -0.09558105, 0.29254150, 0.23132324, -0.16726685, 0.00000000, - -0.24237061, 0.30899048, 0.29504395, -0.20898438, 0.17059326, -0.07672119, -0.14395142, - 0.05572510, 0.20602417, -0.51550293, 
-0.03167725, -0.48840332, -0.20425415, 0.14144897, - 0.07275391, -0.76669312, -0.22488403, 0.20651245, 0.03259277, 0.00085449, 0.03039551, - 0.47555542, 0.38351440 -}; - -/** Noise level table for power compensation. - * Equ: pow(2.0f, (double)(6 - i) / 3.0f) where i = 0...15 */ -static const float pwc_levs[16] = { - 3.96875, 3.15625, 2.5, 2.0, 1.59375, 1.25, 1.0, 0.78125, - 0.625, 0.5, 0.40625, 0.3125, 0.25, 0.1875, 0.15625, 0.0 -}; - -/** Map subband number to quant unit number. */ -static const uint8_t subband_to_qu[17] = { - 0, 8, 12, 16, 18, 20, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32 -}; - -void ff_atrac3p_power_compensation(Atrac3pChanUnitCtx *ctx, AVFloatDSPContext *fdsp, - int ch_index, float *sp, int rng_index, int sb) -{ - AtracGainInfo *g1, *g2; - LOCAL_ALIGNED_32(float, pwcsp, [ATRAC3P_SUBBAND_SAMPLES]); - float *dst, grp_lev, qu_lev; - int i, gain_lev, gcv = 0, qu, nsp; - int swap_ch = (ctx->unit_type == CH_UNIT_STEREO && ctx->swap_channels[sb]) ? 1 : 0; - - if (ctx->channels[ch_index ^ swap_ch].power_levs[subband_to_powgrp[sb]] == ATRAC3P_POWER_COMP_OFF) - return; - - /* generate initial noise spectrum */ - for (i = 0; i < ATRAC3P_SUBBAND_SAMPLES; i++, rng_index++) - pwcsp[i] = noise_tab[rng_index & 0x3FF]; - - /* check gain control information */ - g1 = &ctx->channels[ch_index ^ swap_ch].gain_data[sb]; - g2 = &ctx->channels[ch_index ^ swap_ch].gain_data_prev[sb]; - - gain_lev = (g1->num_points > 0) ? (6 - g1->lev_code[0]) : 0; - - for (i = 0; i < g2->num_points; i++) - gcv = FFMAX(gcv, gain_lev - (g2->lev_code[i] - 6)); - - for (i = 0; i < g1->num_points; i++) - gcv = FFMAX(gcv, 6 - g1->lev_code[i]); - - grp_lev = pwc_levs[ctx->channels[ch_index ^ swap_ch].power_levs[subband_to_powgrp[sb]]] / (1 << gcv); - - /* skip the lowest two quant units (frequencies 0...351 Hz) for subband 0 */ - for (qu = subband_to_qu[sb] + (!sb ? 2 : 0); qu < subband_to_qu[sb + 1]; qu++) { - if (ctx->channels[ch_index].qu_wordlen[qu] <= 0) - continue; - - qu_lev = ff_atrac3p_sf_tab[ctx->channels[ch_index].qu_sf_idx[qu]] * - ff_atrac3p_mant_tab[ctx->channels[ch_index].qu_wordlen[qu]] / - (1 << ctx->channels[ch_index].qu_wordlen[qu]) * grp_lev; - - dst = &sp[ff_atrac3p_qu_to_spec_pos[qu]]; - nsp = ff_atrac3p_qu_to_spec_pos[qu + 1] - ff_atrac3p_qu_to_spec_pos[qu]; - - fdsp->vector_fmac_scalar(dst, pwcsp, qu_lev, nsp); - } -} - -void ff_atrac3p_imdct(AVFloatDSPContext *fdsp, AVTXContext *mdct_ctx, - av_tx_fn mdct_fn, float *pIn, float *pOut, - int wind_id, int sb) -{ - int i; - - if (sb & 1) - for (i = 0; i < ATRAC3P_SUBBAND_SAMPLES / 2; i++) - FFSWAP(float, pIn[i], pIn[ATRAC3P_SUBBAND_SAMPLES - 1 - i]); - - mdct_fn(mdct_ctx, pOut, pIn, sizeof(float)); - - /* Perform windowing on the output. - * ATRAC3+ uses two different MDCT windows: - * - The first one is just the plain sine window of size 256 - * - The 2nd one is the plain sine window of size 128 - * wrapped into zero (at the start) and one (at the end) regions. - * Both regions are 32 samples long. 
*/ - if (wind_id & 2) { /* 1st half: steep window */ - memset(pOut, 0, sizeof(float) * 32); - fdsp->vector_fmul(&pOut[32], &pOut[32], ff_sine_64, 64); - } else /* 1st half: simple sine window */ - fdsp->vector_fmul(pOut, pOut, ff_sine_128, ATRAC3P_MDCT_SIZE / 2); - - if (wind_id & 1) { /* 2nd half: steep window */ - fdsp->vector_fmul_reverse(&pOut[160], &pOut[160], ff_sine_64, 64); - memset(&pOut[224], 0, sizeof(float) * 32); - } else /* 2nd half: simple sine window */ - fdsp->vector_fmul_reverse(&pOut[128], &pOut[128], ff_sine_128, - ATRAC3P_MDCT_SIZE / 2); -} - -/* lookup table for fast modulo 23 op required for cyclic buffers of the IPQF */ -static const int mod23_lut[26] = { - 23, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, - 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 0 -}; - -/* First half of the 384-tap IPQF filtering coefficients. */ -static const float ipqf_coeffs1[ATRAC3P_PQF_FIR_LEN][16] = { - { -5.8336207e-7, -8.0604229e-7, -4.2005411e-7, -4.4400572e-8, - 3.226247e-8, 3.530856e-8, 1.2660377e-8, 0.000010516783, - -0.000011838618, 6.005389e-7, 0.0000014333754, 0.0000023108685, - 0.0000032569742, 0.0000046192422, 0.0000063894258, 0.0000070302972 }, - { -0.0000091622824, -0.000010502935, -0.0000079212787, -0.0000041712024, - -0.0000026336629, -0.0000015432918, -5.7168614e-7, 0.0000018111954, - 0.000023530851, 0.00002780562, 0.000032302323, 0.000036968919, - 0.000041575615, 0.000045337845, 0.000046043948, 0.000048585582 }, - { -0.000064464548, -0.000068306952, -0.000073081472, -0.00007612785, - -0.000074850752, -0.000070208509, -0.000062285151, -0.000058270442, - -0.000056296329, -0.000049888811, -0.000035615325, -0.000018532943, - 0.0000016657353, 0.00002610587, 0.000053397067, 0.00008079566 }, - { -0.00054488552, -0.00052537228, -0.00049731287, -0.00045778, - -0.00040612387, -0.00034301577, -0.00026866337, -0.00018248901, - -0.000084307925, 0.000025081157, 0.00014135583, 0.00026649953, - 0.00039945057, 0.00053928449, 0.00068422867, 0.00083093712 }, - { -0.0014771431, -0.001283227, -0.0010566821, -0.00079780724, - -0.00050782406, -0.00018855913, 0.00015771533, 0.00052769453, - 0.00091862219, 0.001326357, 0.0017469483, 0.0021754825, - 0.0026067684, 0.0030352892, 0.0034549395, 0.0038591374 }, - { -0.0022995141, -0.001443546, -0.00049266568, 0.00055068987, - 0.001682895, 0.0028992873, 0.0041943151, 0.0055614738, - 0.0069935122, 0.0084823566, 0.010018963, 0.011593862, - 0.013196872, 0.014817309, 0.016444042, 0.018065533 }, - { -0.034426283, -0.034281436, -0.033992987, -0.033563249, - -0.032995768, -0.032295227, -0.031467363, -0.030518902, - -0.02945766, -0.028291954, -0.027031265, -0.025685543, - -0.024265358, -0.022781773, -0.021246184, -0.019670162 }, - { -0.0030586775, -0.0037203205, -0.0042847847, -0.0047529764, - -0.0051268316, -0.0054091476, -0.0056034233, -0.005714261, - -0.0057445862, -0.0057025906, -0.0055920109, -0.0054194843, - -0.0051914565, -0.0049146507, -0.0045959447, -0.0042418269 }, - { -0.0016376863, -0.0017651899, -0.0018608454, -0.0019252141, - -0.0019593791, -0.0019653172, -0.0019450618, -0.0018990048, - -0.00183808, -0.0017501717, -0.0016481078, -0.0015320742, - -0.0014046903, -0.0012685474, -0.001125814, -0.00097943726 }, - { -0.00055432378, -0.00055472925, -0.00054783461, -0.00053276919, - -0.00051135791, -0.00048466062, -0.00045358928, -0.00042499689, - -0.00036942671, -0.0003392619, -0.00030001783, -0.00025986304, - -0.0002197204, -0.00018116167, -0.00014691355, -0.00011279432 }, - { -0.000064147389, -0.00006174868, -0.000054267788, -0.000047133824, - 
-0.000042927582, -0.000039477309, -0.000036340745, -0.000029687517, - -0.000049787737, -0.000041577889, -0.000033864744, -0.000026534748, - -0.000019841305, -0.000014789486, -0.000013131184, -0.0000099198869 }, - { -0.0000062990207, -0.0000072701259, -0.000011984052, -0.000017348082, - -0.000019907106, -0.000021348773, -0.000021961965, -0.000012203576, - -0.000010840992, 4.6299544e-7, 5.2588763e-7, 2.7792686e-7, - -2.3649704e-7, -0.0000010897784, -9.171448e-7, -5.22682e-7 } -}; - -/* Second half of the 384-tap IPQF filtering coefficients. */ -static const float ipqf_coeffs2[ATRAC3P_PQF_FIR_LEN][16] = { - { 5.22682e-7, 9.171448e-7, 0.0000010897784, 2.3649704e-7, - -2.7792686e-7, -5.2588763e-7, -4.6299544e-7, 0.000010840992, - -0.000012203576, -0.000021961965, -0.000021348773, -0.000019907106, - -0.000017348082, -0.000011984052, -0.0000072701259, -0.0000062990207 }, - { 0.0000099198869, 0.000013131184, 0.000014789486, 0.000019841305, - 0.000026534748, 0.000033864744, 0.000041577889, 0.000049787737, - -0.000029687517, -0.000036340745, -0.000039477309, -0.000042927582, - -0.000047133824, -0.000054267788, -0.00006174868, -0.000064147389 }, - { 0.00011279432, 0.00014691355, 0.00018116167, 0.0002197204, - 0.00025986304, 0.00030001783, 0.0003392619, 0.00036942671, - -0.00042499689, -0.00045358928, -0.00048466062, -0.00051135791, - -0.00053276919, -0.00054783461, -0.00055472925, -0.00055432378 }, - { 0.00097943726, 0.001125814, 0.0012685474, 0.0014046903, - 0.0015320742, 0.0016481078, 0.0017501717, 0.00183808, - -0.0018990048, -0.0019450618, -0.0019653172, -0.0019593791, - -0.0019252141, -0.0018608454, -0.0017651899, -0.0016376863 }, - { 0.0042418269, 0.0045959447, 0.0049146507, 0.0051914565, - 0.0054194843, 0.0055920109, 0.0057025906, 0.0057445862, - -0.005714261, -0.0056034233, -0.0054091476, -0.0051268316, - -0.0047529764, -0.0042847847, -0.0037203205, -0.0030586775 }, - { 0.019670162, 0.021246184, 0.022781773, 0.024265358, - 0.025685543, 0.027031265, 0.028291954, 0.02945766, - -0.030518902, -0.031467363, -0.032295227, -0.032995768, - -0.033563249, -0.033992987, -0.034281436, -0.034426283 }, - { -0.018065533, -0.016444042, -0.014817309, -0.013196872, - -0.011593862, -0.010018963, -0.0084823566, -0.0069935122, - 0.0055614738, 0.0041943151, 0.0028992873, 0.001682895, - 0.00055068987, -0.00049266568, -0.001443546, -0.0022995141 }, - { -0.0038591374, -0.0034549395, -0.0030352892, -0.0026067684, - -0.0021754825, -0.0017469483, -0.001326357, -0.00091862219, - 0.00052769453, 0.00015771533, -0.00018855913, -0.00050782406, - -0.00079780724, -0.0010566821, -0.001283227, -0.0014771431 }, - { -0.00083093712, -0.00068422867, -0.00053928449, -0.00039945057, - -0.00026649953, -0.00014135583, -0.000025081157, 0.000084307925, - -0.00018248901, -0.00026866337, -0.00034301577, -0.00040612387, - -0.00045778, -0.00049731287, -0.00052537228, -0.00054488552 }, - { -0.00008079566, -0.000053397067, -0.00002610587, -0.0000016657353, - 0.000018532943, 0.000035615325, 0.000049888811, 0.000056296329, - -0.000058270442, -0.000062285151, -0.000070208509, -0.000074850752, - -0.00007612785, -0.000073081472, -0.000068306952, -0.000064464548 }, - { -0.000048585582, -0.000046043948, -0.000045337845, -0.000041575615, - -0.000036968919, -0.000032302323, -0.00002780562, -0.000023530851, - 0.0000018111954, -5.7168614e-7, -0.0000015432918, -0.0000026336629, - -0.0000041712024, -0.0000079212787, -0.000010502935, -0.0000091622824 }, - { -0.0000070302972, -0.0000063894258, -0.0000046192422, -0.0000032569742, - -0.0000023108685, 
-0.0000014333754, -6.005389e-7, 0.000011838618, - 0.000010516783, 1.2660377e-8, 3.530856e-8, 3.226247e-8, - -4.4400572e-8, -4.2005411e-7, -8.0604229e-7, -5.8336207e-7 } -}; - -void ff_atrac3p_ipqf(AVTXContext *dct_ctx, av_tx_fn dct_fn, - Atrac3pIPQFChannelCtx *hist, const float *in, float *out) -{ - int i, s, sb, t, pos_now, pos_next; - LOCAL_ALIGNED(32, float, idct_in, [ATRAC3P_SUBBANDS]); - LOCAL_ALIGNED(32, float, idct_out, [ATRAC3P_SUBBANDS]); - - memset(out, 0, ATRAC3P_FRAME_SAMPLES * sizeof(*out)); - - for (s = 0; s < ATRAC3P_SUBBAND_SAMPLES; s++) { - /* pick up one sample from each subband */ - for (sb = 0; sb < ATRAC3P_SUBBANDS; sb++) - idct_in[sb] = in[sb * ATRAC3P_SUBBAND_SAMPLES + s]; - - /* Calculate the sine and cosine part of the PQF using IDCT-IV */ - dct_fn(dct_ctx, idct_out, idct_in, sizeof(float)); - - /* append the result to the history */ - for (i = 0; i < 8; i++) { - hist->buf1[hist->pos][i] = idct_out[i + 8]; - hist->buf2[hist->pos][i] = idct_out[7 - i]; - } - - pos_now = hist->pos; - pos_next = mod23_lut[pos_now + 2]; // pos_next = (pos_now + 1) % 23; - - for (t = 0; t < ATRAC3P_PQF_FIR_LEN; t++) { - for (i = 0; i < 8; i++) { - out[s * 16 + i + 0] += hist->buf1[pos_now][i] * ipqf_coeffs1[t][i] + - hist->buf2[pos_next][i] * ipqf_coeffs2[t][i]; - out[s * 16 + i + 8] += hist->buf1[pos_now][7 - i] * ipqf_coeffs1[t][i + 8] + - hist->buf2[pos_next][7 - i] * ipqf_coeffs2[t][i + 8]; - } - - pos_now = mod23_lut[pos_next + 2]; // pos_now = (pos_now + 2) % 23; - pos_next = mod23_lut[pos_now + 2]; // pos_next = (pos_next + 2) % 23; - } - - hist->pos = mod23_lut[hist->pos]; // hist->pos = (hist->pos - 1) % 23; - } -} diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/cbs_av1.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/cbs_av1.c deleted file mode 100644 index 45e1288a51925cffffe8b59664e52ddbc9ac4d4d..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/cbs_av1.c +++ /dev/null @@ -1,1366 +0,0 @@ -/* - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include "libavutil/avassert.h" -#include "libavutil/opt.h" -#include "libavutil/pixfmt.h" - -#include "avcodec.h" -#include "cbs.h" -#include "cbs_internal.h" -#include "cbs_av1.h" - - -static int cbs_av1_read_uvlc(CodedBitstreamContext *ctx, GetBitContext *gbc, - const char *name, uint32_t *write_to, - uint32_t range_min, uint32_t range_max) -{ - uint32_t zeroes, bits_value, value; - int position; - - if (ctx->trace_enable) - position = get_bits_count(gbc); - - zeroes = 0; - while (1) { - if (get_bits_left(gbc) < 1) { - av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid uvlc code at " - "%s: bitstream ended.\n", name); - return AVERROR_INVALIDDATA; - } - - if (get_bits1(gbc)) - break; - ++zeroes; - } - - if (zeroes >= 32) { - value = MAX_UINT_BITS(32); - } else { - if (get_bits_left(gbc) < zeroes) { - av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid uvlc code at " - "%s: bitstream ended.\n", name); - return AVERROR_INVALIDDATA; - } - - bits_value = get_bits_long(gbc, zeroes); - value = bits_value + (UINT32_C(1) << zeroes) - 1; - } - - if (ctx->trace_enable) { - char bits[65]; - int i, j, k; - - if (zeroes >= 32) { - while (zeroes > 32) { - k = FFMIN(zeroes - 32, 32); - for (i = 0; i < k; i++) - bits[i] = '0'; - bits[i] = 0; - ff_cbs_trace_syntax_element(ctx, position, name, - NULL, bits, 0); - zeroes -= k; - position += k; - } - } - - for (i = 0; i < zeroes; i++) - bits[i] = '0'; - bits[i++] = '1'; - - if (zeroes < 32) { - for (j = 0; j < zeroes; j++) - bits[i++] = (bits_value >> (zeroes - j - 1) & 1) ? '1' : '0'; - } - - bits[i] = 0; - ff_cbs_trace_syntax_element(ctx, position, name, - NULL, bits, value); - } - - if (value < range_min || value > range_max) { - av_log(ctx->log_ctx, AV_LOG_ERROR, "%s out of range: " - "%"PRIu32", but must be in [%"PRIu32",%"PRIu32"].\n", - name, value, range_min, range_max); - return AVERROR_INVALIDDATA; - } - - *write_to = value; - return 0; -} - -static int cbs_av1_write_uvlc(CodedBitstreamContext *ctx, PutBitContext *pbc, - const char *name, uint32_t value, - uint32_t range_min, uint32_t range_max) -{ - uint32_t v; - int position, zeroes; - - if (value < range_min || value > range_max) { - av_log(ctx->log_ctx, AV_LOG_ERROR, "%s out of range: " - "%"PRIu32", but must be in [%"PRIu32",%"PRIu32"].\n", - name, value, range_min, range_max); - return AVERROR_INVALIDDATA; - } - - if (ctx->trace_enable) - position = put_bits_count(pbc); - - zeroes = av_log2(value + 1); - v = value - (1U << zeroes) + 1; - put_bits(pbc, zeroes, 0); - put_bits(pbc, 1, 1); - put_bits(pbc, zeroes, v); - - if (ctx->trace_enable) { - char bits[65]; - int i, j; - i = 0; - for (j = 0; j < zeroes; j++) - bits[i++] = '0'; - bits[i++] = '1'; - for (j = 0; j < zeroes; j++) - bits[i++] = (v >> (zeroes - j - 1) & 1) ? 
'1' : '0'; - bits[i++] = 0; - ff_cbs_trace_syntax_element(ctx, position, name, NULL, - bits, value); - } - - return 0; -} - -static int cbs_av1_read_leb128(CodedBitstreamContext *ctx, GetBitContext *gbc, - const char *name, uint64_t *write_to) -{ - uint64_t value; - int position, err, i; - - if (ctx->trace_enable) - position = get_bits_count(gbc); - - value = 0; - for (i = 0; i < 8; i++) { - int subscript[2] = { 1, i }; - uint32_t byte; - err = ff_cbs_read_unsigned(ctx, gbc, 8, "leb128_byte[i]", subscript, - &byte, 0x00, 0xff); - if (err < 0) - return err; - - value |= (uint64_t)(byte & 0x7f) << (i * 7); - if (!(byte & 0x80)) - break; - } - - if (value > UINT32_MAX) - return AVERROR_INVALIDDATA; - - if (ctx->trace_enable) - ff_cbs_trace_syntax_element(ctx, position, name, NULL, "", value); - - *write_to = value; - return 0; -} - -static int cbs_av1_write_leb128(CodedBitstreamContext *ctx, PutBitContext *pbc, - const char *name, uint64_t value) -{ - int position, err, len, i; - uint8_t byte; - - len = (av_log2(value) + 7) / 7; - - if (ctx->trace_enable) - position = put_bits_count(pbc); - - for (i = 0; i < len; i++) { - int subscript[2] = { 1, i }; - - byte = value >> (7 * i) & 0x7f; - if (i < len - 1) - byte |= 0x80; - - err = ff_cbs_write_unsigned(ctx, pbc, 8, "leb128_byte[i]", subscript, - byte, 0x00, 0xff); - if (err < 0) - return err; - } - - if (ctx->trace_enable) - ff_cbs_trace_syntax_element(ctx, position, name, NULL, "", value); - - return 0; -} - -static int cbs_av1_read_ns(CodedBitstreamContext *ctx, GetBitContext *gbc, - uint32_t n, const char *name, - const int *subscripts, uint32_t *write_to) -{ - uint32_t m, v, extra_bit, value; - int position, w; - - av_assert0(n > 0); - - if (ctx->trace_enable) - position = get_bits_count(gbc); - - w = av_log2(n) + 1; - m = (1 << w) - n; - - if (get_bits_left(gbc) < w) { - av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid non-symmetric value at " - "%s: bitstream ended.\n", name); - return AVERROR_INVALIDDATA; - } - - if (w - 1 > 0) - v = get_bits(gbc, w - 1); - else - v = 0; - - if (v < m) { - value = v; - } else { - extra_bit = get_bits1(gbc); - value = (v << 1) - m + extra_bit; - } - - if (ctx->trace_enable) { - char bits[33]; - int i; - for (i = 0; i < w - 1; i++) - bits[i] = (v >> i & 1) ? '1' : '0'; - if (v >= m) - bits[i++] = extra_bit ? '1' : '0'; - bits[i] = 0; - - ff_cbs_trace_syntax_element(ctx, position, - name, subscripts, bits, value); - } - - *write_to = value; - return 0; -} - -static int cbs_av1_write_ns(CodedBitstreamContext *ctx, PutBitContext *pbc, - uint32_t n, const char *name, - const int *subscripts, uint32_t value) -{ - uint32_t w, m, v, extra_bit; - int position; - - if (value > n) { - av_log(ctx->log_ctx, AV_LOG_ERROR, "%s out of range: " - "%"PRIu32", but must be in [0,%"PRIu32"].\n", - name, value, n); - return AVERROR_INVALIDDATA; - } - - if (ctx->trace_enable) - position = put_bits_count(pbc); - - w = av_log2(n) + 1; - m = (1 << w) - n; - - if (put_bits_left(pbc) < w) - return AVERROR(ENOSPC); - - if (value < m) { - v = value; - put_bits(pbc, w - 1, v); - } else { - v = m + ((value - m) >> 1); - extra_bit = (value - m) & 1; - put_bits(pbc, w - 1, v); - put_bits(pbc, 1, extra_bit); - } - - if (ctx->trace_enable) { - char bits[33]; - int i; - for (i = 0; i < w - 1; i++) - bits[i] = (v >> i & 1) ? '1' : '0'; - if (value >= m) - bits[i++] = extra_bit ? 
'1' : '0'; - bits[i] = 0; - - ff_cbs_trace_syntax_element(ctx, position, - name, subscripts, bits, value); - } - - return 0; -} - -static int cbs_av1_read_increment(CodedBitstreamContext *ctx, GetBitContext *gbc, - uint32_t range_min, uint32_t range_max, - const char *name, uint32_t *write_to) -{ - uint32_t value; - int position, i; - char bits[33]; - - av_assert0(range_min <= range_max && range_max - range_min < sizeof(bits) - 1); - if (ctx->trace_enable) - position = get_bits_count(gbc); - - for (i = 0, value = range_min; value < range_max;) { - if (get_bits_left(gbc) < 1) { - av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid increment value at " - "%s: bitstream ended.\n", name); - return AVERROR_INVALIDDATA; - } - if (get_bits1(gbc)) { - bits[i++] = '1'; - ++value; - } else { - bits[i++] = '0'; - break; - } - } - - if (ctx->trace_enable) { - bits[i] = 0; - ff_cbs_trace_syntax_element(ctx, position, - name, NULL, bits, value); - } - - *write_to = value; - return 0; -} - -static int cbs_av1_write_increment(CodedBitstreamContext *ctx, PutBitContext *pbc, - uint32_t range_min, uint32_t range_max, - const char *name, uint32_t value) -{ - int len; - - av_assert0(range_min <= range_max && range_max - range_min < 32); - if (value < range_min || value > range_max) { - av_log(ctx->log_ctx, AV_LOG_ERROR, "%s out of range: " - "%"PRIu32", but must be in [%"PRIu32",%"PRIu32"].\n", - name, value, range_min, range_max); - return AVERROR_INVALIDDATA; - } - - if (value == range_max) - len = range_max - range_min; - else - len = value - range_min + 1; - if (put_bits_left(pbc) < len) - return AVERROR(ENOSPC); - - if (ctx->trace_enable) { - char bits[33]; - int i; - for (i = 0; i < len; i++) { - if (range_min + i == value) - bits[i] = '0'; - else - bits[i] = '1'; - } - bits[i] = 0; - ff_cbs_trace_syntax_element(ctx, put_bits_count(pbc), - name, NULL, bits, value); - } - - if (len > 0) - put_bits(pbc, len, (1 << len) - 1 - (value != range_max)); - - return 0; -} - -static int cbs_av1_read_subexp(CodedBitstreamContext *ctx, GetBitContext *gbc, - uint32_t range_max, const char *name, - const int *subscripts, uint32_t *write_to) -{ - uint32_t value; - int position, err; - uint32_t max_len, len, range_offset, range_bits; - - if (ctx->trace_enable) - position = get_bits_count(gbc); - - av_assert0(range_max > 0); - max_len = av_log2(range_max - 1) - 3; - - err = cbs_av1_read_increment(ctx, gbc, 0, max_len, - "subexp_more_bits", &len); - if (err < 0) - return err; - - if (len) { - range_bits = 2 + len; - range_offset = 1 << range_bits; - } else { - range_bits = 3; - range_offset = 0; - } - - if (len < max_len) { - err = ff_cbs_read_unsigned(ctx, gbc, range_bits, - "subexp_bits", NULL, &value, - 0, MAX_UINT_BITS(range_bits)); - if (err < 0) - return err; - - } else { - err = cbs_av1_read_ns(ctx, gbc, range_max - range_offset, - "subexp_final_bits", NULL, &value); - if (err < 0) - return err; - } - value += range_offset; - - if (ctx->trace_enable) - ff_cbs_trace_syntax_element(ctx, position, - name, subscripts, "", value); - - *write_to = value; - return err; -} - -static int cbs_av1_write_subexp(CodedBitstreamContext *ctx, PutBitContext *pbc, - uint32_t range_max, const char *name, - const int *subscripts, uint32_t value) -{ - int position, err; - uint32_t max_len, len, range_offset, range_bits; - - if (value > range_max) { - av_log(ctx->log_ctx, AV_LOG_ERROR, "%s out of range: " - "%"PRIu32", but must be in [0,%"PRIu32"].\n", - name, value, range_max); - return AVERROR_INVALIDDATA; - } - - if (ctx->trace_enable) - 
position = put_bits_count(pbc); - - av_assert0(range_max > 0); - max_len = av_log2(range_max - 1) - 3; - - if (value < 8) { - range_bits = 3; - range_offset = 0; - len = 0; - } else { - range_bits = av_log2(value); - len = range_bits - 2; - if (len > max_len) { - // The top bin is combined with the one below it. - av_assert0(len == max_len + 1); - --range_bits; - len = max_len; - } - range_offset = 1 << range_bits; - } - - err = cbs_av1_write_increment(ctx, pbc, 0, max_len, - "subexp_more_bits", len); - if (err < 0) - return err; - - if (len < max_len) { - err = ff_cbs_write_unsigned(ctx, pbc, range_bits, - "subexp_bits", NULL, - value - range_offset, - 0, MAX_UINT_BITS(range_bits)); - if (err < 0) - return err; - - } else { - err = cbs_av1_write_ns(ctx, pbc, range_max - range_offset, - "subexp_final_bits", NULL, - value - range_offset); - if (err < 0) - return err; - } - - if (ctx->trace_enable) - ff_cbs_trace_syntax_element(ctx, position, - name, subscripts, "", value); - - return err; -} - - -static int cbs_av1_tile_log2(int blksize, int target) -{ - int k; - for (k = 0; (blksize << k) < target; k++); - return k; -} - -static int cbs_av1_get_relative_dist(const AV1RawSequenceHeader *seq, - unsigned int a, unsigned int b) -{ - unsigned int diff, m; - if (!seq->enable_order_hint) - return 0; - diff = a - b; - m = 1 << seq->order_hint_bits_minus_1; - diff = (diff & (m - 1)) - (diff & m); - return diff; -} - -static size_t cbs_av1_get_payload_bytes_left(GetBitContext *gbc) -{ - GetBitContext tmp = *gbc; - size_t size = 0; - for (int i = 0; get_bits_left(&tmp) >= 8; i++) { - if (get_bits(&tmp, 8)) - size = i; - } - return size; -} - - -#define HEADER(name) do { \ - ff_cbs_trace_header(ctx, name); \ - } while (0) - -#define CHECK(call) do { \ - err = (call); \ - if (err < 0) \ - return err; \ - } while (0) - -#define FUNC_NAME(rw, codec, name) cbs_ ## codec ## _ ## rw ## _ ## name -#define FUNC_AV1(rw, name) FUNC_NAME(rw, av1, name) -#define FUNC(name) FUNC_AV1(READWRITE, name) - -#define SUBSCRIPTS(subs, ...) (subs > 0 ? ((int[subs + 1]){ subs, __VA_ARGS__ }) : NULL) - -#define fb(width, name) \ - xf(width, name, current->name, 0, MAX_UINT_BITS(width), 0, ) -#define fc(width, name, range_min, range_max) \ - xf(width, name, current->name, range_min, range_max, 0, ) -#define flag(name) fb(1, name) -#define su(width, name) \ - xsu(width, name, current->name, 0, ) - -#define fbs(width, name, subs, ...) \ - xf(width, name, current->name, 0, MAX_UINT_BITS(width), subs, __VA_ARGS__) -#define fcs(width, name, range_min, range_max, subs, ...) \ - xf(width, name, current->name, range_min, range_max, subs, __VA_ARGS__) -#define flags(name, subs, ...) \ - xf(1, name, current->name, 0, 1, subs, __VA_ARGS__) -#define sus(width, name, subs, ...) \ - xsu(width, name, current->name, subs, __VA_ARGS__) - -#define fixed(width, name, value) do { \ - av_unused uint32_t fixed_value = value; \ - xf(width, name, fixed_value, value, value, 0, ); \ - } while (0) - - -#define READ -#define READWRITE read -#define RWContext GetBitContext - -#define xf(width, name, var, range_min, range_max, subs, ...) do { \ - uint32_t value; \ - CHECK(ff_cbs_read_unsigned(ctx, rw, width, #name, \ - SUBSCRIPTS(subs, __VA_ARGS__), \ - &value, range_min, range_max)); \ - var = value; \ - } while (0) - -#define xsu(width, name, var, subs, ...) 
do { \ - int32_t value; \ - CHECK(ff_cbs_read_signed(ctx, rw, width, #name, \ - SUBSCRIPTS(subs, __VA_ARGS__), &value, \ - MIN_INT_BITS(width), \ - MAX_INT_BITS(width))); \ - var = value; \ - } while (0) - -#define uvlc(name, range_min, range_max) do { \ - uint32_t value; \ - CHECK(cbs_av1_read_uvlc(ctx, rw, #name, \ - &value, range_min, range_max)); \ - current->name = value; \ - } while (0) - -#define ns(max_value, name, subs, ...) do { \ - uint32_t value; \ - CHECK(cbs_av1_read_ns(ctx, rw, max_value, #name, \ - SUBSCRIPTS(subs, __VA_ARGS__), &value)); \ - current->name = value; \ - } while (0) - -#define increment(name, min, max) do { \ - uint32_t value; \ - CHECK(cbs_av1_read_increment(ctx, rw, min, max, #name, &value)); \ - current->name = value; \ - } while (0) - -#define subexp(name, max, subs, ...) do { \ - uint32_t value; \ - CHECK(cbs_av1_read_subexp(ctx, rw, max, #name, \ - SUBSCRIPTS(subs, __VA_ARGS__), &value)); \ - current->name = value; \ - } while (0) - -#define delta_q(name) do { \ - uint8_t delta_coded; \ - int8_t delta_q; \ - xf(1, name.delta_coded, delta_coded, 0, 1, 0, ); \ - if (delta_coded) \ - xsu(1 + 6, name.delta_q, delta_q, 0, ); \ - else \ - delta_q = 0; \ - current->name = delta_q; \ - } while (0) - -#define leb128(name) do { \ - uint64_t value; \ - CHECK(cbs_av1_read_leb128(ctx, rw, #name, &value)); \ - current->name = value; \ - } while (0) - -#define infer(name, value) do { \ - current->name = value; \ - } while (0) - -#define byte_alignment(rw) (get_bits_count(rw) % 8) - -#include "cbs_av1_syntax_template.c" - -#undef READ -#undef READWRITE -#undef RWContext -#undef xf -#undef xsu -#undef uvlc -#undef ns -#undef increment -#undef subexp -#undef delta_q -#undef leb128 -#undef infer -#undef byte_alignment - - -#define WRITE -#define READWRITE write -#define RWContext PutBitContext - -#define xf(width, name, var, range_min, range_max, subs, ...) do { \ - CHECK(ff_cbs_write_unsigned(ctx, rw, width, #name, \ - SUBSCRIPTS(subs, __VA_ARGS__), \ - var, range_min, range_max)); \ - } while (0) - -#define xsu(width, name, var, subs, ...) do { \ - CHECK(ff_cbs_write_signed(ctx, rw, width, #name, \ - SUBSCRIPTS(subs, __VA_ARGS__), var, \ - MIN_INT_BITS(width), \ - MAX_INT_BITS(width))); \ - } while (0) - -#define uvlc(name, range_min, range_max) do { \ - CHECK(cbs_av1_write_uvlc(ctx, rw, #name, current->name, \ - range_min, range_max)); \ - } while (0) - -#define ns(max_value, name, subs, ...) do { \ - CHECK(cbs_av1_write_ns(ctx, rw, max_value, #name, \ - SUBSCRIPTS(subs, __VA_ARGS__), \ - current->name)); \ - } while (0) - -#define increment(name, min, max) do { \ - CHECK(cbs_av1_write_increment(ctx, rw, min, max, #name, \ - current->name)); \ - } while (0) - -#define subexp(name, max, subs, ...) 
do { \ - CHECK(cbs_av1_write_subexp(ctx, rw, max, #name, \ - SUBSCRIPTS(subs, __VA_ARGS__), \ - current->name)); \ - } while (0) - -#define delta_q(name) do { \ - xf(1, name.delta_coded, current->name != 0, 0, 1, 0, ); \ - if (current->name) \ - xsu(1 + 6, name.delta_q, current->name, 0, ); \ - } while (0) - -#define leb128(name) do { \ - CHECK(cbs_av1_write_leb128(ctx, rw, #name, current->name)); \ - } while (0) - -#define infer(name, value) do { \ - if (current->name != (value)) { \ - av_log(ctx->log_ctx, AV_LOG_ERROR, \ - "%s does not match inferred value: " \ - "%"PRId64", but should be %"PRId64".\n", \ - #name, (int64_t)current->name, (int64_t)(value)); \ - return AVERROR_INVALIDDATA; \ - } \ - } while (0) - -#define byte_alignment(rw) (put_bits_count(rw) % 8) - -#include "cbs_av1_syntax_template.c" - -#undef WRITE -#undef READWRITE -#undef RWContext -#undef xf -#undef xsu -#undef uvlc -#undef ns -#undef increment -#undef subexp -#undef delta_q -#undef leb128 -#undef infer -#undef byte_alignment - - -static int cbs_av1_split_fragment(CodedBitstreamContext *ctx, - CodedBitstreamFragment *frag, - int header) -{ - GetBitContext gbc; - uint8_t *data; - size_t size; - uint64_t obu_length; - int pos, err, trace; - - // Don't include this parsing in trace output. - trace = ctx->trace_enable; - ctx->trace_enable = 0; - - data = frag->data; - size = frag->data_size; - - if (INT_MAX / 8 < size) { - av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid fragment: " - "too large (%"SIZE_SPECIFIER" bytes).\n", size); - err = AVERROR_INVALIDDATA; - goto fail; - } - - if (header && size && data[0] & 0x80) { - // first bit is nonzero, the extradata does not consist purely of - // OBUs. Expect MP4/Matroska AV1CodecConfigurationRecord - int config_record_version = data[0] & 0x7f; - - if (config_record_version != 1) { - av_log(ctx->log_ctx, AV_LOG_ERROR, - "Unknown version %d of AV1CodecConfigurationRecord " - "found!\n", - config_record_version); - err = AVERROR_INVALIDDATA; - goto fail; - } - - if (size <= 4) { - if (size < 4) { - av_log(ctx->log_ctx, AV_LOG_WARNING, - "Undersized AV1CodecConfigurationRecord v%d found!\n", - config_record_version); - err = AVERROR_INVALIDDATA; - goto fail; - } - - goto success; - } - - // In AV1CodecConfigurationRecord v1, actual OBUs start after - // four bytes. Thus set the offset as required for properly - // parsing them. 
- data += 4; - size -= 4; - } - - while (size > 0) { - AV1RawOBUHeader header; - uint64_t obu_size; - - init_get_bits(&gbc, data, 8 * size); - - err = cbs_av1_read_obu_header(ctx, &gbc, &header); - if (err < 0) - goto fail; - - if (header.obu_has_size_field) { - if (get_bits_left(&gbc) < 8) { - av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid OBU: fragment " - "too short (%"SIZE_SPECIFIER" bytes).\n", size); - err = AVERROR_INVALIDDATA; - goto fail; - } - err = cbs_av1_read_leb128(ctx, &gbc, "obu_size", &obu_size); - if (err < 0) - goto fail; - } else - obu_size = size - 1 - header.obu_extension_flag; - - pos = get_bits_count(&gbc); - av_assert0(pos % 8 == 0 && pos / 8 <= size); - - obu_length = pos / 8 + obu_size; - - if (size < obu_length) { - av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid OBU length: " - "%"PRIu64", but only %"SIZE_SPECIFIER" bytes remaining in fragment.\n", - obu_length, size); - err = AVERROR_INVALIDDATA; - goto fail; - } - - err = ff_cbs_append_unit_data(frag, header.obu_type, - data, obu_length, frag->data_ref); - if (err < 0) - goto fail; - - data += obu_length; - size -= obu_length; - } - -success: - err = 0; -fail: - ctx->trace_enable = trace; - return err; -} - -static int cbs_av1_ref_tile_data(CodedBitstreamContext *ctx, - CodedBitstreamUnit *unit, - GetBitContext *gbc, - AV1RawTileData *td) -{ - int pos; - - pos = get_bits_count(gbc); - if (pos >= 8 * unit->data_size) { - av_log(ctx->log_ctx, AV_LOG_ERROR, "Bitstream ended before " - "any data in tile group (%d bits read).\n", pos); - return AVERROR_INVALIDDATA; - } - // Must be byte-aligned at this point. - av_assert0(pos % 8 == 0); - - td->data_ref = av_buffer_ref(unit->data_ref); - if (!td->data_ref) - return AVERROR(ENOMEM); - - td->data = unit->data + pos / 8; - td->data_size = unit->data_size - pos / 8; - - return 0; -} - -static int cbs_av1_read_unit(CodedBitstreamContext *ctx, - CodedBitstreamUnit *unit) -{ - CodedBitstreamAV1Context *priv = ctx->priv_data; - AV1RawOBU *obu; - GetBitContext gbc; - int err, start_pos, end_pos; - - err = ff_cbs_alloc_unit_content(ctx, unit); - if (err < 0) - return err; - obu = unit->content; - - err = init_get_bits(&gbc, unit->data, 8 * unit->data_size); - if (err < 0) - return err; - - err = cbs_av1_read_obu_header(ctx, &gbc, &obu->header); - if (err < 0) - return err; - av_assert0(obu->header.obu_type == unit->type); - - if (obu->header.obu_has_size_field) { - uint64_t obu_size; - err = cbs_av1_read_leb128(ctx, &gbc, "obu_size", &obu_size); - if (err < 0) - return err; - obu->obu_size = obu_size; - } else { - if (unit->data_size < 1 + obu->header.obu_extension_flag) { - av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid OBU length: " - "unit too short (%"SIZE_SPECIFIER").\n", unit->data_size); - return AVERROR_INVALIDDATA; - } - obu->obu_size = unit->data_size - 1 - obu->header.obu_extension_flag; - } - - start_pos = get_bits_count(&gbc); - - if (obu->header.obu_extension_flag) { - if (obu->header.obu_type != AV1_OBU_SEQUENCE_HEADER && - obu->header.obu_type != AV1_OBU_TEMPORAL_DELIMITER && - priv->operating_point_idc) { - int in_temporal_layer = - (priv->operating_point_idc >> priv->temporal_id ) & 1; - int in_spatial_layer = - (priv->operating_point_idc >> (priv->spatial_id + 8)) & 1; - if (!in_temporal_layer || !in_spatial_layer) { - return AVERROR(EAGAIN); // drop_obu() - } - } - } - - switch (obu->header.obu_type) { - case AV1_OBU_SEQUENCE_HEADER: - { - err = cbs_av1_read_sequence_header_obu(ctx, &gbc, - &obu->obu.sequence_header); - if (err < 0) - return err; - - if 
(priv->operating_point >= 0) { - AV1RawSequenceHeader *sequence_header = &obu->obu.sequence_header; - - if (priv->operating_point > sequence_header->operating_points_cnt_minus_1) { - av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid Operating Point %d requested. " - "Must not be higher than %u.\n", - priv->operating_point, sequence_header->operating_points_cnt_minus_1); - return AVERROR(EINVAL); - } - priv->operating_point_idc = sequence_header->operating_point_idc[priv->operating_point]; - } - - av_buffer_unref(&priv->sequence_header_ref); - priv->sequence_header = NULL; - - priv->sequence_header_ref = av_buffer_ref(unit->content_ref); - if (!priv->sequence_header_ref) - return AVERROR(ENOMEM); - priv->sequence_header = &obu->obu.sequence_header; - } - break; - case AV1_OBU_TEMPORAL_DELIMITER: - { - err = cbs_av1_read_temporal_delimiter_obu(ctx, &gbc); - if (err < 0) - return err; - } - break; - case AV1_OBU_FRAME_HEADER: - case AV1_OBU_REDUNDANT_FRAME_HEADER: - { - err = cbs_av1_read_frame_header_obu(ctx, &gbc, - &obu->obu.frame_header, - obu->header.obu_type == - AV1_OBU_REDUNDANT_FRAME_HEADER, - unit->data_ref); - if (err < 0) - return err; - } - break; - case AV1_OBU_TILE_GROUP: - { - err = cbs_av1_read_tile_group_obu(ctx, &gbc, - &obu->obu.tile_group); - if (err < 0) - return err; - - err = cbs_av1_ref_tile_data(ctx, unit, &gbc, - &obu->obu.tile_group.tile_data); - if (err < 0) - return err; - } - break; - case AV1_OBU_FRAME: - { - err = cbs_av1_read_frame_obu(ctx, &gbc, &obu->obu.frame, - unit->data_ref); - if (err < 0) - return err; - - err = cbs_av1_ref_tile_data(ctx, unit, &gbc, - &obu->obu.frame.tile_group.tile_data); - if (err < 0) - return err; - } - break; - case AV1_OBU_TILE_LIST: - { - err = cbs_av1_read_tile_list_obu(ctx, &gbc, - &obu->obu.tile_list); - if (err < 0) - return err; - - err = cbs_av1_ref_tile_data(ctx, unit, &gbc, - &obu->obu.tile_list.tile_data); - if (err < 0) - return err; - } - break; - case AV1_OBU_METADATA: - { - err = cbs_av1_read_metadata_obu(ctx, &gbc, &obu->obu.metadata); - if (err < 0) - return err; - } - break; - case AV1_OBU_PADDING: - { - err = cbs_av1_read_padding_obu(ctx, &gbc, &obu->obu.padding); - if (err < 0) - return err; - } - break; - default: - return AVERROR(ENOSYS); - } - - end_pos = get_bits_count(&gbc); - av_assert0(end_pos <= unit->data_size * 8); - - if (obu->obu_size > 0 && - obu->header.obu_type != AV1_OBU_TILE_GROUP && - obu->header.obu_type != AV1_OBU_TILE_LIST && - obu->header.obu_type != AV1_OBU_FRAME) { - int nb_bits = obu->obu_size * 8 + start_pos - end_pos; - - if (nb_bits <= 0) - return AVERROR_INVALIDDATA; - - err = cbs_av1_read_trailing_bits(ctx, &gbc, nb_bits); - if (err < 0) - return err; - } - - return 0; -} - -static int cbs_av1_write_obu(CodedBitstreamContext *ctx, - CodedBitstreamUnit *unit, - PutBitContext *pbc) -{ - CodedBitstreamAV1Context *priv = ctx->priv_data; - AV1RawOBU *obu = unit->content; - PutBitContext pbc_tmp; - AV1RawTileData *td; - size_t header_size; - int err, start_pos, end_pos, data_pos; - CodedBitstreamAV1Context av1ctx; - - // OBUs in the normal bitstream format must contain a size field - // in every OBU (in annex B it is optional, but we don't support - // writing that). 
- obu->header.obu_has_size_field = 1; - av1ctx = *priv; - - if (priv->sequence_header_ref) { - av1ctx.sequence_header_ref = av_buffer_ref(priv->sequence_header_ref); - if (!av1ctx.sequence_header_ref) - return AVERROR(ENOMEM); - } - - if (priv->frame_header_ref) { - av1ctx.frame_header_ref = av_buffer_ref(priv->frame_header_ref); - if (!av1ctx.frame_header_ref) { - err = AVERROR(ENOMEM); - goto error; - } - } - - err = cbs_av1_write_obu_header(ctx, pbc, &obu->header); - if (err < 0) - goto error; - - if (obu->header.obu_has_size_field) { - pbc_tmp = *pbc; - // Add space for the size field to fill later. - put_bits32(pbc, 0); - put_bits32(pbc, 0); - } - - td = NULL; - start_pos = put_bits_count(pbc); - - switch (obu->header.obu_type) { - case AV1_OBU_SEQUENCE_HEADER: - { - err = cbs_av1_write_sequence_header_obu(ctx, pbc, - &obu->obu.sequence_header); - if (err < 0) - goto error; - - av_buffer_unref(&priv->sequence_header_ref); - priv->sequence_header = NULL; - - err = ff_cbs_make_unit_refcounted(ctx, unit); - if (err < 0) - goto error; - - priv->sequence_header_ref = av_buffer_ref(unit->content_ref); - if (!priv->sequence_header_ref) { - err = AVERROR(ENOMEM); - goto error; - } - - priv->sequence_header = &obu->obu.sequence_header; - } - break; - case AV1_OBU_TEMPORAL_DELIMITER: - { - err = cbs_av1_write_temporal_delimiter_obu(ctx, pbc); - if (err < 0) - goto error; - } - break; - case AV1_OBU_FRAME_HEADER: - case AV1_OBU_REDUNDANT_FRAME_HEADER: - { - err = cbs_av1_write_frame_header_obu(ctx, pbc, - &obu->obu.frame_header, - obu->header.obu_type == - AV1_OBU_REDUNDANT_FRAME_HEADER, - NULL); - if (err < 0) - goto error; - } - break; - case AV1_OBU_TILE_GROUP: - { - err = cbs_av1_write_tile_group_obu(ctx, pbc, - &obu->obu.tile_group); - if (err < 0) - goto error; - - td = &obu->obu.tile_group.tile_data; - } - break; - case AV1_OBU_FRAME: - { - err = cbs_av1_write_frame_obu(ctx, pbc, &obu->obu.frame, NULL); - if (err < 0) - goto error; - - td = &obu->obu.frame.tile_group.tile_data; - } - break; - case AV1_OBU_TILE_LIST: - { - err = cbs_av1_write_tile_list_obu(ctx, pbc, &obu->obu.tile_list); - if (err < 0) - goto error; - - td = &obu->obu.tile_list.tile_data; - } - break; - case AV1_OBU_METADATA: - { - err = cbs_av1_write_metadata_obu(ctx, pbc, &obu->obu.metadata); - if (err < 0) - goto error; - } - break; - case AV1_OBU_PADDING: - { - err = cbs_av1_write_padding_obu(ctx, pbc, &obu->obu.padding); - if (err < 0) - goto error; - } - break; - default: - err = AVERROR(ENOSYS); - goto error; - } - - end_pos = put_bits_count(pbc); - header_size = (end_pos - start_pos + 7) / 8; - if (td) { - obu->obu_size = header_size + td->data_size; - } else if (header_size > 0) { - // Add trailing bits and recalculate. - err = cbs_av1_write_trailing_bits(ctx, pbc, 8 - end_pos % 8); - if (err < 0) - goto error; - end_pos = put_bits_count(pbc); - obu->obu_size = header_size = (end_pos - start_pos + 7) / 8; - } else { - // Empty OBU. - obu->obu_size = 0; - } - - end_pos = put_bits_count(pbc); - // Must now be byte-aligned. 
- av_assert0(end_pos % 8 == 0); - flush_put_bits(pbc); - start_pos /= 8; - end_pos /= 8; - - *pbc = pbc_tmp; - err = cbs_av1_write_leb128(ctx, pbc, "obu_size", obu->obu_size); - if (err < 0) - goto error; - - data_pos = put_bits_count(pbc) / 8; - flush_put_bits(pbc); - av_assert0(data_pos <= start_pos); - - if (8 * obu->obu_size > put_bits_left(pbc)) { - av_buffer_unref(&priv->sequence_header_ref); - av_buffer_unref(&priv->frame_header_ref); - *priv = av1ctx; - - return AVERROR(ENOSPC); - } - - if (obu->obu_size > 0) { - memmove(pbc->buf + data_pos, - pbc->buf + start_pos, header_size); - skip_put_bytes(pbc, header_size); - - if (td) { - memcpy(pbc->buf + data_pos + header_size, - td->data, td->data_size); - skip_put_bytes(pbc, td->data_size); - } - } - - // OBU data must be byte-aligned. - av_assert0(put_bits_count(pbc) % 8 == 0); - err = 0; - -error: - av_buffer_unref(&av1ctx.sequence_header_ref); - av_buffer_unref(&av1ctx.frame_header_ref); - - return err; -} - -static int cbs_av1_assemble_fragment(CodedBitstreamContext *ctx, - CodedBitstreamFragment *frag) -{ - size_t size, pos; - int i; - - size = 0; - for (i = 0; i < frag->nb_units; i++) - size += frag->units[i].data_size; - - frag->data_ref = av_buffer_alloc(size + AV_INPUT_BUFFER_PADDING_SIZE); - if (!frag->data_ref) - return AVERROR(ENOMEM); - frag->data = frag->data_ref->data; - memset(frag->data + size, 0, AV_INPUT_BUFFER_PADDING_SIZE); - - pos = 0; - for (i = 0; i < frag->nb_units; i++) { - memcpy(frag->data + pos, frag->units[i].data, - frag->units[i].data_size); - pos += frag->units[i].data_size; - } - av_assert0(pos == size); - frag->data_size = size; - - return 0; -} - -static void cbs_av1_flush(CodedBitstreamContext *ctx) -{ - CodedBitstreamAV1Context *priv = ctx->priv_data; - - av_buffer_unref(&priv->frame_header_ref); - priv->sequence_header = NULL; - priv->frame_header = NULL; - - memset(priv->ref, 0, sizeof(priv->ref)); - priv->operating_point_idc = 0; - priv->seen_frame_header = 0; - priv->tile_num = 0; -} - -static void cbs_av1_close(CodedBitstreamContext *ctx) -{ - CodedBitstreamAV1Context *priv = ctx->priv_data; - - av_buffer_unref(&priv->sequence_header_ref); - av_buffer_unref(&priv->frame_header_ref); -} - -static void cbs_av1_free_metadata(void *unit, uint8_t *content) -{ - AV1RawOBU *obu = (AV1RawOBU*)content; - AV1RawMetadata *md; - - av_assert0(obu->header.obu_type == AV1_OBU_METADATA); - md = &obu->obu.metadata; - - switch (md->metadata_type) { - case AV1_METADATA_TYPE_ITUT_T35: - av_buffer_unref(&md->metadata.itut_t35.payload_ref); - break; - } - av_free(content); -} - -static const CodedBitstreamUnitTypeDescriptor cbs_av1_unit_types[] = { - CBS_UNIT_TYPE_POD(AV1_OBU_SEQUENCE_HEADER, AV1RawOBU), - CBS_UNIT_TYPE_POD(AV1_OBU_TEMPORAL_DELIMITER, AV1RawOBU), - CBS_UNIT_TYPE_POD(AV1_OBU_FRAME_HEADER, AV1RawOBU), - CBS_UNIT_TYPE_POD(AV1_OBU_REDUNDANT_FRAME_HEADER, AV1RawOBU), - - CBS_UNIT_TYPE_INTERNAL_REF(AV1_OBU_TILE_GROUP, AV1RawOBU, - obu.tile_group.tile_data.data), - CBS_UNIT_TYPE_INTERNAL_REF(AV1_OBU_FRAME, AV1RawOBU, - obu.frame.tile_group.tile_data.data), - CBS_UNIT_TYPE_INTERNAL_REF(AV1_OBU_TILE_LIST, AV1RawOBU, - obu.tile_list.tile_data.data), - CBS_UNIT_TYPE_INTERNAL_REF(AV1_OBU_PADDING, AV1RawOBU, - obu.padding.payload), - - CBS_UNIT_TYPE_COMPLEX(AV1_OBU_METADATA, AV1RawOBU, - &cbs_av1_free_metadata), - - CBS_UNIT_TYPE_END_OF_LIST -}; - -#define OFFSET(x) offsetof(CodedBitstreamAV1Context, x) -static const AVOption cbs_av1_options[] = { - { "operating_point", "Set operating point to select layers to 
parse from a scalable bitstream", - OFFSET(operating_point), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, AV1_MAX_OPERATING_POINTS - 1, 0 }, - { NULL } -}; - -static const AVClass cbs_av1_class = { - .class_name = "cbs_av1", - .item_name = av_default_item_name, - .option = cbs_av1_options, - .version = LIBAVUTIL_VERSION_INT, -}; - -const CodedBitstreamType ff_cbs_type_av1 = { - .codec_id = AV_CODEC_ID_AV1, - - .priv_class = &cbs_av1_class, - .priv_data_size = sizeof(CodedBitstreamAV1Context), - - .unit_types = cbs_av1_unit_types, - - .split_fragment = &cbs_av1_split_fragment, - .read_unit = &cbs_av1_read_unit, - .write_unit = &cbs_av1_write_obu, - .assemble_fragment = &cbs_av1_assemble_fragment, - - .flush = &cbs_av1_flush, - .close = &cbs_av1_close, -}; diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/cbs_mpeg2.h b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/cbs_mpeg2.h deleted file mode 100644 index f7075a460dcdf44b1002d499beffd1755ff57e0f..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/cbs_mpeg2.h +++ /dev/null @@ -1,231 +0,0 @@ -/* - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVCODEC_CBS_MPEG2_H -#define AVCODEC_CBS_MPEG2_H - -#include -#include - -#include "libavutil/buffer.h" - - -enum { - MPEG2_START_PICTURE = 0x00, - MPEG2_START_SLICE_MIN = 0x01, - MPEG2_START_SLICE_MAX = 0xaf, - MPEG2_START_USER_DATA = 0xb2, - MPEG2_START_SEQUENCE_HEADER = 0xb3, - MPEG2_START_SEQUENCE_ERROR = 0xb4, - MPEG2_START_EXTENSION = 0xb5, - MPEG2_START_SEQUENCE_END = 0xb7, - MPEG2_START_GROUP = 0xb8, -}; - -#define MPEG2_START_IS_SLICE(type) \ - ((type) >= MPEG2_START_SLICE_MIN && \ - (type) <= MPEG2_START_SLICE_MAX) - -enum { - MPEG2_EXTENSION_SEQUENCE = 0x1, - MPEG2_EXTENSION_SEQUENCE_DISPLAY = 0x2, - MPEG2_EXTENSION_QUANT_MATRIX = 0x3, - MPEG2_EXTENSION_COPYRIGHT = 0x4, - MPEG2_EXTENSION_SEQUENCE_SCALABLE = 0x5, - MPEG2_EXTENSION_PICTURE_DISPLAY = 0x7, - MPEG2_EXTENSION_PICTURE_CODING = 0x8, - MPEG2_EXTENSION_PICTURE_SPATIAL_SCALABLE = 0x9, - MPEG2_EXTENSION_PICTURE_TEMPORAL_SCALABLE = 0xa, - MPEG2_EXTENSION_CAMERA_PARAMETERS = 0xb, - MPEG2_EXTENSION_ITU_T = 0xc, -}; - - -typedef struct MPEG2RawSequenceHeader { - uint8_t sequence_header_code; - - uint16_t horizontal_size_value; - uint16_t vertical_size_value; - uint8_t aspect_ratio_information; - uint8_t frame_rate_code; - uint32_t bit_rate_value; - uint16_t vbv_buffer_size_value; - uint8_t constrained_parameters_flag; - - uint8_t load_intra_quantiser_matrix; - uint8_t intra_quantiser_matrix[64]; - uint8_t load_non_intra_quantiser_matrix; - uint8_t non_intra_quantiser_matrix[64]; -} MPEG2RawSequenceHeader; - -typedef struct MPEG2RawUserData { - uint8_t user_data_start_code; - - uint8_t *user_data; - AVBufferRef *user_data_ref; - size_t 
user_data_length; -} MPEG2RawUserData; - -typedef struct MPEG2RawSequenceExtension { - uint8_t profile_and_level_indication; - uint8_t progressive_sequence; - uint8_t chroma_format; - uint8_t horizontal_size_extension; - uint8_t vertical_size_extension; - uint16_t bit_rate_extension; - uint8_t vbv_buffer_size_extension; - uint8_t low_delay; - uint8_t frame_rate_extension_n; - uint8_t frame_rate_extension_d; -} MPEG2RawSequenceExtension; - -typedef struct MPEG2RawSequenceDisplayExtension { - uint8_t video_format; - - uint8_t colour_description; - uint8_t colour_primaries; - uint8_t transfer_characteristics; - uint8_t matrix_coefficients; - - uint16_t display_horizontal_size; - uint16_t display_vertical_size; -} MPEG2RawSequenceDisplayExtension; - -typedef struct MPEG2RawGroupOfPicturesHeader { - uint8_t group_start_code; - - uint32_t time_code; - uint8_t closed_gop; - uint8_t broken_link; -} MPEG2RawGroupOfPicturesHeader; - -typedef struct MPEG2RawExtraInformation { - uint8_t *extra_information; - AVBufferRef *extra_information_ref; - size_t extra_information_length; -} MPEG2RawExtraInformation; - -typedef struct MPEG2RawPictureHeader { - uint8_t picture_start_code; - - uint16_t temporal_reference; - uint8_t picture_coding_type; - uint16_t vbv_delay; - - uint8_t full_pel_forward_vector; - uint8_t forward_f_code; - uint8_t full_pel_backward_vector; - uint8_t backward_f_code; - - MPEG2RawExtraInformation extra_information_picture; -} MPEG2RawPictureHeader; - -typedef struct MPEG2RawPictureCodingExtension { - uint8_t f_code[2][2]; - - uint8_t intra_dc_precision; - uint8_t picture_structure; - uint8_t top_field_first; - uint8_t frame_pred_frame_dct; - uint8_t concealment_motion_vectors; - uint8_t q_scale_type; - uint8_t intra_vlc_format; - uint8_t alternate_scan; - uint8_t repeat_first_field; - uint8_t chroma_420_type; - uint8_t progressive_frame; - - uint8_t composite_display_flag; - uint8_t v_axis; - uint8_t field_sequence; - uint8_t sub_carrier; - uint8_t burst_amplitude; - uint8_t sub_carrier_phase; -} MPEG2RawPictureCodingExtension; - -typedef struct MPEG2RawQuantMatrixExtension { - uint8_t load_intra_quantiser_matrix; - uint8_t intra_quantiser_matrix[64]; - uint8_t load_non_intra_quantiser_matrix; - uint8_t non_intra_quantiser_matrix[64]; - uint8_t load_chroma_intra_quantiser_matrix; - uint8_t chroma_intra_quantiser_matrix[64]; - uint8_t load_chroma_non_intra_quantiser_matrix; - uint8_t chroma_non_intra_quantiser_matrix[64]; -} MPEG2RawQuantMatrixExtension; - -typedef struct MPEG2RawPictureDisplayExtension { - int16_t frame_centre_horizontal_offset[3]; - int16_t frame_centre_vertical_offset[3]; -} MPEG2RawPictureDisplayExtension; - -typedef struct MPEG2RawExtensionData { - uint8_t extension_start_code; - uint8_t extension_start_code_identifier; - - union { - MPEG2RawSequenceExtension sequence; - MPEG2RawSequenceDisplayExtension sequence_display; - MPEG2RawQuantMatrixExtension quant_matrix; - MPEG2RawPictureCodingExtension picture_coding; - MPEG2RawPictureDisplayExtension picture_display; - } data; -} MPEG2RawExtensionData; - -typedef struct MPEG2RawSliceHeader { - uint8_t slice_vertical_position; - - uint8_t slice_vertical_position_extension; - uint8_t priority_breakpoint; - - uint8_t quantiser_scale_code; - - uint8_t slice_extension_flag; - uint8_t intra_slice; - uint8_t slice_picture_id_enable; - uint8_t slice_picture_id; - - MPEG2RawExtraInformation extra_information_slice; -} MPEG2RawSliceHeader; - -typedef struct MPEG2RawSlice { - MPEG2RawSliceHeader header; - - uint8_t *data; - 
AVBufferRef *data_ref; - size_t data_size; - int data_bit_start; -} MPEG2RawSlice; - -typedef struct MPEG2RawSequenceEnd { - uint8_t sequence_end_code; -} MPEG2RawSequenceEnd; - - -typedef struct CodedBitstreamMPEG2Context { - // Elements stored in headers which are required for other decoding. - uint16_t horizontal_size; - uint16_t vertical_size; - uint8_t scalable; - uint8_t scalable_mode; - uint8_t progressive_sequence; - uint8_t number_of_frame_centre_offsets; -} CodedBitstreamMPEG2Context; - - -#endif /* AVCODEC_CBS_MPEG2_H */ diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/dvdsubenc.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/dvdsubenc.c deleted file mode 100644 index d272b57675728d0de2ad2e87121a8bef685a351b..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/dvdsubenc.c +++ /dev/null @@ -1,512 +0,0 @@ -/* - * DVD subtitle encoding - * Copyright (c) 2005 Wolfram Gloger - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ -#include "avcodec.h" -#include "bytestream.h" -#include "codec_internal.h" -#include "dvdsub.h" -#include "libavutil/avassert.h" -#include "libavutil/bprint.h" -#include "libavutil/imgutils.h" -#include "libavutil/opt.h" - -typedef struct { - AVClass *class; - uint32_t global_palette[16]; - char *palette_str; - int even_rows_fix; -} DVDSubtitleContext; - -// ncnt is the nibble counter -#define PUTNIBBLE(val)\ -do {\ - if (ncnt++ & 1)\ - *q++ = bitbuf | ((val) & 0x0f);\ - else\ - bitbuf = (val) << 4;\ -} while(0) - -static void dvd_encode_rle(uint8_t **pq, - const uint8_t *bitmap, int linesize, - int w, int h, - const int cmap[256]) -{ - uint8_t *q; - unsigned int bitbuf = 0; - int ncnt; - int x, y, len, color; - - q = *pq; - - for (y = 0; y < h; ++y) { - ncnt = 0; - for(x = 0; x < w; x += len) { - color = bitmap[x]; - for (len=1; x+len < w; ++len) - if (bitmap[x+len] != color) - break; - color = cmap[color]; - av_assert0(color < 4); - if (len < 0x04) { - PUTNIBBLE((len << 2)|color); - } else if (len < 0x10) { - PUTNIBBLE(len >> 2); - PUTNIBBLE((len << 2)|color); - } else if (len < 0x40) { - PUTNIBBLE(0); - PUTNIBBLE(len >> 2); - PUTNIBBLE((len << 2)|color); - } else if (x+len == w) { - PUTNIBBLE(0); - PUTNIBBLE(0); - PUTNIBBLE(0); - PUTNIBBLE(color); - } else { - if (len > 0xff) - len = 0xff; - PUTNIBBLE(0); - PUTNIBBLE(len >> 6); - PUTNIBBLE(len >> 2); - PUTNIBBLE((len << 2)|color); - } - } - /* end of line */ - if (ncnt & 1) - PUTNIBBLE(0); - bitmap += linesize; - } - - *pq = q; -} - -static int color_distance(uint32_t a, uint32_t b) -{ - int r = 0, d, i; - int alpha_a = 8, alpha_b = 8; - - for (i = 24; i >= 0; i -= 8) { - d = alpha_a * (int)((a >> i) & 0xFF) - - alpha_b * (int)((b >> i) & 0xFF); - r += d * d; - alpha_a = a >> 28; - alpha_b = b >> 28; - } - return 
r; -} - -/** - * Count colors used in a rectangle, quantizing alpha and grouping by - * nearest global palette entry. - */ -static void count_colors(AVCodecContext *avctx, unsigned hits[33], - const AVSubtitleRect *r) -{ - DVDSubtitleContext *dvdc = avctx->priv_data; - unsigned count[256] = { 0 }; - uint32_t *palette = (uint32_t *)r->data[1]; - uint32_t color; - int x, y, i, j, match, d, best_d, av_uninit(best_j); - uint8_t *p = r->data[0]; - - for (y = 0; y < r->h; y++) { - for (x = 0; x < r->w; x++) - count[*(p++)]++; - p += r->linesize[0] - r->w; - } - for (i = 0; i < 256; i++) { - if (!count[i]) /* avoid useless search */ - continue; - color = palette[i]; - /* 0: transparent, 1-16: semi-transparent, 17-33 opaque */ - match = color < 0x33000000 ? 0 : color < 0xCC000000 ? 1 : 17; - if (match) { - best_d = INT_MAX; - for (j = 0; j < 16; j++) { - d = color_distance(0xFF000000 | color, - 0xFF000000 | dvdc->global_palette[j]); - if (d < best_d) { - best_d = d; - best_j = j; - } - } - match += best_j; - } - hits[match] += count[i]; - } -} - -static void select_palette(AVCodecContext *avctx, int out_palette[4], - int out_alpha[4], unsigned hits[33]) -{ - DVDSubtitleContext *dvdc = avctx->priv_data; - int i, j, bright, mult; - uint32_t color; - int selected[4] = { 0 }; - uint32_t pseudopal[33] = { 0 }; - uint32_t refcolor[3] = { 0x00000000, 0xFFFFFFFF, 0xFF000000 }; - - /* Bonus for transparent: if the rectangle fits tightly the text, the - background color can be quite rare, but it would be ugly without it */ - hits[0] *= 16; - /* Bonus for bright colors */ - for (i = 0; i < 16; i++) { - if (!(hits[1 + i] + hits[17 + i])) - continue; /* skip unused colors to gain time */ - color = dvdc->global_palette[i]; - bright = 0; - for (j = 0; j < 3; j++, color >>= 8) - bright += (color & 0xFF) < 0x40 || (color & 0xFF) >= 0xC0; - mult = 2 + FFMIN(bright, 2); - hits[ 1 + i] *= mult; - hits[17 + i] *= mult; - } - - /* Select four most frequent colors */ - for (i = 0; i < 4; i++) { - for (j = 0; j < 33; j++) - if (hits[j] > hits[selected[i]]) - selected[i] = j; - hits[selected[i]] = 0; - } - - /* Order the colors like in most DVDs: - 0: background, 1: foreground, 2: outline */ - for (i = 0; i < 16; i++) { - pseudopal[ 1 + i] = 0x80000000 | dvdc->global_palette[i]; - pseudopal[17 + i] = 0xFF000000 | dvdc->global_palette[i]; - } - for (i = 0; i < 3; i++) { - int best_d = color_distance(refcolor[i], pseudopal[selected[i]]); - for (j = i + 1; j < 4; j++) { - int d = color_distance(refcolor[i], pseudopal[selected[j]]); - if (d < best_d) { - FFSWAP(int, selected[i], selected[j]); - best_d = d; - } - } - } - - /* Output */ - for (i = 0; i < 4; i++) { - out_palette[i] = selected[i] ? (selected[i] - 1) & 0xF : 0; - out_alpha [i] = !selected[i] ? 0 : selected[i] < 17 ? 
0x80 : 0xFF; - } -} - -static void build_color_map(AVCodecContext *avctx, int cmap[], - const uint32_t palette[], - const int out_palette[], unsigned int const out_alpha[]) -{ - DVDSubtitleContext *dvdc = avctx->priv_data; - int i, j, d, best_d; - uint32_t pseudopal[4]; - - for (i = 0; i < 4; i++) - pseudopal[i] = (out_alpha[i] << 24) | - dvdc->global_palette[out_palette[i]]; - for (i = 0; i < 256; i++) { - best_d = INT_MAX; - for (j = 0; j < 4; j++) { - d = color_distance(pseudopal[j], palette[i]); - if (d < best_d) { - cmap[i] = j; - best_d = d; - } - } - } -} - -static void copy_rectangle(AVSubtitleRect *dst, AVSubtitleRect *src, int cmap[]) -{ - int x, y; - uint8_t *p, *q; - - p = src->data[0]; - q = dst->data[0] + (src->x - dst->x) + - (src->y - dst->y) * dst->linesize[0]; - for (y = 0; y < src->h; y++) { - for (x = 0; x < src->w; x++) - *(q++) = cmap[*(p++)]; - p += src->linesize[0] - src->w; - q += dst->linesize[0] - src->w; - } -} - -static int encode_dvd_subtitles(AVCodecContext *avctx, - uint8_t *outbuf, int outbuf_size, - const AVSubtitle *h) -{ - DVDSubtitleContext *dvdc = avctx->priv_data; - uint8_t *q, *qq; - int offset1, offset2; - int i, rects = h->num_rects, ret; - unsigned global_palette_hits[33] = { 0 }; - int cmap[256]; - int out_palette[4]; - int out_alpha[4]; - AVSubtitleRect vrect; - uint8_t *vrect_data = NULL; - int x2, y2; - int forced = 0; - - if (rects == 0 || !h->rects) - return AVERROR(EINVAL); - for (i = 0; i < rects; i++) - if (h->rects[i]->type != SUBTITLE_BITMAP) { - av_log(avctx, AV_LOG_ERROR, "Bitmap subtitle required\n"); - return AVERROR(EINVAL); - } - /* Mark this subtitle forced if any of the rectangles is forced. */ - for (i = 0; i < rects; i++) - if ((h->rects[i]->flags & AV_SUBTITLE_FLAG_FORCED) != 0) { - forced = 1; - break; - } - - vrect = *h->rects[0]; - - if (rects > 1) { - /* DVD subtitles can have only one rectangle: build a virtual - rectangle containing all actual rectangles. - The data of the rectangles will be copied later, when the palette - is decided, because the rectangles may have different palettes. 
*/ - int xmin = h->rects[0]->x, xmax = xmin + h->rects[0]->w; - int ymin = h->rects[0]->y, ymax = ymin + h->rects[0]->h; - for (i = 1; i < rects; i++) { - xmin = FFMIN(xmin, h->rects[i]->x); - ymin = FFMIN(ymin, h->rects[i]->y); - xmax = FFMAX(xmax, h->rects[i]->x + h->rects[i]->w); - ymax = FFMAX(ymax, h->rects[i]->y + h->rects[i]->h); - } - vrect.x = xmin; - vrect.y = ymin; - vrect.w = xmax - xmin; - vrect.h = ymax - ymin; - if ((ret = av_image_check_size(vrect.w, vrect.h, 0, avctx)) < 0) - return ret; - - /* Count pixels outside the virtual rectangle as transparent */ - global_palette_hits[0] = vrect.w * vrect.h; - for (i = 0; i < rects; i++) - global_palette_hits[0] -= h->rects[i]->w * h->rects[i]->h; - } - - for (i = 0; i < rects; i++) - count_colors(avctx, global_palette_hits, h->rects[i]); - select_palette(avctx, out_palette, out_alpha, global_palette_hits); - - if (rects > 1) { - if (!(vrect_data = av_calloc(vrect.w, vrect.h))) - return AVERROR(ENOMEM); - vrect.data [0] = vrect_data; - vrect.linesize[0] = vrect.w; - for (i = 0; i < rects; i++) { - build_color_map(avctx, cmap, (uint32_t *)h->rects[i]->data[1], - out_palette, out_alpha); - copy_rectangle(&vrect, h->rects[i], cmap); - } - for (i = 0; i < 4; i++) - cmap[i] = i; - } else { - build_color_map(avctx, cmap, (uint32_t *)h->rects[0]->data[1], - out_palette, out_alpha); - } - - av_log(avctx, AV_LOG_DEBUG, "Selected palette:"); - for (i = 0; i < 4; i++) - av_log(avctx, AV_LOG_DEBUG, " 0x%06"PRIx32"@@%02x (0x%x,0x%x)", - dvdc->global_palette[out_palette[i]], out_alpha[i], - out_palette[i], out_alpha[i] >> 4); - av_log(avctx, AV_LOG_DEBUG, "\n"); - - // encode data block - q = outbuf + 4; - offset1 = q - outbuf; - // worst case memory requirement: 1 nibble per pixel.. - if ((q - outbuf) + vrect.w * vrect.h / 2 + 17 + 21 > outbuf_size) { - av_log(NULL, AV_LOG_ERROR, "dvd_subtitle too big\n"); - ret = AVERROR_BUFFER_TOO_SMALL; - goto fail; - } - dvd_encode_rle(&q, vrect.data[0], vrect.w * 2, - vrect.w, (vrect.h + 1) >> 1, cmap); - offset2 = q - outbuf; - dvd_encode_rle(&q, vrect.data[0] + vrect.w, vrect.w * 2, - vrect.w, vrect.h >> 1, cmap); - - if (dvdc->even_rows_fix && (vrect.h & 1)) { - // Work-around for some players that want the height to be even. - vrect.h++; - *q++ = 0x00; // 0x00 0x00 == empty row, i.e. fully transparent - *q++ = 0x00; - } - - // set data packet size - qq = outbuf + 2; - bytestream_put_be16(&qq, q - outbuf); - - // send start display command - bytestream_put_be16(&q, (h->start_display_time*90) >> 10); - bytestream_put_be16(&q, (q - outbuf) /*- 2 */ + 8 + 12 + 2); - *q++ = 0x03; // palette - 4 nibbles - *q++ = (out_palette[3] << 4) | out_palette[2]; - *q++ = (out_palette[1] << 4) | out_palette[0]; - *q++ = 0x04; // alpha - 4 nibbles - *q++ = (out_alpha[3] & 0xF0) | (out_alpha[2] >> 4); - *q++ = (out_alpha[1] & 0xF0) | (out_alpha[0] >> 4); - - // 12 bytes per rect - x2 = vrect.x + vrect.w - 1; - y2 = vrect.y + vrect.h - 1; - - if (x2 > avctx->width || y2 > avctx->height) { - av_log(avctx, AV_LOG_ERROR, "canvas_size(%d:%d) is too small(%d:%d) for render\n", - avctx->width, avctx->height, x2, y2); - ret = AVERROR(EINVAL); - goto fail; - } - *q++ = 0x05; - // x1 x2 -> 6 nibbles - *q++ = vrect.x >> 4; - *q++ = (vrect.x << 4) | ((x2 >> 8) & 0xf); - *q++ = x2; - // y1 y2 -> 6 nibbles - *q++ = vrect.y >> 4; - *q++ = (vrect.y << 4) | ((y2 >> 8) & 0xf); - *q++ = y2; - - *q++ = 0x06; - // offset1, offset2 - bytestream_put_be16(&q, offset1); - bytestream_put_be16(&q, offset2); - - *q++ = forced ? 
0x00 : 0x01; // start command - *q++ = 0xff; // terminating command - - // send stop display command last - bytestream_put_be16(&q, (h->end_display_time*90) >> 10); - bytestream_put_be16(&q, (q - outbuf) - 2 /*+ 4*/); - *q++ = 0x02; // set end - *q++ = 0xff; // terminating command - - qq = outbuf; - bytestream_put_be16(&qq, q - outbuf); - - av_log(NULL, AV_LOG_DEBUG, "subtitle_packet size=%"PTRDIFF_SPECIFIER"\n", q - outbuf); - ret = q - outbuf; - -fail: - av_free(vrect_data); - return ret; -} - -static int bprint_to_extradata(AVCodecContext *avctx, struct AVBPrint *buf) -{ - int ret; - char *str; - - ret = av_bprint_finalize(buf, &str); - if (ret < 0) - return ret; - if (!av_bprint_is_complete(buf)) { - av_free(str); - return AVERROR(ENOMEM); - } - - avctx->extradata = str; - /* Note: the string is NUL terminated (so extradata can be read as a - * string), but the ending character is not accounted in the size (in - * binary formats you are likely not supposed to mux that character). When - * extradata is copied, it is also padded with AV_INPUT_BUFFER_PADDING_SIZE - * zeros. */ - avctx->extradata_size = buf->len; - return 0; -} - -static int dvdsub_init(AVCodecContext *avctx) -{ - DVDSubtitleContext *dvdc = avctx->priv_data; - static const uint32_t default_palette[16] = { - 0x000000, 0x0000FF, 0x00FF00, 0xFF0000, - 0xFFFF00, 0xFF00FF, 0x00FFFF, 0xFFFFFF, - 0x808000, 0x8080FF, 0x800080, 0x80FF80, - 0x008080, 0xFF8080, 0x555555, 0xAAAAAA, - }; - AVBPrint extradata; - int i, ret; - - av_assert0(sizeof(dvdc->global_palette) == sizeof(default_palette)); - if (dvdc->palette_str) { - ff_dvdsub_parse_palette(dvdc->global_palette, dvdc->palette_str); - } else { - memcpy(dvdc->global_palette, default_palette, sizeof(dvdc->global_palette)); - } - - av_bprint_init(&extradata, 0, AV_BPRINT_SIZE_AUTOMATIC); - if (avctx->width && avctx->height) - av_bprintf(&extradata, "size: %dx%d\n", avctx->width, avctx->height); - av_bprintf(&extradata, "palette:"); - for (i = 0; i < 16; i++) - av_bprintf(&extradata, " %06"PRIx32"%c", - dvdc->global_palette[i] & 0xFFFFFF, i < 15 ? 
',' : '\n'); - - ret = bprint_to_extradata(avctx, &extradata); - if (ret < 0) - return ret; - - return 0; -} - -static int dvdsub_encode(AVCodecContext *avctx, - unsigned char *buf, int buf_size, - const AVSubtitle *sub) -{ - //DVDSubtitleContext *s = avctx->priv_data; - int ret; - - ret = encode_dvd_subtitles(avctx, buf, buf_size, sub); - return ret; -} - -#define OFFSET(x) offsetof(DVDSubtitleContext, x) -#define SE AV_OPT_FLAG_SUBTITLE_PARAM | AV_OPT_FLAG_ENCODING_PARAM -static const AVOption options[] = { - {"palette", "set the global palette", OFFSET(palette_str), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, SE }, - {"even_rows_fix", "Make number of rows even (workaround for some players)", OFFSET(even_rows_fix), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, SE}, - { NULL }, -}; - -static const AVClass dvdsubenc_class = { - .class_name = "VOBSUB subtitle encoder", - .item_name = av_default_item_name, - .option = options, - .version = LIBAVUTIL_VERSION_INT, -}; - -const FFCodec ff_dvdsub_encoder = { - .p.name = "dvdsub", - CODEC_LONG_NAME("DVD subtitles"), - .p.type = AVMEDIA_TYPE_SUBTITLE, - .p.id = AV_CODEC_ID_DVD_SUBTITLE, - .init = dvdsub_init, - FF_CODEC_ENCODE_SUB_CB(dvdsub_encode), - .p.priv_class = &dvdsubenc_class, - .priv_data_size = sizeof(DVDSubtitleContext), -}; diff --git a/spaces/congsaPfin/Manga-OCR/logs/Bowmasters APK The Ultimate Game for Gore and Humor Fans.md b/spaces/congsaPfin/Manga-OCR/logs/Bowmasters APK The Ultimate Game for Gore and Humor Fans.md deleted file mode 100644 index ef81568bafcb4815c8ca428f054f6cf4c045876b..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Bowmasters APK The Ultimate Game for Gore and Humor Fans.md +++ /dev/null @@ -1,91 +0,0 @@ - -

    Bowmaster APK: A Fun and Addictive Game with Bowmen

    -

    If you are looking for a hotsy-totsy aim and shoot game that will keep you entertained for hours, then you should try Bowmaster APK. This is a brand new version of the world-famous game with bowmen that has millions of fans around the globe. In this article, we will tell you everything you need to know about Bowmaster APK, including what it is, how to download and install it, how to play it, and why you should play it. So, let's get started!

    -

    bowmaster apk


    Download > https://urlca.com/2uOb0l



    -

    What is Bowmaster APK?

    -

    Bowmaster APK is a fun and addictive game that challenges you to shoot arrows at your opponents and make them fall off their platforms. The game features 41 insane characters from all dimensions, each with their own unique weapons and abilities. You can choose from pirates, ninjas, zombies, clowns, aliens, and more. You can also play with your friends in epic duels or join tournaments and win prizes. The game has stunning graphics, smooth animations, awesome fatalities, and rag-doll physics that make every shot hilarious and satisfying.

    -

    How to download and install Bowmaster APK?

    -

    Downloading and installing Bowmaster APK is very easy and fast. All you need to do is follow these simple steps:

    -
      -
    1. Go to APKPure.com and search for "Bowmaster APK".
    2. -
    3. Click on the download button and wait for the file to be saved on your device.
    4. -
    5. Open the file manager app on your device and locate the downloaded file.
    6. -
    7. Tap on the file and allow the installation from unknown sources if prompted.
    8. -
    9. Wait for the installation to complete and launch the game.
    10. -
    -

    Congratulations! You have successfully installed Bowmaster APK on your Android device. Now you can enjoy playing the game anytime and anywhere.

    -

    How to play Bowmaster APK?

    -

    Playing Bowmaster APK is very simple and intuitive. You just need to drag your finger on the screen to aim your bow and release it to shoot. You can also adjust the power and angle of your shot by moving your finger up or down. The goal is to hit your opponent's head or body and make them fall off their platform. You can also use special items like bombs, rockets, magnets, or portals to spice up your shots. The game has four modes: Classic, Apple Shooting, Bird Hunting, and Duck Hunting. You can play solo or with friends in online or offline mode. You can also unlock new characters and weapons by winning coins or gems in tournaments or by watching ads.

    -

    bowmaster apk download
    -bowmaster apk mod
    -bowmaster apk hack
    -bowmaster apk latest version
    -bowmaster apk pure
    -bowmaster apk offline
    -bowmaster apk unlimited money
    -bowmaster apk android
    -bowmaster apk revdl
    -bowmaster apk old version
    -bowmaster apk free download
    -bowmaster apk mod menu
    -bowmaster apk mod unlock all characters
    -bowmaster apk mod unlimited gems
    -bowmaster apk mod unlimited coins and gems
    -bowmaster apk for pc
    -bowmaster apk uptodown
    -bowmaster apk rexdl
    -bowmaster apk mod all unlocked
    -bowmaster apk mod money and gems
    -bowmaster apk mod free shopping
    -bowmaster apk mod no ads
    -bowmaster apk mod god mode
    -bowmaster apk mod diamond membership
    -bowmaster apk mod vip unlocked
    -bowmaster apk mod all weapons unlocked
    -bowmaster apk mod unlimited everything
    -bowmaster apk mod online
    -bowmaster apk mod 2.14.8
    -bowmaster apk mod 2.14.7
    -bowmaster apk mod 2.14.6
    -bowmaster apk mod 2.14.5
    -bowmaster apk mod 2.14.4
    -bowmaster apk mod 2.14.3
    -bowmaster apk mod 2.14.2
    -bowmaster apk mod 2.14.1
    -bowmaster apk mod 2.14.0
    -bowmaster apkpure download latest version
    -bowmasters apkpure download old version
    -download game bowmasters apkpure offline mode
    -how to install bowmasters apkpure on android device
    -how to play bowmasters apkpure online with friends
    -how to update bowmasters apkpure to the newest version
    -how to get unlimited coins and gems in bowmasters apkpure
    -how to unlock all characters and weapons in bowmasters apkpure
    -how to remove ads from bowmasters apkpure
    -how to get diamond membership in bowmasters apkpure
    -how to get vip access in bowmasters apkpure
    -how to hack bowmasters apkpure using lucky patcher

    -

    Why should you play Bowmaster APK?

    -

    Bowmaster APK is a game that will make you laugh, scream, and cheer as you shoot arrows at your enemies. Here are some of the reasons why you should play it:

    -
      -
    • It is fun and addictive. You will never get bored of shooting arrows at different characters with different weapons.
    • -
    • It is challenging and rewarding. You will need to use your skills and strategy to aim accurately and hit your targets.
    • -
    • It is hilarious and satisfying. You will love watching your opponents fly in the air or explode in pieces as you hit them.
    • -
    • It is social and competitive. You can play with your friends or challenge other players from around the world in tournaments.
    • -
    • It is free and easy to play. You can download it from APKPure.com without any hassle and play it on any Android device.
    • -
    -

    Conclusion

    -

Bowmaster APK is a game that you will love to play if you are a fan of aim and shoot games. It is a game that combines fun, challenge, humor, and competition in one package. You can download it from APKPure.com and enjoy shooting arrows at your opponents with 41 insane characters. You can also play with your friends or join tournaments and win prizes. So, what are you waiting for? Download Bowmaster APK today and become the ultimate bowmaster!

    -

    FAQs

    -

    What is the difference between Bowmaster APK and Bowmasters?

    -

    Bowmaster APK is a modified version of Bowmasters that allows you to access all the features of the game without any restrictions. You can unlock all the characters and weapons without spending any money or watching any ads. You can also play the game offline without any internet connection.

    -

    Is Bowmaster APK safe to download and install?

    -

    Yes, Bowmaster APK is safe to download and install from APKPure.com. The file is scanned for viruses and malware before being uploaded to the website. You can also check the user reviews and ratings to see how other people have experienced the game.

    -

    How can I update Bowmaster APK?

    -

    You can update Bowmaster APK by visiting APKPure.com and downloading the latest version of the file. You can also enable the auto-update feature on the website to get notified when a new version is available. You will need to uninstall the previous version of the game before installing the new one.

    -

    How can I contact the developers of Bowmaster APK?

    -

    You can contact the developers of Bowmaster APK by sending an email to bowmasters@playgendary.com. You can also visit their official website at playgendary.com or follow them on social media platforms like Facebook, Twitter, Instagram, and YouTube.

    -

    What are some similar games to Bowmaster APK?

    -

    Some similar games to Bowmaster APK are Archery King, Stickman Archer, Mr Bow, Arrow.io, and Archero. These are also aim and shoot games that involve bowmen, arrows, and targets. You can find these games on APKPure.com or Google Play Store.

    -
    -
    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Ghost Rider Bike Wallpaper HD Stunning Photos of the Flaming Motorcycle.md b/spaces/congsaPfin/Manga-OCR/logs/Ghost Rider Bike Wallpaper HD Stunning Photos of the Flaming Motorcycle.md deleted file mode 100644 index c5707ca729df5d76b7c87e7bb22b907c4e294bef..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Ghost Rider Bike Wallpaper HD Stunning Photos of the Flaming Motorcycle.md +++ /dev/null @@ -1,133 +0,0 @@ -
    -

    Ghost Rider Bike Wallpaper HD Download: A Guide for Fans

    -

    If you are a fan of the Ghost Rider movies, you might be interested in downloading some cool wallpapers of the ghost rider bike. The ghost rider bike is a custom motorcycle that can burst into flames and has a demonic appearance. It is one of the signature vehicles of the Ghost Rider, a motorcycle-riding bounty hunter for the Devil himself.

    -

    In this article, we will explore the history and features of the ghost rider bike in the movies, and how to download and use ghost rider bike wallpapers on different devices. Whether you want to spice up your phone, tablet, or computer screen, we have some tips and tricks for you.

    -

    ghost rider bike wallpaper hd download


    Downloadhttps://urlca.com/2uO7dM



    -

    The History and Features of the Ghost Rider Bike in the Movies

    -

    The ghost rider bike has appeared in two movies so far: Ghost Rider (2007) and Ghost Rider: Spirit of Vengeance (2011). Both movies star Nicolas Cage as Johnny Blaze, a stuntman who becomes the Ghost Rider after making a deal with Mephistopheles (Peter Fonda in the first movie, Ciarán Hinds in the second movie).

    -

    The Bike Used in the First Ghost Rider Movie

    -

    In the first movie, Johnny Blaze uses several motorcycles, including a Yamaha V-Max. The bike was modified for the movie by David Berryman of Las Vegas, Nevada. The customized black-and-red V-Max was equipped with an 1800 cc V-4 engine, with a custom cherry red paint job, a hand-crafted trellis frame and an OCC-style exhaust.

    -

    The Panhead Chopper is another motorcycle that Nicolas Cage rides in the movie Ghost Rider, but it is not the bike used by the Ghost Rider himself. The Panhead Chopper is a custom-built Harley-Davidson with a panhead engine, which has a distinctive shape of the rocker covers that resembles an upside-down pan.

    -

    The Bike Used in the Second Ghost Rider Movie

    -

    In the second movie, Johnny Blaze rides a new model of motorcycle that is more organic and skeletal than the previous one. The bike is based on a Buell XB12Ss Lightning Long, which is a sportbike with a 1203 cc V-twin engine.

    -

    The bike was modified by Neville Page, a concept designer who also worked on Avatar and Star Trek. The bike has a bone-like structure that covers the engine and the frame, with spikes and ribs that give it a menacing look. The bike also has a skull-shaped headlight and handlebars that resemble horns.

    -

    How to Download and Use Ghost Rider Bike Wallpapers on Different Devices

    -

    If you want to download some high-quality ghost rider bike wallpapers for your device, there are several ways to do so. Here are some methods that you can try:

    -

    ghost rider bike hd images free download
    -ghost rider bike 4k wallpaper download
    -ghost rider bike wallpaper for pc
    -ghost rider bike photos download
    -ghost rider bike flames wallpaper hd
    -ghost rider bike hd wallpapers 1080p
    -ghost rider bike wallpaper for mobile
    -ghost rider bike pics download
    -ghost rider bike skull wallpaper hd
    -ghost rider bike wallpaper cave
    -ghost rider bike live wallpaper download
    -ghost rider bike 3d wallpaper download
    -ghost rider bike wallpaper for laptop
    -ghost rider bike images download
    -ghost rider bike fire wallpaper hd
    -ghost rider bike hd wallpapers for android
    -ghost rider bike wallpaper download apk
    -ghost rider bike wallpapers free download
    -ghost rider bike wallpaper for desktop
    -ghost rider bike wallpapers hd zip file download
    -ghost rider bike animated wallpaper download
    -ghost rider bike ultra hd wallpaper download
    -ghost rider bike wallpaper for iphone
    -ghost rider bike wallpapers download
    -ghost rider bike smoke wallpaper hd
    -ghost rider bike hd wallpapers for pc
    -ghost rider bike wallpaper download for windows 10
    -ghost rider bike wallpapers in hd quality
    -ghost rider bike wallpaper for macbook
    -ghost rider bike wallpapers full hd download
    -ghost rider bike neon wallpaper hd
    -ghost rider bike high resolution wallpaper download
    -ghost rider bike wallpaper for ipad
    -ghost rider bike wallpapers collection download
    -ghost rider bike blue flame wallpaper hd
    -ghost rider bike hd wallpapers widescreen
    -ghost rider bike wallpaper download for android phone
    -ghost rider bike wallpapers in 4k resolution
    -ghost rider bike wallpaper for chromebook
    -ghost rider bike wallpapers pack download

    -

    How to Download Ghost Rider Bike Wallpapers from Google Images

    -

    One of the easiest ways to find and download ghost rider bike wallpapers is to use Google Images. You can use any web browser on your Android, iPhone, iPad, or computer to do this. Here are the steps:

    -
      -
1. Go to https://images.google.com in a web browser.
    2. -
    3. Enter your search terms and tap or click Search. For example, if you want a galaxy-style wallpaper, try things like "ghost rider bike wallpaper hd", "ghost rider bike wallpaper 4k", "ghost rider bike wallpaper iphone", etc. If you want a certain image size and/or resolution, make sure you add that to your search terms. For example, "ghost rider bike wallpaper 1920x1080".
    4. -
    5. Tap or click on the image that you like and want to download.
    6. -
    7. Tap and hold or right-click on the image and select Save image as... or Download image.
    8. -
    9. Choose a location on your device where you want to save the image and tap or click Save.
    10. -
    -

    You can also use the Tools option under the search bar to filter the images by size, color, type, time, and usage rights. This can help you find the best wallpaper for your device and preference.

    -

    How to Download Ghost Rider Bike Wallpapers from the Google Wallpapers App for Android

    -

    If you have an Android device, you can use the Google Wallpapers app to download and set ghost rider bike wallpapers. The app has a large collection of wallpapers from various categories, including Art, Earth, Landscapes, Life, Textures, and more. You can also access wallpapers from other apps like Google Photos and Live Wallpapers. Here are the steps:

    -
      -
    1. Download and install the [Google Wallpapers app] from the Google Play Store on your Android device.
    2. -
    3. Open the app and tap on the category that you want to explore. For example, if you want a galaxy-style wallpaper, tap on Space.
    4. -
    5. Swipe left or right to browse through the wallpapers. You can also tap on Daily wallpaper to get a new wallpaper every day.
    6. -
    7. Tap on the wallpaper that you like and want to download.
    8. -
    9. Tap on Download to save the wallpaper on your device. You can also tap on Set wallpaper to apply it to your home screen or lock screen.
    10. -
    -

    How to Sync Your Wallpaper Across Desktops on Windows 10

    -

    If you have a Windows 10 computer, you can sync your wallpaper across multiple desktops using the Microsoft account. This way, you can have the same ghost rider bike wallpaper on all your devices that are signed in with the same account. Here are the steps:

    -
      -
    1. Go to [Settings] > [Accounts] > [Sync your settings] on your Windows 10 computer.
    2. -
    3. Turn on the Sync settings toggle switch if it is off.
    4. -
    5. Under Individual sync settings, make sure that Theme is turned on.
    6. -
    7. Go to [Settings] > [Personalization] > [Background] on your Windows 10 computer.
    8. -
    9. Select Picture from the Background drop-down menu.
    10. -
    11. Click on Browse and choose the ghost rider bike wallpaper that you downloaded earlier.
    12. -
    13. Click on Choose picture to set it as your background.
    14. -
    -

    The wallpaper will be synced across all your devices that are signed in with the same Microsoft account. You can also sync other theme elements like colors, sounds, and mouse cursors using this method.

    -

    Conclusion: How to Choose and Use Ghost Rider Bike Wallpapers

    -

    In this article, we have learned about the history and features of the ghost rider bike in the movies, and how to download and use ghost rider bike wallpapers on different devices. We have also seen some examples of ghost rider bike wallpapers that you can choose from.

    -

    Here are some tips for choosing and using ghost rider bike wallpapers:

    -
      -
    • Pick a wallpaper that matches your device's screen size and resolution for optimal quality and performance.
    • -
    • Pick a wallpaper that reflects your personality and mood. For example, if you want a dark and edgy wallpaper, go for a black-and-red theme. If you want a bright and colorful wallpaper, go for a galaxy-style theme.
    • -
    • Pick a wallpaper that complements your icons and widgets. For example, if you have a lot of icons and widgets on your home screen, go for a simple and minimalistic wallpaper. If you have a clean and empty home screen, go for a detailed and vibrant wallpaper.
    • -
    • Change your wallpaper regularly to keep things fresh and interesting. You can use apps like Google Wallpapers or Daily Wallpaper to get new wallpapers every day or week.
    • -
    -

    Frequently Asked Questions About Ghost Rider Bike Wallpapers

    -

    Q: What is the best resolution for ghost rider bike wallpapers?

    -

    A: The best resolution for ghost rider bike wallpapers depends on your device's screen size and pixel density. Generally, higher resolutions offer sharper and clearer images, but they also consume more battery power and storage space. A good rule of thumb is to choose a wallpaper that has at least the same resolution as your device's screen. For example, if your device has a 1080p screen, choose a wallpaper that has at least 1920 x 1080 pixels.

    -

    Q: How can I make my own ghost rider bike wallpaper?

    -

    A: If you want to make your own ghost rider bike wallpaper, you can use a photo editing software like Photoshop, GIMP, or Pixlr. You can also use online tools like Canva, Fotor, or PicMonkey. Here are some steps that you can follow:

    -
      -
    1. Find a base image of a motorcycle that you like. You can use Google Images or other sources to find one.
    2. -
    3. Open the image in your photo editing software or online tool.
    4. -
    5. Add some effects and filters to make the image look more fiery and demonic. For example, you can use the burn tool, the dodge tool, the smudge tool, the blur tool, the color balance tool, the hue/saturation tool, etc.
    6. -
    7. Add some text and graphics to personalize your wallpaper. For example, you can add the Ghost Rider logo, your name, a quote, etc.
    8. -
    9. Save and export your wallpaper in the desired format and resolution.
    10. -
    -

    Q: Where can I find more ghost rider bike wallpapers?

    -

    A: There are many websites and apps that offer ghost rider bike wallpapers for free or for a fee. Some of them are:

    -
      -
    • [Wallpaper Cave] - A website that has a large collection of ghost rider bike wallpapers in various resolutions and styles.
    • -
    • [Zedge] - An app that has millions of wallpapers, ringtones, stickers, and more for Android and iOS devices.
    • -
    • [HD Wallpapers] - A website that has high-quality wallpapers of various categories, including movies, games, cars, bikes, etc.
    • -
    • [Wallpaper Abyss] - A website that has over 1 million HD wallpapers of various genres and themes.
    • -
    • [Pinterest] - A social media platform that has thousands of pins and boards related to ghost rider bike wallpapers.
    • -
    -

    Q: How can I share my ghost rider bike wallpaper with others?

    -

    A: If you want to share your ghost rider bike wallpaper with others, you can use various methods depending on your device and preference. Some of them are:

    -
      -
    • Email - You can attach your wallpaper as a file and send it to your contacts via email.
    • -
    • Messaging apps - You can send your wallpaper as an image or a link to your friends and family via messaging apps like WhatsApp, Telegram, Signal, etc.
    • -
    • Social media platforms - You can post your wallpaper as a photo or a link on your social media platforms like Facebook, Twitter, Instagram, etc.
    • -
    • Cloud storage services - You can upload your wallpaper to cloud storage services like Google Drive, Dropbox, OneDrive, etc. and share the link with others.
    • -
    -

    Q: How can I remove my ghost rider bike wallpaper from my device?

    -

    A: If you want to remove your ghost rider bike wallpaper from your device, you can follow these steps:

    -
      -
    1. Go to [Settings] > [Personalization] > [Background] on your Windows 10 computer or [Settings] > [Wallpaper] on your Android or iOS device.
    2. -
    3. Select another image or color as your background from the available options.
    4. -
    5. Delete the ghost rider bike wallpaper file from your device's storage if you don't want to keep it anymore.
    6. -

    -
    -
    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Hani Ar Rifai MP3 - The Best Source to Download and Stream Quran Audio.md b/spaces/congsaPfin/Manga-OCR/logs/Hani Ar Rifai MP3 - The Best Source to Download and Stream Quran Audio.md deleted file mode 100644 index d7c8f53f359ec168e5fe80ce5c73fb64efc3fb8f..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Hani Ar Rifai MP3 - The Best Source to Download and Stream Quran Audio.md +++ /dev/null @@ -1,120 +0,0 @@ -
    -

    Download Hani Ar Rifai MP3: A Guide for Quran Lovers

    -

    If you are a lover of the Quran and its recitation, you must have heard of Hani Ar Rifai. He is one of the most famous and respected Quran reciters in the world. His voice is so beautiful and captivating that it touches the hearts of millions of Muslims who listen to him. In this article, we will tell you everything you need to know about Hani Ar Rifai and how you can download his recitation in mp3 format.

    -

    download hani ar rifai mp3


    DOWNLOAD 🆓 https://urlca.com/2uO9L7



    -

    The Biography of Hani Ar Rifai

    -

Hani Ar Rifai was born in 1974 in the city of Jeddah, Saudi Arabia. He grew up in a conservative family that placed great importance on learning the Quran. He started memorizing the Quran at a young age and completed it by the age of 14. He also pursued Islamic studies at King Abdulaziz University in Jeddah.

    -

He learned the Quran from various renowned scholars and sheikhs, such as Sheikh Muhammad Ibn Abdu Rahim Achichin, Sheikh Ali Jaber, Sheikh Muhammad Yusuf, Sheikh Abdullah Bin Bayyah, and Sheikh Khaldoun Al-Ahdab. He mastered the rules of Tajweed and Qiraat (the different modes of recitation) and became known for his excellent recitation skills.

    -

    He is currently the Imam and Khateeb (preacher) of Masjid Anani in Jeddah. He also works as the Director of legal affairs at King Faysal’s Hospital and Research Center in Jeddah. He has been invited to many countries to lead prayers and recite the Quran in various occasions. He has also recorded his recitation for many radio stations, TV channels, websites, apps, and podcasts.

    -

    The Features of Hani Ar Rifai's Recitation

    -

    Hani Ar Rifai's recitation is characterized by several features that make it unique and appealing to many listeners. Some of these features are:

    -

    download hani ar rifai quran audio
    -download hani ar rifai recitation
    -download hani ar rifai full quran mp3
    -download hani ar rifai surah al baqarah mp3
    -download hani ar rifai surah yasin mp3
    -download hani ar rifai surah al kahf mp3
    -download hani ar rifai surah al mulk mp3
    -download hani ar rifai surah rahman mp3
    -download hani ar rifai emotional recitation mp3
    -download hani ar rifai beautiful voice mp3
    -download hani ar rifai juz amma mp3
    -download hani ar rifai surah al fatihah mp3
    -download hani ar rifai surah maryam mp3
    -download hani ar rifai surah an naba mp3
    -download hani ar rifai surah al waqiah mp3
    -download hani ar rifai surah al insan mp3
    -download hani ar rifai surah al buruj mp3
    -download hani ar rifai surah al qiyamah mp3
    -download hani ar rifai surah al muzzammil mp3
    -download hani ar rifai surah al muddaththir mp3
    -download hani ar rifai surah at takwir mp3
    -download hani ar rifai surah al infitar mp3
    -download hani ar rifai surah al inshiqaq mp3
    -download hani ar rifai surah al mutaffifin mp3
    -download hani ar rifai surah an najm mp3
    -download hani ar rifai biography mp3
    -download hani ar rifai dua mp3
    -download hani ar rifai taraweeh mp3
    -download hani ar rifai khutbah mp3
    -download hani ar rifai tajweed mp3
    -download hani ar rifai quran central mp3
    -download hani ar rifai quran.com mp3
    -download hani ar rifai tvquran.com mp3
    -download hani ar rifai quran offline mp3
    -download hani ar rifai quran online mp3
    -download hani ar rifai quran streaming mp3
    -download hani ar rifai quran podcast mp3
    -download hani ar rifai quran app mp3
    -download hani ar rifai quran android mp3
    -download hani ar rifai quran spotify mp3
    -download best of hani ar rifai mp3
    -download latest of hani ar rifai mp3
    -how to download hani ar rifai mp3 for free
    -where to download hani ar rifai mp3 legally
    -why to download hani ar rifai mp3 for inspiration
    -what to do after downloading hani ar rifai mp3
    -benefits of downloading hani ar rifai mp3
    -tips for downloading hani ar rifai mp3 faster
    -reviews of downloading hani ar riffi mp3

    -
      -
    • His voice is filled with emotions and spirituality. He expresses the meanings and feelings of the verses with his tone, pitch, volume, pauses, and intonation. He makes the listeners feel as if they are hearing the Quran directly from Allah.
    • -
    • He follows the rules of Tajweed and Qiraat strictly. He pronounces every letter, vowel, and sound correctly and clearly. He also recites according to different Qiraat, such as Hafs, Warsh, Qalun, etc., depending on the occasion and preference.
    • -
• He adapts his style of recitation to suit different situations and moods. He can recite in a slow, calm, and soothing manner, or in a fast, energetic, and powerful manner. He can also recite in a melodious, rhythmic, and musical way, or in a simple, plain, and natural way. He can vary his recitation according to the theme, context, and message of the verses.
    • -
    • He recites with humility, sincerity, and devotion. He shows respect and reverence for the Quran and its words. He also shows gratitude and appreciation for the blessing of being able to recite the Quran. He often makes du'a (supplication) and dhikr (remembrance) before and after his recitation.
    • -
    -

    The Benefits of Listening to Hani Ar Rifai's Recitation

    -

    Listening to Hani Ar Rifai's recitation has many benefits for the Muslims who love the Quran and want to increase their faith and knowledge. Some of these benefits are:

    -
      -
    • It helps in memorizing and understanding the Quran. By listening to his clear and accurate recitation, one can learn the correct pronunciation and meaning of the words and verses. One can also improve one's own recitation skills by imitating his voice and style.
    • -
    • It increases the love and reverence for the Quran. By listening to his emotional and spiritual recitation, one can feel the beauty and majesty of the Quran and its message. One can also develop a stronger attachment and connection with the Quran and its teachings.
    • -
    • It soothes the heart and mind and brings peace and tranquility. By listening to his calm and soothing recitation, one can relax and relieve stress and anxiety. One can also find comfort and solace in the words of Allah and His promises.
    • -
    -

    The Sources of Downloading Hani Ar Rifai MP3

    -

If you want to download Hani Ar Rifai's recitation in mp3 format, you have many options to choose from. There are many websites, apps, and podcasts that offer his recitation for free or for a small fee. Here are some of the most popular sources:

    - - - - - - - -
Source | Description
Quran Central | A website that provides high-quality mp3 files of various Quran reciters, including Hani Ar Rifai. You can download the whole Quran or individual surahs or ayahs.
Quranicaudio.com | A website that offers mp3 files of different Qiraat of Hani Ar Rifai's recitation. You can download the whole Quran or individual surahs or ayahs.
Hani Ar Rifai MP3 Offline | An app that allows you to listen to Hani Ar Rifai's recitation offline without internet connection. You can download the app from Google Play Store or App Store.
Hani Ar Rifai Quran MP3 | An app that lets you stream and download Hani Ar Rifai's recitation online with internet connection. You can download the app from Google Play Store or App Store.
Hani Ar Rifai Podcast | A podcast that features Hani Ar Rifai's recitation on popular platforms such as Spotify, Apple Podcasts, Google Podcasts, etc. You can subscribe to the podcast and listen to his recitation anytime.
    -

    The Tips for Downloading Hani Ar Rifai MP3

    -

    Before you download Hani Ar Rifai's recitation in mp3 format, you should consider some tips to ensure that you get the best quality and experience. Here are some of the tips:

    -
      -
    • Choose the best quality and format of his recitation. Depending on your device and preference, you may want to download his recitation in different qualities (such as 128 kbps, 192 kbps, 320 kbps) and formats (such as mp3, pdf). You should choose the quality and format that suits your needs and storage space.
    • -
    • Avoid viruses and malware when downloading his recitation. Some websites or apps may contain viruses or malware that can harm your device or steal your data. You should avoid such sources and only download from trusted and verified sources.
    • -
• Organize and store his recitation files on your device. After downloading his recitation files, you should organize them in a folder or a playlist that is easy to access and manage. You should also back up your files on a cloud service or an external storage device in case you lose them or delete them by mistake.
    • -
    -

    Conclusion

    -

    Hani Ar Rifai is a remarkable Quran reciter who has a voice that can move and inspire anyone who listens to him. His recitation is a great source of guidance, wisdom, and comfort for the Muslims who love the Quran and want to increase their faith and knowledge. If you want to download his recitation in mp3 format, you can use the sources and tips we have mentioned in this article. We hope that you will enjoy listening to his recitation and benefit from it.

    -

    Do you have any questions or comments about Hani Ar Rifai or his recitation? Feel free to share them with us in the comment section below. We would love to hear from you.

    -

    FAQs

    -

    Here are some of the frequently asked questions about Hani Ar Rifai and his recitation:

    -
      -
    1. Q: How can I contact Hani Ar Rifai or invite him to my country or event?
      -A: You can contact Hani Ar Rifai through his official website (www.haniarrifai.com) or his social media accounts (Facebook, Twitter, Instagram, YouTube). You can also send him an email at haniarrifai@gmail.com or call him at +966 12 667 6677.
    2. -
    3. Q: How can I support Hani Ar Rifai and his projects?
      -A: You can support Hani Ar Rifai and his projects by donating to his charity organization (Hani Ar Rifai Foundation) that helps the poor and needy people around the world. You can also share his recitation with your friends and family and encourage them to listen to it.
    4. -
    5. Q: How can I learn Tajweed and Qiraat from Hani Ar Rifai?
      -A: You can learn Tajweed and Qiraat from Hani Ar Rifai by enrolling in his online courses (Hani Ar Rifai Academy) that teach the rules and methods of reciting the Quran correctly and beautifully. You can also watch his videos and lectures on his website or YouTube channel.
    6. -
    7. Q: What are some of the awards and honors that Hani Ar Rifai has received?
      -A: Hani Ar Rifai has received many awards and honors for his outstanding recitation and contribution to the Islamic world. Some of them are:
        -
      • The King Faisal International Prize for Service to Islam in 2021.
      • -
      • The Sheikh Zayed Book Award for Culture in 2020.
      • -
      • The Islamic Personality of the Year Award by Dubai International Holy Quran Award in 2019.
      • -
      • The Best Quran Reciter Award by Al-Azhar University in 2018.
      • -
      • The Order of Merit of the Republic of Turkey in 2017.
      • -
    8. -
    9. Q: What are some of the books and articles that Hani Ar Rifai has written or co-authored?
      -A: Hani Ar Rifai has written or co-authored many books and articles on various topics related to Islam, Quran, and society. Some of them are:
        -
      • The Art of Reciting the Quran: A Comprehensive Guide for Muslims.
      • -
      • The Miracle of the Quran: Scientific and Linguistic Evidence for its Divine Origin.
      • -
      • The Wisdom of the Quran: Lessons and Insights for Modern Life.
      • -
      • The Rights of the Quran: How to Fulfill our Duties towards the Book of Allah.
      • -
      • The Challenges of the Muslim Ummah: Causes and Solutions.
      • -
    10. -

    -
    -
    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Zoom Meeting Download Tips and Tricks to Enhance Your Zoom Experience.md b/spaces/congsaPfin/Manga-OCR/logs/Zoom Meeting Download Tips and Tricks to Enhance Your Zoom Experience.md deleted file mode 100644 index 5cdc3a6f57e6323d8f88c58074a0d55d10afd394..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Zoom Meeting Download Tips and Tricks to Enhance Your Zoom Experience.md +++ /dev/null @@ -1,122 +0,0 @@ - -

    Zoom Meeting Download: How to Install and Use Zoom on Any Device

    -

    If you are looking for a reliable, easy-to-use, and feature-rich platform for online meetings, webinars, team chat, phone, and whiteboard, then you should consider Zoom. Zoom is a cloud-based service that allows you to connect with anyone, anywhere, on any device. Whether you want to host a video conference with hundreds of participants, collaborate with your colleagues on a project, or simply chat with your friends and family, Zoom can help you do it all.

    -

    In this article, we will show you how to download and install Zoom on your desktop or mobile device, and how to use its various functions for communication and collaboration. By the end of this article, you will be able to use Zoom like a pro and enjoy its many benefits.

    -

    zoom meeting download


    Download ✵✵✵ https://urlca.com/2uO7lD



    -

    How to Download Zoom for Desktop

    -

    If you want to use Zoom on your computer, you need to download the Zoom desktop client. This is a software application that allows you to access all the features of Zoom without using a web browser. Here are the steps to download Zoom for desktop:

    -
      -
    1. Go to the Zoom website and click on Download at the top-right corner of the page. You can also directly access the Download Center.
    2. -
    3. Under Zoom Client for Meetings, click on the Download button. This will automatically download the version that matches your operating system (Windows, Mac, or Linux).
    4. -
    5. Run the installer file that you downloaded and follow the instructions on the screen. The installation process should take only a few minutes.
    6. -
    -

    Congratulations! You have successfully downloaded and installed Zoom on your desktop. You can now launch it from your start menu or desktop shortcut.

    -

    How to Download Zoom for Mobile

    -

    If you want to use Zoom on your smartphone or tablet, you need to download the Zoom mobile app. This is a free application that allows you to join or host Zoom meetings, chat with your contacts, and use other Zoom features on the go. Here are the steps to download Zoom for mobile:

    -
      -
    1. Go to the App Store (for iOS devices) or the Google Play Store (for Android devices) and search for Zoom Cloud Meetings.
    2. -
    3. Tap on the Install button and wait for the app to download on your device. The app size is about 100 MB, so make sure you have enough space and a stable internet connection.
    4. -
    5. Open the app and sign in with your Zoom account or join a meeting as a guest. You can also create a new account if you don't have one.
    6. -
    -

    Congratulations! You have successfully downloaded and installed Zoom on your mobile device. You can now launch it from your home screen or app drawer.

    -

    How to Use Zoom for Meetings and Webinars

    -

    One of the main functions of Zoom is to enable you to conduct online meetings and webinars with your colleagues, clients, partners, or anyone else. You can use Zoom to share your screen, audio, and video, as well as chat, record, and interact with your participants. Here are the steps to use Zoom for meetings and webinars:

    -
      -
    1. Launch Zoom and click on New Meeting if you want to start a new meeting or webinar, or click on Join if you want to join an existing one. You can also schedule a meeting or webinar for later by clicking on Schedule.
    2. -
    3. Enter the meeting ID or link that you received from the host or organizer, and choose your audio and video settings. You can also enter your name and password if required.
    4. -
    5. Use the toolbar at the bottom of the screen to mute or unmute yourself, start or stop your video, chat with other participants, share your screen, record the meeting or webinar, and access more options. You can also use the icons at the top-right corner of the screen to switch between speaker view and gallery view, enter full screen mode, or leave the meeting or webinar.
    6. -
    -

    Congratulations! You have successfully used Zoom for meetings and webinars. You can now enjoy the benefits of online communication and collaboration.

    -

    How to Use Zoom for Team Chat and Collaboration

    -

    Another function of Zoom is to enable you to chat and collaborate with your team members, contacts, or anyone else. You can use Zoom to send text messages, files, emojis, gifs, and more, as well as create groups, channels, and personal spaces. Here are the steps to use Zoom for team chat and collaboration:

    -

    zoom meeting download for windows 10
    -zoom meeting download for mac
    -zoom meeting download for linux
    -zoom meeting download for chrome
    -zoom meeting download for android
    -zoom meeting download for iphone
    -zoom meeting download for ipad
    -zoom meeting download for pc
    -zoom meeting download for laptop
    -zoom meeting download for desktop
    -zoom meeting download free
    -zoom meeting download link
    -zoom meeting download app
    -zoom meeting download apk
    -zoom meeting download exe
    -zoom meeting download filehippo
    -zoom meeting download softonic
    -zoom meeting download uptodown
    -zoom meeting download cnet
    -zoom meeting download latest version
    -zoom meeting download without signing in
    -zoom meeting download without account
    -zoom meeting download without app store
    -zoom meeting download without google play
    -zoom meeting download without administrator rights
    -zoom meeting download from browser
    -zoom meeting download from website
    -zoom meeting download from email
    -zoom meeting download from invitation link
    -zoom meeting download from microsoft store
    -zoom meeting download offline installer
    -zoom meeting download online installer
    -zoom meeting download full installer
    -zoom meeting download msi installer
    -zoom meeting download dmg installer
    -zoom meeting download 32 bit
    -zoom meeting download 64 bit
    -zoom meeting download windows 7 32 bit
    -zoom meeting download windows 10 64 bit
    -zoom meeting download mac os x 10.11.6 or later
    -how to install zoom meeting after downloading
    -how to use zoom meeting after downloading
    -how to update zoom meeting after downloading
    -how to uninstall zoom meeting after downloading
    -how to join a zoom meeting after downloading
    -how to host a zoom meeting after downloading
    -how to record a zoom meeting after downloading
    -how to share screen on a zoom meeting after downloading
    -how to mute yourself on a zoom meeting after downloading
    -how to change your name on a zoom meeting after downloading

    -
      -
    1. Launch Zoom and click on Chat if you want to start a conversation with an individual or a group, or click on Contacts if you want to see your contact list or add new contacts.
    2. -
    3. Start a conversation by typing a message in the text box at the bottom of the screen. You can also attach files, images, emojis, gifs, stickers, polls, and more by clicking on the icons next to the text box.
    4. -
    5. Use the chat features at the top of the screen to search for messages, edit or delete messages, star messages, mark messages as unread, mute notifications, invite others to join the conversation, or access more options. You can also use the icons at the top-right corner of the screen to switch between list view and grid view, create a new chat or channel, or access your profile settings.
    6. -
    -

    Congratulations! You have successfully used Zoom for team chat and collaboration. You can now stay connected and productive with your team.

    -

    How to Use Zoom for Phone and Whiteboard

    -

The last function of Zoom is to enable you to make or receive phone calls with your Zoom number and to use the whiteboard to draw and annotate. You can use Zoom to manage your calls, voicemails, contacts, and notes, and to sketch and annotate ideas on a shared whiteboard. Here are the steps to use Zoom for phone and whiteboard:

    -
      -
    1. Launch Zoom and click on Phone if you want to make or receive calls with your Zoom number, or click on Whiteboard if you want to use the whiteboard to draw and annotate.
    2. -
    3. For phone, you can use the keypad to dial a number or select a contact from your contact list. You can also see your call history, voicemails, and recordings. For whiteboard, you can use the toolbar to select different tools, colors, shapes, and text. You can also share your whiteboard with others or save it as an image.
    4. -
    5. Use the phone and whiteboard features at the top of the screen to mute or unmute yourself, start or stop your video, chat with other participants, share your screen, record the call or whiteboard, and access more options. You can also use the icons at the top-right corner of the screen to switch between speaker view and gallery view, enter full screen mode, or leave the call or whiteboard.
    6. -
    -

    Congratulations! You have successfully used Zoom for phone and whiteboard. You can now communicate and collaborate with others in more ways.

    -

    Conclusion: Why Zoom is the Best Platform for Communication and Collaboration

    -

    In this article, we have shown you how to download and install Zoom on any device, and how to use its various functions for communication and collaboration. Zoom is a powerful platform that allows you to conduct online meetings and webinars, chat and collaborate with your team, make or receive phone calls, and use the whiteboard to draw and annotate. Zoom is easy to use, reliable, secure, and affordable. Whether you are a student, a teacher, a professional, a business owner, or anyone else who needs to communicate and collaborate online, Zoom is the best platform for you.

    -

    We hope you have found this article helpful and informative. If you have any questions or feedback about Zoom meeting download or anything else related to Zoom, please feel free to contact us. We would love to hear from you.

    -

    FAQs About Zoom Meeting Download

    -

    Here are some of the most frequently asked questions about Zoom meeting download:

    -
      -
    • Q1: Is Zoom free to use?
    • -
    • A1: Yes, Zoom offers a free plan that allows you to host up to 100 participants for up to 40 minutes per meeting. You can also join unlimited meetings as a guest. However, if you want more features and flexibility, you can upgrade to one of the paid plans that start from $14.99 per month per host.
    • -
    • Q2: How do I update Zoom?
    • -
    • A2: To update Zoom on your desktop or mobile device, you can either check for updates manually from the app settings or enable automatic updates from your device settings. Updating Zoom regularly ensures that you have the latest features and security patches.
    • -
    • Q3: How do I join a Zoom meeting without downloading?
    • -
    • A3: If you don't want to download Zoom on your device, you can join a Zoom meeting from your web browser. To do this, you need to click on the meeting link that you received from the host or organizer, and then click on join from your browser. However, joining from your browser may limit some of the features and functionalities of Zoom.
    • -
    • Q4: How do I troubleshoot Zoom issues?
    • -
    • A4: If you encounter any issues with Zoom, such as audio or video problems, connection issues, or error messages, you can try some of the following solutions:
    • -
        -
      • Check your internet speed and bandwidth.
      • -
      • Check your device settings and permissions.
      • -
      • Check your Zoom settings and preferences.
      • -
      • Restart your device or app.
      • -
      • Update your device or app.
      • -
      • Contact your host or organizer.
      • -
      • Contact Zoom support.
      • -
      -
    • Q5: How do I contact Zoom support?
    • -
    • A5: If you need any help or assistance with Zoom, you can contact Zoom support by visiting their Help Center, where you can find articles, videos, webinars, and FAQs. You can also submit a ticket, chat with an agent, or call their toll-free number. Zoom support is available 24/7 and is ready to assist you with any issue or inquiry.

      -
      -
      \ No newline at end of file diff --git a/spaces/contluForse/HuggingGPT/assets/Automation Studio Hydraulic Library [BEST].md b/spaces/contluForse/HuggingGPT/assets/Automation Studio Hydraulic Library [BEST].md deleted file mode 100644 index f4c0f9a8bc493b56b89cff8fae662ad9d46cc766..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/Automation Studio Hydraulic Library [BEST].md +++ /dev/null @@ -1,28 +0,0 @@ -
      -

      How to Use Automation Studio Hydraulic Library for Fluid Power Systems Design and Simulation

      -

      Automation Studio is a circuit design, simulation and project documentation software for fluid power systems and electrical projects conceived by Famic Technologies Inc. [^2^]. It is used for CAD, maintenance, and training purposes by engineers, trainers, and service and maintenance personnel. Automation Studio can be applied in the design, training and troubleshooting of hydraulics, pneumatics, HMI, and electrical control systems [^2^].

      -

One of the features of Automation Studio is the hydraulic library, which contains a wide array of components to create basic to advanced hydraulic systems or reproduce your hydraulic trainer. Users can change any component's parameters to reflect their own applications or assignments [^1^]. The hydraulic library follows ISO standards and uses modeling techniques such as Bernoulli's law and the gradient method [^2^].

      -

      automation studio hydraulic library


      Download Filehttps://ssurll.com/2uzxxA



      -

In this article, we will show you how to use the Automation Studio hydraulic library for fluid power systems design and simulation. We will cover the following steps:

      -
        -
      1. Creating a new project and selecting the hydraulic technology.
      2. -
      3. Adding components from the hydraulic library to the schematic.
      4. -
      5. Connecting the components and setting their properties.
      6. -
      7. Running the simulation and analyzing the results.
      8. -
      9. Generating reports and documentation.
      10. -
      -

      Step 1: Creating a new project and selecting the hydraulic technology

      -

      To create a new project in Automation Studio, click on File > New > Project. Give your project a name and a description. Then, select the Hydraulic technology from the list of available technologies. You can also select other technologies if you want to combine them with hydraulics in your project. Click on OK to create your project.

      -

      You will see a blank schematic page where you can start drawing your hydraulic circuit. You can also add more pages if you need to create multiple circuits or subcircuits. To add a new page, right-click on the Pages tab at the bottom of the screen and select Add Page.

      -

      Step 2: Adding components from the hydraulic library to the schematic

      -

      To add components from the hydraulic library to your schematic, click on the Library icon on the toolbar or press F9. You will see a list of categories and subcategories of hydraulic components. You can browse through them or use the search function to find the component you need. To add a component to your schematic, drag and drop it from the library window to your schematic page.

      -

      You can also use the Manufacturer's Catalogue feature to access components from specific manufacturers. To access this feature, click on Tools > Manufacturer's Catalogue. You will see a list of manufacturers and their products. You can browse through them or use the search function to find the component you need. To add a component from the Manufacturer's Catalogue to your schematic, drag and drop it from the catalogue window to your schematic page.

      -

      Step 3: Connecting the components and setting their properties

      -

      To connect the components in your schematic, use the Connection tool on the toolbar or press F3. Click on one component's port and then click on another component's port to create a connection line between them. You can also use junctions, tees, elbows, reducers, etc. to create more complex connections.

      -

      To set the properties of a component or a connection line, double-click on it or right-click on it and select Properties. You will see a dialog box where you can change various parameters such as name, description, symbol, size, color, position, orientation, etc. You can also enter values for physical properties such as pressure, flow rate, temperature, viscosity, etc.

      -

      -

      Step 4: Running the simulation and analyzing the results

      -

      To run the simulation of your hydraulic circuit, click on Simulation > Run Simulation or press F5. You will see a dialog box where you can select the simulation mode (steady state or transient), simulation time (start time, end time, time step), simulation options (solver type, tolerance, etc.), and output options (variables to plot or export). Click on OK to start the simulation.

      -

You will see your schematic page animated with colors indicating pressure levels and flow directions. You can also see numerical values for variables such as pressure and flow rate as the simulation runs.

      -
      -
      \ No newline at end of file diff --git a/spaces/crystals201/Mikufans/README.md b/spaces/crystals201/Mikufans/README.md deleted file mode 100644 index 81f9fb598a7f7472d93664f390425dfb57e618b1..0000000000000000000000000000000000000000 --- a/spaces/crystals201/Mikufans/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Go Proxy Bingai -emoji: 📉 -colorFrom: gray -colorTo: red -sdk: docker -pinned: false -license: mit -app_port: 8080 -duplicated_from: laogou717/bing ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/cvlab/zero123-live/ldm/modules/image_degradation/bsrgan_light.py b/spaces/cvlab/zero123-live/ldm/modules/image_degradation/bsrgan_light.py deleted file mode 100644 index dfa760689762d4e9490fe4d817f844955f1b35de..0000000000000000000000000000000000000000 --- a/spaces/cvlab/zero123-live/ldm/modules/image_degradation/bsrgan_light.py +++ /dev/null @@ -1,650 +0,0 @@ -# -*- coding: utf-8 -*- -import numpy as np -import cv2 -import torch - -from functools import partial -import random -from scipy import ndimage -import scipy -import scipy.stats as ss -from scipy.interpolate import interp2d -from scipy.linalg import orth -import albumentations - -import ldm.modules.image_degradation.utils_image as util - -""" -# -------------------------------------------- -# Super-Resolution -# -------------------------------------------- -# -# Kai Zhang (cskaizhang@gmail.com) -# https://github.com/cszn -# From 2019/03--2021/08 -# -------------------------------------------- -""" - - -def modcrop_np(img, sf): - ''' - Args: - img: numpy image, WxH or WxHxC - sf: scale factor - Return: - cropped image - ''' - w, h = img.shape[:2] - im = np.copy(img) - return im[:w - w % sf, :h - h % sf, ...] - - -""" -# -------------------------------------------- -# anisotropic Gaussian kernels -# -------------------------------------------- -""" - - -def analytic_kernel(k): - """Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)""" - k_size = k.shape[0] - # Calculate the big kernels size - big_k = np.zeros((3 * k_size - 2, 3 * k_size - 2)) - # Loop over the small kernel to fill the big one - for r in range(k_size): - for c in range(k_size): - big_k[2 * r:2 * r + k_size, 2 * c:2 * c + k_size] += k[r, c] * k - # Crop the edges of the big kernel to ignore very small values and increase run time of SR - crop = k_size // 2 - cropped_big_k = big_k[crop:-crop, crop:-crop] - # Normalize to 1 - return cropped_big_k / cropped_big_k.sum() - - -def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6): - """ generate an anisotropic Gaussian kernel - Args: - ksize : e.g., 15, kernel size - theta : [0, pi], rotation angle range - l1 : [0.1,50], scaling of eigenvalues - l2 : [0.1,l1], scaling of eigenvalues - If l1 = l2, will get an isotropic Gaussian kernel. 
- Returns: - k : kernel - """ - - v = np.dot(np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.array([1., 0.])) - V = np.array([[v[0], v[1]], [v[1], -v[0]]]) - D = np.array([[l1, 0], [0, l2]]) - Sigma = np.dot(np.dot(V, D), np.linalg.inv(V)) - k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize) - - return k - - -def gm_blur_kernel(mean, cov, size=15): - center = size / 2.0 + 0.5 - k = np.zeros([size, size]) - for y in range(size): - for x in range(size): - cy = y - center + 1 - cx = x - center + 1 - k[y, x] = ss.multivariate_normal.pdf([cx, cy], mean=mean, cov=cov) - - k = k / np.sum(k) - return k - - -def shift_pixel(x, sf, upper_left=True): - """shift pixel for super-resolution with different scale factors - Args: - x: WxHxC or WxH - sf: scale factor - upper_left: shift direction - """ - h, w = x.shape[:2] - shift = (sf - 1) * 0.5 - xv, yv = np.arange(0, w, 1.0), np.arange(0, h, 1.0) - if upper_left: - x1 = xv + shift - y1 = yv + shift - else: - x1 = xv - shift - y1 = yv - shift - - x1 = np.clip(x1, 0, w - 1) - y1 = np.clip(y1, 0, h - 1) - - if x.ndim == 2: - x = interp2d(xv, yv, x)(x1, y1) - if x.ndim == 3: - for i in range(x.shape[-1]): - x[:, :, i] = interp2d(xv, yv, x[:, :, i])(x1, y1) - - return x - - -def blur(x, k): - ''' - x: image, NxcxHxW - k: kernel, Nx1xhxw - ''' - n, c = x.shape[:2] - p1, p2 = (k.shape[-2] - 1) // 2, (k.shape[-1] - 1) // 2 - x = torch.nn.functional.pad(x, pad=(p1, p2, p1, p2), mode='replicate') - k = k.repeat(1, c, 1, 1) - k = k.view(-1, 1, k.shape[2], k.shape[3]) - x = x.view(1, -1, x.shape[2], x.shape[3]) - x = torch.nn.functional.conv2d(x, k, bias=None, stride=1, padding=0, groups=n * c) - x = x.view(n, c, x.shape[2], x.shape[3]) - - return x - - -def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10., noise_level=0): - """" - # modified version of https://github.com/assafshocher/BlindSR_dataset_generator - # Kai Zhang - # min_var = 0.175 * sf # variance of the gaussian kernel will be sampled between min_var and max_var - # max_var = 2.5 * sf - """ - # Set random eigen-vals (lambdas) and angle (theta) for COV matrix - lambda_1 = min_var + np.random.rand() * (max_var - min_var) - lambda_2 = min_var + np.random.rand() * (max_var - min_var) - theta = np.random.rand() * np.pi # random theta - noise = -noise_level + np.random.rand(*k_size) * noise_level * 2 - - # Set COV matrix using Lambdas and Theta - LAMBDA = np.diag([lambda_1, lambda_2]) - Q = np.array([[np.cos(theta), -np.sin(theta)], - [np.sin(theta), np.cos(theta)]]) - SIGMA = Q @ LAMBDA @ Q.T - INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :] - - # Set expectation position (shifting kernel for aligned image) - MU = k_size // 2 - 0.5 * (scale_factor - 1) # - 0.5 * (scale_factor - k_size % 2) - MU = MU[None, None, :, None] - - # Create meshgrid for Gaussian - [X, Y] = np.meshgrid(range(k_size[0]), range(k_size[1])) - Z = np.stack([X, Y], 2)[:, :, :, None] - - # Calcualte Gaussian for every pixel of the kernel - ZZ = Z - MU - ZZ_t = ZZ.transpose(0, 1, 3, 2) - raw_kernel = np.exp(-0.5 * np.squeeze(ZZ_t @ INV_SIGMA @ ZZ)) * (1 + noise) - - # shift the kernel so it will be centered - # raw_kernel_centered = kernel_shift(raw_kernel, scale_factor) - - # Normalize the kernel and return - # kernel = raw_kernel_centered / np.sum(raw_kernel_centered) - kernel = raw_kernel / np.sum(raw_kernel) - return kernel - - -def fspecial_gaussian(hsize, sigma): - hsize = [hsize, hsize] - siz = [(hsize[0] - 1.0) / 2.0, (hsize[1] - 1.0) / 2.0] - std = 
sigma - [x, y] = np.meshgrid(np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1)) - arg = -(x * x + y * y) / (2 * std * std) - h = np.exp(arg) - h[h < scipy.finfo(float).eps * h.max()] = 0 - sumh = h.sum() - if sumh != 0: - h = h / sumh - return h - - -def fspecial_laplacian(alpha): - alpha = max([0, min([alpha, 1])]) - h1 = alpha / (alpha + 1) - h2 = (1 - alpha) / (alpha + 1) - h = [[h1, h2, h1], [h2, -4 / (alpha + 1), h2], [h1, h2, h1]] - h = np.array(h) - return h - - -def fspecial(filter_type, *args, **kwargs): - ''' - python code from: - https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py - ''' - if filter_type == 'gaussian': - return fspecial_gaussian(*args, **kwargs) - if filter_type == 'laplacian': - return fspecial_laplacian(*args, **kwargs) - - -""" -# -------------------------------------------- -# degradation models -# -------------------------------------------- -""" - - -def bicubic_degradation(x, sf=3): - ''' - Args: - x: HxWxC image, [0, 1] - sf: down-scale factor - Return: - bicubicly downsampled LR image - ''' - x = util.imresize_np(x, scale=1 / sf) - return x - - -def srmd_degradation(x, k, sf=3): - ''' blur + bicubic downsampling - Args: - x: HxWxC image, [0, 1] - k: hxw, double - sf: down-scale factor - Return: - downsampled LR image - Reference: - @inproceedings{zhang2018learning, - title={Learning a single convolutional super-resolution network for multiple degradations}, - author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei}, - booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, - pages={3262--3271}, - year={2018} - } - ''' - x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap') # 'nearest' | 'mirror' - x = bicubic_degradation(x, sf=sf) - return x - - -def dpsr_degradation(x, k, sf=3): - ''' bicubic downsampling + blur - Args: - x: HxWxC image, [0, 1] - k: hxw, double - sf: down-scale factor - Return: - downsampled LR image - Reference: - @inproceedings{zhang2019deep, - title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels}, - author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei}, - booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, - pages={1671--1681}, - year={2019} - } - ''' - x = bicubic_degradation(x, sf=sf) - x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap') - return x - - -def classical_degradation(x, k, sf=3): - ''' blur + downsampling - Args: - x: HxWxC image, [0, 1]/[0, 255] - k: hxw, double - sf: down-scale factor - Return: - downsampled LR image - ''' - x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap') - # x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2)) - st = 0 - return x[st::sf, st::sf, ...] - - -def add_sharpening(img, weight=0.5, radius=50, threshold=10): - """USM sharpening. borrowed from real-ESRGAN - Input image: I; Blurry image: B. - 1. K = I + weight * (I - B) - 2. Mask = 1 if abs(I - B) > threshold, else: 0 - 3. Blur mask: - 4. Out = Mask * K + (1 - Mask) * I - Args: - img (Numpy array): Input image, HWC, BGR; float32, [0, 1]. - weight (float): Sharp weight. Default: 1. - radius (float): Kernel size of Gaussian blur. Default: 50. 
- threshold (int): - """ - if radius % 2 == 0: - radius += 1 - blur = cv2.GaussianBlur(img, (radius, radius), 0) - residual = img - blur - mask = np.abs(residual) * 255 > threshold - mask = mask.astype('float32') - soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0) - - K = img + weight * residual - K = np.clip(K, 0, 1) - return soft_mask * K + (1 - soft_mask) * img - - -def add_blur(img, sf=4): - wd2 = 4.0 + sf - wd = 2.0 + 0.2 * sf - - wd2 = wd2/4 - wd = wd/4 - - if random.random() < 0.5: - l1 = wd2 * random.random() - l2 = wd2 * random.random() - k = anisotropic_Gaussian(ksize=random.randint(2, 11) + 3, theta=random.random() * np.pi, l1=l1, l2=l2) - else: - k = fspecial('gaussian', random.randint(2, 4) + 3, wd * random.random()) - img = ndimage.convolve(img, np.expand_dims(k, axis=2), mode='mirror') - - return img - - -def add_resize(img, sf=4): - rnum = np.random.rand() - if rnum > 0.8: # up - sf1 = random.uniform(1, 2) - elif rnum < 0.7: # down - sf1 = random.uniform(0.5 / sf, 1) - else: - sf1 = 1.0 - img = cv2.resize(img, (int(sf1 * img.shape[1]), int(sf1 * img.shape[0])), interpolation=random.choice([1, 2, 3])) - img = np.clip(img, 0.0, 1.0) - - return img - - -# def add_Gaussian_noise(img, noise_level1=2, noise_level2=25): -# noise_level = random.randint(noise_level1, noise_level2) -# rnum = np.random.rand() -# if rnum > 0.6: # add color Gaussian noise -# img += np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) -# elif rnum < 0.4: # add grayscale Gaussian noise -# img += np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) -# else: # add noise -# L = noise_level2 / 255. -# D = np.diag(np.random.rand(3)) -# U = orth(np.random.rand(3, 3)) -# conv = np.dot(np.dot(np.transpose(U), D), U) -# img += np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) -# img = np.clip(img, 0.0, 1.0) -# return img - -def add_Gaussian_noise(img, noise_level1=2, noise_level2=25): - noise_level = random.randint(noise_level1, noise_level2) - rnum = np.random.rand() - if rnum > 0.6: # add color Gaussian noise - img = img + np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) - elif rnum < 0.4: # add grayscale Gaussian noise - img = img + np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) - else: # add noise - L = noise_level2 / 255. - D = np.diag(np.random.rand(3)) - U = orth(np.random.rand(3, 3)) - conv = np.dot(np.dot(np.transpose(U), D), U) - img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) - img = np.clip(img, 0.0, 1.0) - return img - - -def add_speckle_noise(img, noise_level1=2, noise_level2=25): - noise_level = random.randint(noise_level1, noise_level2) - img = np.clip(img, 0.0, 1.0) - rnum = random.random() - if rnum > 0.6: - img += img * np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) - elif rnum < 0.4: - img += img * np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) - else: - L = noise_level2 / 255. - D = np.diag(np.random.rand(3)) - U = orth(np.random.rand(3, 3)) - conv = np.dot(np.dot(np.transpose(U), D), U) - img += img * np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) - img = np.clip(img, 0.0, 1.0) - return img - - -def add_Poisson_noise(img): - img = np.clip((img * 255.0).round(), 0, 255) / 255. 
- vals = 10 ** (2 * random.random() + 2.0) # [2, 4] - if random.random() < 0.5: - img = np.random.poisson(img * vals).astype(np.float32) / vals - else: - img_gray = np.dot(img[..., :3], [0.299, 0.587, 0.114]) - img_gray = np.clip((img_gray * 255.0).round(), 0, 255) / 255. - noise_gray = np.random.poisson(img_gray * vals).astype(np.float32) / vals - img_gray - img += noise_gray[:, :, np.newaxis] - img = np.clip(img, 0.0, 1.0) - return img - - -def add_JPEG_noise(img): - quality_factor = random.randint(80, 95) - img = cv2.cvtColor(util.single2uint(img), cv2.COLOR_RGB2BGR) - result, encimg = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor]) - img = cv2.imdecode(encimg, 1) - img = cv2.cvtColor(util.uint2single(img), cv2.COLOR_BGR2RGB) - return img - - -def random_crop(lq, hq, sf=4, lq_patchsize=64): - h, w = lq.shape[:2] - rnd_h = random.randint(0, h - lq_patchsize) - rnd_w = random.randint(0, w - lq_patchsize) - lq = lq[rnd_h:rnd_h + lq_patchsize, rnd_w:rnd_w + lq_patchsize, :] - - rnd_h_H, rnd_w_H = int(rnd_h * sf), int(rnd_w * sf) - hq = hq[rnd_h_H:rnd_h_H + lq_patchsize * sf, rnd_w_H:rnd_w_H + lq_patchsize * sf, :] - return lq, hq - - -def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None): - """ - This is the degradation model of BSRGAN from the paper - "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" - ---------- - img: HXWXC, [0, 1], its size should be large than (lq_patchsizexsf)x(lq_patchsizexsf) - sf: scale factor - isp_model: camera ISP model - Returns - ------- - img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] - hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] - """ - isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25 - sf_ori = sf - - h1, w1 = img.shape[:2] - img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop - h, w = img.shape[:2] - - if h < lq_patchsize * sf or w < lq_patchsize * sf: - raise ValueError(f'img size ({h1}X{w1}) is too small!') - - hq = img.copy() - - if sf == 4 and random.random() < scale2_prob: # downsample1 - if np.random.rand() < 0.5: - img = cv2.resize(img, (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])), - interpolation=random.choice([1, 2, 3])) - else: - img = util.imresize_np(img, 1 / 2, True) - img = np.clip(img, 0.0, 1.0) - sf = 2 - - shuffle_order = random.sample(range(7), 7) - idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3) - if idx1 > idx2: # keep downsample3 last - shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1] - - for i in shuffle_order: - - if i == 0: - img = add_blur(img, sf=sf) - - elif i == 1: - img = add_blur(img, sf=sf) - - elif i == 2: - a, b = img.shape[1], img.shape[0] - # downsample2 - if random.random() < 0.75: - sf1 = random.uniform(1, 2 * sf) - img = cv2.resize(img, (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])), - interpolation=random.choice([1, 2, 3])) - else: - k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf)) - k_shifted = shift_pixel(k, sf) - k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel - img = ndimage.convolve(img, np.expand_dims(k_shifted, axis=2), mode='mirror') - img = img[0::sf, 0::sf, ...] 
# nearest downsampling - img = np.clip(img, 0.0, 1.0) - - elif i == 3: - # downsample3 - img = cv2.resize(img, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3])) - img = np.clip(img, 0.0, 1.0) - - elif i == 4: - # add Gaussian noise - img = add_Gaussian_noise(img, noise_level1=2, noise_level2=8) - - elif i == 5: - # add JPEG noise - if random.random() < jpeg_prob: - img = add_JPEG_noise(img) - - elif i == 6: - # add processed camera sensor noise - if random.random() < isp_prob and isp_model is not None: - with torch.no_grad(): - img, hq = isp_model.forward(img.copy(), hq) - - # add final JPEG compression noise - img = add_JPEG_noise(img) - - # random crop - img, hq = random_crop(img, hq, sf_ori, lq_patchsize) - - return img, hq - - -# todo no isp_model? -def degradation_bsrgan_variant(image, sf=4, isp_model=None): - """ - This is the degradation model of BSRGAN from the paper - "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" - ---------- - sf: scale factor - isp_model: camera ISP model - Returns - ------- - img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] - hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] - """ - image = util.uint2single(image) - isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25 - sf_ori = sf - - h1, w1 = image.shape[:2] - image = image.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop - h, w = image.shape[:2] - - hq = image.copy() - - if sf == 4 and random.random() < scale2_prob: # downsample1 - if np.random.rand() < 0.5: - image = cv2.resize(image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])), - interpolation=random.choice([1, 2, 3])) - else: - image = util.imresize_np(image, 1 / 2, True) - image = np.clip(image, 0.0, 1.0) - sf = 2 - - shuffle_order = random.sample(range(7), 7) - idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3) - if idx1 > idx2: # keep downsample3 last - shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1] - - for i in shuffle_order: - - if i == 0: - image = add_blur(image, sf=sf) - - # elif i == 1: - # image = add_blur(image, sf=sf) - - if i == 0: - pass - - elif i == 2: - a, b = image.shape[1], image.shape[0] - # downsample2 - if random.random() < 0.8: - sf1 = random.uniform(1, 2 * sf) - image = cv2.resize(image, (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])), - interpolation=random.choice([1, 2, 3])) - else: - k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf)) - k_shifted = shift_pixel(k, sf) - k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel - image = ndimage.convolve(image, np.expand_dims(k_shifted, axis=2), mode='mirror') - image = image[0::sf, 0::sf, ...] 
# nearest downsampling - - image = np.clip(image, 0.0, 1.0) - - elif i == 3: - # downsample3 - image = cv2.resize(image, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3])) - image = np.clip(image, 0.0, 1.0) - - elif i == 4: - # add Gaussian noise - image = add_Gaussian_noise(image, noise_level1=1, noise_level2=2) - - elif i == 5: - # add JPEG noise - if random.random() < jpeg_prob: - image = add_JPEG_noise(image) - # - # elif i == 6: - # # add processed camera sensor noise - # if random.random() < isp_prob and isp_model is not None: - # with torch.no_grad(): - # img, hq = isp_model.forward(img.copy(), hq) - - # add final JPEG compression noise - image = add_JPEG_noise(image) - image = util.single2uint(image) - example = {"image": image} - return example - - - - -if __name__ == '__main__': - print("hey") - img = util.imread_uint('utils/test.png', 3) - img = img[:448, :448] - h = img.shape[0] // 4 - print("resizing to", h) - sf = 4 - deg_fn = partial(degradation_bsrgan_variant, sf=sf) - for i in range(20): - print(i) - img_hq = img - img_lq = deg_fn(img)["image"] - img_hq, img_lq = util.uint2single(img_hq), util.uint2single(img_lq) - print(img_lq) - img_lq_bicubic = albumentations.SmallestMaxSize(max_size=h, interpolation=cv2.INTER_CUBIC)(image=img_hq)["image"] - print(img_lq.shape) - print("bicubic", img_lq_bicubic.shape) - print(img_hq.shape) - lq_nearest = cv2.resize(util.single2uint(img_lq), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])), - interpolation=0) - lq_bicubic_nearest = cv2.resize(util.single2uint(img_lq_bicubic), - (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])), - interpolation=0) - img_concat = np.concatenate([lq_bicubic_nearest, lq_nearest, util.single2uint(img_hq)], axis=1) - util.imsave(img_concat, str(i) + '.png') diff --git a/spaces/cvlab/zero123-live/taming-transformers/taming/data/conditional_builder/objects_bbox.py b/spaces/cvlab/zero123-live/taming-transformers/taming/data/conditional_builder/objects_bbox.py deleted file mode 100644 index 15881e76b7ab2a914df8f2dfe08ae4f0c6c511b5..0000000000000000000000000000000000000000 --- a/spaces/cvlab/zero123-live/taming-transformers/taming/data/conditional_builder/objects_bbox.py +++ /dev/null @@ -1,60 +0,0 @@ -from itertools import cycle -from typing import List, Tuple, Callable, Optional - -from PIL import Image as pil_image, ImageDraw as pil_img_draw, ImageFont -from more_itertools.recipes import grouper -from taming.data.image_transforms import convert_pil_to_tensor -from torch import LongTensor, Tensor - -from taming.data.helper_types import BoundingBox, Annotation -from taming.data.conditional_builder.objects_center_points import ObjectsCenterPointsConditionalBuilder -from taming.data.conditional_builder.utils import COLOR_PALETTE, WHITE, GRAY_75, BLACK, additional_parameters_string, \ - pad_list, get_plot_font_size, absolute_bbox - - -class ObjectsBoundingBoxConditionalBuilder(ObjectsCenterPointsConditionalBuilder): - @property - def object_descriptor_length(self) -> int: - return 3 - - def _make_object_descriptors(self, annotations: List[Annotation]) -> List[Tuple[int, ...]]: - object_triples = [ - (self.object_representation(ann), *self.token_pair_from_bbox(ann.bbox)) - for ann in annotations - ] - empty_triple = (self.none, self.none, self.none) - object_triples = pad_list(object_triples, empty_triple, self.no_max_objects) - return object_triples - - def inverse_build(self, conditional: LongTensor) -> Tuple[List[Tuple[int, BoundingBox]], Optional[BoundingBox]]: - 
conditional_list = conditional.tolist() - crop_coordinates = None - if self.encode_crop: - crop_coordinates = self.bbox_from_token_pair(conditional_list[-2], conditional_list[-1]) - conditional_list = conditional_list[:-2] - object_triples = grouper(conditional_list, 3) - assert conditional.shape[0] == self.embedding_dim - return [ - (object_triple[0], self.bbox_from_token_pair(object_triple[1], object_triple[2])) - for object_triple in object_triples if object_triple[0] != self.none - ], crop_coordinates - - def plot(self, conditional: LongTensor, label_for_category_no: Callable[[int], str], figure_size: Tuple[int, int], - line_width: int = 3, font_size: Optional[int] = None) -> Tensor: - plot = pil_image.new('RGB', figure_size, WHITE) - draw = pil_img_draw.Draw(plot) - font = ImageFont.truetype( - "/usr/share/fonts/truetype/lato/Lato-Regular.ttf", - size=get_plot_font_size(font_size, figure_size) - ) - width, height = plot.size - description, crop_coordinates = self.inverse_build(conditional) - for (representation, bbox), color in zip(description, cycle(COLOR_PALETTE)): - annotation = self.representation_to_annotation(representation) - class_label = label_for_category_no(annotation.category_no) + ' ' + additional_parameters_string(annotation) - bbox = absolute_bbox(bbox, width, height) - draw.rectangle(bbox, outline=color, width=line_width) - draw.text((bbox[0] + line_width, bbox[1] + line_width), class_label, anchor='la', fill=BLACK, font=font) - if crop_coordinates is not None: - draw.rectangle(absolute_bbox(crop_coordinates, width, height), outline=GRAY_75, width=line_width) - return convert_pil_to_tensor(plot) / 127.5 - 1. diff --git a/spaces/cynika/taffy/inference_main.py b/spaces/cynika/taffy/inference_main.py deleted file mode 100644 index 825e791db86d37e955f42e8cb34323dbb248ed32..0000000000000000000000000000000000000000 --- a/spaces/cynika/taffy/inference_main.py +++ /dev/null @@ -1,65 +0,0 @@ -import io -import logging -import time -from pathlib import Path - -import librosa -import numpy as np -import soundfile - -from inference import infer_tool -from inference import slicer -from inference.infer_tool import Svc - -logging.getLogger('numba').setLevel(logging.WARNING) -chunks_dict = infer_tool.read_temp("inference/chunks_temp.json") - -model_path = "logs/48k/G_174000-Copy1.pth" -config_path = "configs/config.json" -svc_model = Svc(model_path, config_path) -infer_tool.mkdir(["raw", "results"]) - -# 支持多个wav文件,放在raw文件夹下 -clean_names = ["君の知らない物語-src"] -trans = [-5] # 音高调整,支持正负(半音) -spk_list = ['yunhao'] # 每次同时合成多语者音色 -slice_db = -40 # 默认-40,嘈杂的音频可以-30,干声保留呼吸可以-50 -wav_format = 'flac' # 音频输出格式 - -infer_tool.fill_a_to_b(trans, clean_names) -for clean_name, tran in zip(clean_names, trans): - raw_audio_path = f"raw/{clean_name}" - if "." 
not in raw_audio_path: - raw_audio_path += ".wav" - infer_tool.format_wav(raw_audio_path) - wav_path = Path(raw_audio_path).with_suffix('.wav') - audio, sr = librosa.load(wav_path, mono=True, sr=None) - wav_hash = infer_tool.get_md5(audio) - if wav_hash in chunks_dict.keys(): - print("load chunks from temp") - chunks = chunks_dict[wav_hash]["chunks"] - else: - chunks = slicer.cut(wav_path, db_thresh=slice_db) - print(chunks) - chunks_dict[wav_hash] = {"chunks": chunks, "time": int(time.time())} - infer_tool.write_temp("inference/chunks_temp.json", chunks_dict) - audio_data, audio_sr = slicer.chunks2audio(wav_path, chunks) - - for spk in spk_list: - audio = [] - for (slice_tag, data) in audio_data: - print(f'#=====segment start, {round(len(data) / audio_sr, 3)}s======') - length = int(np.ceil(len(data) / audio_sr * svc_model.target_sample)) - raw_path = io.BytesIO() - soundfile.write(raw_path, data, audio_sr, format="wav") - raw_path.seek(0) - if slice_tag: - print('jump empty segment') - _audio = np.zeros(length) - else: - out_audio, out_sr = svc_model.infer(spk, tran, raw_path) - _audio = out_audio.cpu().numpy() - audio.extend(list(_audio)) - - res_path = f'./results/{clean_name}_{tran}key_{spk}.{wav_format}' - soundfile.write(res_path, audio, svc_model.target_sample, format=wav_format) diff --git a/spaces/danterivers/music-generation-samples/CONTRIBUTING.md b/spaces/danterivers/music-generation-samples/CONTRIBUTING.md deleted file mode 100644 index 55b99140204d785d572ada9761dd77f302ae31c6..0000000000000000000000000000000000000000 --- a/spaces/danterivers/music-generation-samples/CONTRIBUTING.md +++ /dev/null @@ -1,35 +0,0 @@ -# Contributing to Audiocraft - -We want to make contributing to this project as easy and transparent as -possible. - -## Pull Requests - -Audiocraft is the implementation of a research paper. -Therefore, we do not plan on accepting many pull requests for new features. -We certainly welcome them for bug fixes. - -1. Fork the repo and create your branch from `main`. -2. If you've added code that should be tested, add tests. -3. If you've changed APIs, update the documentation. -4. Ensure the test suite passes. -5. Make sure your code lints. -6. If you haven't already, complete the Contributor License Agreement ("CLA"). - -## Contributor License Agreement ("CLA") -In order to accept your pull request, we need you to submit a CLA. You only need -to do this once to work on any of Meta's open source projects. - -Complete your CLA here: - -## Issues -We use GitHub issues to track public bugs. Please ensure your description is -clear and has sufficient instructions to be able to reproduce the issue. - -Meta has a [bounty program](https://www.facebook.com/whitehat/) for the safe -disclosure of security bugs. In those cases, please go through the process -outlined on that page and do not file a public issue. - -## License -By contributing to encodec, you agree that your contributions will be licensed -under the LICENSE file in the root directory of this source tree. 
diff --git a/spaces/dawood/audioldm-text-to-audio-generation/audioldm/clap/encoders.py b/spaces/dawood/audioldm-text-to-audio-generation/audioldm/clap/encoders.py deleted file mode 100644 index 8f42d9832c31fa51cf361c472c639d68869769f1..0000000000000000000000000000000000000000 --- a/spaces/dawood/audioldm-text-to-audio-generation/audioldm/clap/encoders.py +++ /dev/null @@ -1,169 +0,0 @@ -import torch -import torch.nn as nn -from audioldm.clap.open_clip import create_model -from audioldm.clap.training.data import get_audio_features -import torchaudio -from transformers import RobertaTokenizer -import torch.nn.functional as F - - -class CLAPAudioEmbeddingClassifierFreev2(nn.Module): - def __init__( - self, - pretrained_path="", - key="class", - sampling_rate=16000, - embed_mode="audio", - unconditional_prob=0.1, - random_mute=False, - max_random_mute_portion=0.5, - training_mode=True, - ): - super().__init__() - - self.key = key - self.device = "cpu" - self.precision = "fp32" - self.amodel = "HTSAT-tiny" # or 'PANN-14' - self.tmodel = "roberta" # the best text encoder in our training - self.enable_fusion = False # False if you do not want to use the fusion model - self.fusion_type = "aff_2d" - self.pretrained = pretrained_path - self.embed_mode = embed_mode - self.embed_mode_orig = embed_mode - self.sampling_rate = sampling_rate - self.unconditional_prob = unconditional_prob - self.random_mute = random_mute - self.tokenize = RobertaTokenizer.from_pretrained("roberta-base") - self.max_random_mute_portion = max_random_mute_portion - self.training_mode = training_mode - self.model, self.model_cfg = create_model( - self.amodel, - self.tmodel, - self.pretrained, - precision=self.precision, - device=self.device, - enable_fusion=self.enable_fusion, - fusion_type=self.fusion_type, - ) - for p in self.model.parameters(): - p.requires_grad = False - - self.model.eval() - - def get_unconditional_condition(self, batchsize): - self.unconditional_token = self.model.get_text_embedding( - self.tokenizer(["", ""]) - )[0:1] - return torch.cat([self.unconditional_token.unsqueeze(0)] * batchsize, dim=0) - - def batch_to_list(self, batch): - ret = [] - for i in range(batch.size(0)): - ret.append(batch[i]) - return ret - - def make_decision(self, probability): - if float(torch.rand(1)) < probability: - return True - else: - return False - - def random_uniform(self, start, end): - val = torch.rand(1).item() - return start + (end - start) * val - - def _random_mute(self, waveform): - # waveform: [bs, t-steps] - t_steps = waveform.size(-1) - for i in range(waveform.size(0)): - mute_size = int( - self.random_uniform(0, end=int(t_steps * self.max_random_mute_portion)) - ) - mute_start = int(self.random_uniform(0, t_steps - mute_size)) - waveform[i, mute_start : mute_start + mute_size] = 0 - return waveform - - def cos_similarity(self, waveform, text): - # waveform: [bs, t_steps] - with torch.no_grad(): - self.embed_mode = "audio" - audio_emb = self(waveform.cuda()) - self.embed_mode = "text" - text_emb = self(text) - similarity = F.cosine_similarity(audio_emb, text_emb, dim=2) - return similarity.squeeze() - - def forward(self, batch, key=None): - # If you want this conditioner to be unconditional, set self.unconditional_prob = 1.0 - # If you want this conditioner to be fully conditional, set self.unconditional_prob = 0.0 - if self.model.training == True and not self.training_mode: - print( - "The pretrained CLAP model should always be in eval mode. Reloading model just in case you change the parameters." 
- ) - self.model, self.model_cfg = create_model( - self.amodel, - self.tmodel, - self.pretrained, - precision=self.precision, - device="cuda", - enable_fusion=self.enable_fusion, - fusion_type=self.fusion_type, - ) - for p in self.model.parameters(): - p.requires_grad = False - self.model.eval() - - # the 'fusion' truncate mode can be changed to 'rand_trunc' if run in unfusion mode - if self.embed_mode == "audio": - with torch.no_grad(): - audio_dict_list = [] - assert ( - self.sampling_rate == 16000 - ), "We only support 16000 sampling rate" - if self.random_mute: - batch = self._random_mute(batch) - # batch: [bs, 1, t-samples] - batch = torchaudio.functional.resample( - batch, orig_freq=self.sampling_rate, new_freq=48000 - ) - for waveform in self.batch_to_list(batch): - audio_dict = {} - audio_dict = get_audio_features( - audio_dict, - waveform, - 480000, - data_truncating="fusion", - data_filling="repeatpad", - audio_cfg=self.model_cfg["audio_cfg"], - ) - audio_dict_list.append(audio_dict) - # [bs, 512] - embed = self.model.get_audio_embedding(audio_dict_list) - elif self.embed_mode == "text": - with torch.no_grad(): - # the 'fusion' truncate mode can be changed to 'rand_trunc' if run in unfusion mode - text_data = self.tokenizer(batch) - embed = self.model.get_text_embedding(text_data) - - embed = embed.unsqueeze(1) - self.unconditional_token = self.model.get_text_embedding( - self.tokenizer(["", ""]) - )[0:1] - - for i in range(embed.size(0)): - if self.make_decision(self.unconditional_prob): - embed[i] = self.unconditional_token - - # [bs, 1, 512] - return embed.detach() - - def tokenizer(self, text): - result = self.tokenize( - text, - padding="max_length", - truncation=True, - max_length=77, - return_tensors="pt", - ) - return {k: v.squeeze(0) for k, v in result.items()} diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/aiofiles/threadpool/__init__.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/aiofiles/threadpool/__init__.py deleted file mode 100644 index a1cc673d1a7398f23a1e8f00c19cef1cafa906c2..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/aiofiles/threadpool/__init__.py +++ /dev/null @@ -1,141 +0,0 @@ -"""Handle files using a thread pool executor.""" -import asyncio -import sys -from functools import partial, singledispatch -from io import ( - BufferedIOBase, - BufferedRandom, - BufferedReader, - BufferedWriter, - FileIO, - TextIOBase, -) -from types import coroutine - -from ..base import AiofilesContextManager -from .binary import ( - AsyncBufferedIOBase, - AsyncBufferedReader, - AsyncFileIO, - AsyncIndirectBufferedIOBase, -) -from .text import AsyncTextIndirectIOWrapper, AsyncTextIOWrapper - -sync_open = open - -__all__ = ( - "open", - "stdin", - "stdout", - "stderr", - "stdin_bytes", - "stdout_bytes", - "stderr_bytes", -) - - -def open( - file, - mode="r", - buffering=-1, - encoding=None, - errors=None, - newline=None, - closefd=True, - opener=None, - *, - loop=None, - executor=None, -): - return AiofilesContextManager( - _open( - file, - mode=mode, - buffering=buffering, - encoding=encoding, - errors=errors, - newline=newline, - closefd=closefd, - opener=opener, - loop=loop, - executor=executor, - ) - ) - - -@coroutine -def _open( - file, - mode="r", - buffering=-1, - encoding=None, - errors=None, - newline=None, - closefd=True, - opener=None, - *, - loop=None, - executor=None, -): - """Open an asyncio file.""" - if loop is 
None: - loop = asyncio.get_running_loop() - cb = partial( - sync_open, - file, - mode=mode, - buffering=buffering, - encoding=encoding, - errors=errors, - newline=newline, - closefd=closefd, - opener=opener, - ) - f = yield from loop.run_in_executor(executor, cb) - - return wrap(f, loop=loop, executor=executor) - - -@singledispatch -def wrap(file, *, loop=None, executor=None): - raise TypeError("Unsupported io type: {}.".format(file)) - - -@wrap.register(TextIOBase) -def _(file, *, loop=None, executor=None): - return AsyncTextIOWrapper(file, loop=loop, executor=executor) - - -@wrap.register(BufferedWriter) -@wrap.register(BufferedIOBase) -def _(file, *, loop=None, executor=None): - return AsyncBufferedIOBase(file, loop=loop, executor=executor) - - -@wrap.register(BufferedReader) -@wrap.register(BufferedRandom) -def _(file, *, loop=None, executor=None): - return AsyncBufferedReader(file, loop=loop, executor=executor) - - -@wrap.register(FileIO) -def _(file, *, loop=None, executor=None): - return AsyncFileIO(file, loop=loop, executor=executor) - - -stdin = AsyncTextIndirectIOWrapper("sys.stdin", None, None, indirect=lambda: sys.stdin) -stdout = AsyncTextIndirectIOWrapper( - "sys.stdout", None, None, indirect=lambda: sys.stdout -) -stderr = AsyncTextIndirectIOWrapper( - "sys.stderr", None, None, indirect=lambda: sys.stderr -) -stdin_bytes = AsyncIndirectBufferedIOBase( - "sys.stdin.buffer", None, None, indirect=lambda: sys.stdin.buffer -) -stdout_bytes = AsyncIndirectBufferedIOBase( - "sys.stdout.buffer", None, None, indirect=lambda: sys.stdout.buffer -) -stderr_bytes = AsyncIndirectBufferedIOBase( - "sys.stderr.buffer", None, None, indirect=lambda: sys.stderr.buffer -) diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/attr/converters.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/attr/converters.py deleted file mode 100644 index 4cada106b01c564faf17969d24038f80abd5de6f..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/attr/converters.py +++ /dev/null @@ -1,144 +0,0 @@ -# SPDX-License-Identifier: MIT - -""" -Commonly useful converters. -""" - - -import typing - -from ._compat import _AnnotationExtractor -from ._make import NOTHING, Factory, pipe - - -__all__ = [ - "default_if_none", - "optional", - "pipe", - "to_bool", -] - - -def optional(converter): - """ - A converter that allows an attribute to be optional. An optional attribute - is one which can be set to ``None``. - - Type annotations will be inferred from the wrapped converter's, if it - has any. - - :param callable converter: the converter that is used for non-``None`` - values. - - .. versionadded:: 17.1.0 - """ - - def optional_converter(val): - if val is None: - return None - return converter(val) - - xtr = _AnnotationExtractor(converter) - - t = xtr.get_first_param_type() - if t: - optional_converter.__annotations__["val"] = typing.Optional[t] - - rt = xtr.get_return_type() - if rt: - optional_converter.__annotations__["return"] = typing.Optional[rt] - - return optional_converter - - -def default_if_none(default=NOTHING, factory=None): - """ - A converter that allows to replace ``None`` values by *default* or the - result of *factory*. - - :param default: Value to be used if ``None`` is passed. Passing an instance - of `attrs.Factory` is supported, however the ``takes_self`` option - is *not*. 
- :param callable factory: A callable that takes no parameters whose result - is used if ``None`` is passed. - - :raises TypeError: If **neither** *default* or *factory* is passed. - :raises TypeError: If **both** *default* and *factory* are passed. - :raises ValueError: If an instance of `attrs.Factory` is passed with - ``takes_self=True``. - - .. versionadded:: 18.2.0 - """ - if default is NOTHING and factory is None: - raise TypeError("Must pass either `default` or `factory`.") - - if default is not NOTHING and factory is not None: - raise TypeError( - "Must pass either `default` or `factory` but not both." - ) - - if factory is not None: - default = Factory(factory) - - if isinstance(default, Factory): - if default.takes_self: - raise ValueError( - "`takes_self` is not supported by default_if_none." - ) - - def default_if_none_converter(val): - if val is not None: - return val - - return default.factory() - - else: - - def default_if_none_converter(val): - if val is not None: - return val - - return default - - return default_if_none_converter - - -def to_bool(val): - """ - Convert "boolean" strings (e.g., from env. vars.) to real booleans. - - Values mapping to :code:`True`: - - - :code:`True` - - :code:`"true"` / :code:`"t"` - - :code:`"yes"` / :code:`"y"` - - :code:`"on"` - - :code:`"1"` - - :code:`1` - - Values mapping to :code:`False`: - - - :code:`False` - - :code:`"false"` / :code:`"f"` - - :code:`"no"` / :code:`"n"` - - :code:`"off"` - - :code:`"0"` - - :code:`0` - - :raises ValueError: for any other value. - - .. versionadded:: 21.3.0 - """ - if isinstance(val, str): - val = val.lower() - truthy = {True, "true", "t", "yes", "y", "on", "1", 1} - falsy = {False, "false", "f", "no", "n", "off", "0", 0} - try: - if val in truthy: - return True - if val in falsy: - return False - except TypeError: - # Raised when "val" is not hashable (e.g., lists) - pass - raise ValueError(f"Cannot convert value to bool: {val}") diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/charset_normalizer/assets/__init__.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/charset_normalizer/assets/__init__.py deleted file mode 100644 index 9075930dc8f9a382c0bd7663e546fa2a93a4d257..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/charset_normalizer/assets/__init__.py +++ /dev/null @@ -1,1440 +0,0 @@ -# -*- coding: utf-8 -*- -from typing import Dict, List - -# Language label that contain the em dash "—" -# character are to be considered alternative seq to origin -FREQUENCIES: Dict[str, List[str]] = { - "English": [ - "e", - "a", - "t", - "i", - "o", - "n", - "s", - "r", - "h", - "l", - "d", - "c", - "u", - "m", - "f", - "p", - "g", - "w", - "y", - "b", - "v", - "k", - "x", - "j", - "z", - "q", - ], - "English—": [ - "e", - "a", - "t", - "i", - "o", - "n", - "s", - "r", - "h", - "l", - "d", - "c", - "m", - "u", - "f", - "p", - "g", - "w", - "b", - "y", - "v", - "k", - "j", - "x", - "z", - "q", - ], - "German": [ - "e", - "n", - "i", - "r", - "s", - "t", - "a", - "d", - "h", - "u", - "l", - "g", - "o", - "c", - "m", - "b", - "f", - "k", - "w", - "z", - "p", - "v", - "ü", - "ä", - "ö", - "j", - ], - "French": [ - "e", - "a", - "s", - "n", - "i", - "t", - "r", - "l", - "u", - "o", - "d", - "c", - "p", - "m", - "é", - "v", - "g", - "f", - "b", - "h", - "q", - "à", - "x", - "è", - "y", - "j", - ], - "Dutch": [ - "e", - "n", - "a", - "i", - "r", - "t", - "o", - "d", - "s", - 
"l", - "g", - "h", - "v", - "m", - "u", - "k", - "c", - "p", - "b", - "w", - "j", - "z", - "f", - "y", - "x", - "ë", - ], - "Italian": [ - "e", - "i", - "a", - "o", - "n", - "l", - "t", - "r", - "s", - "c", - "d", - "u", - "p", - "m", - "g", - "v", - "f", - "b", - "z", - "h", - "q", - "è", - "à", - "k", - "y", - "ò", - ], - "Polish": [ - "a", - "i", - "o", - "e", - "n", - "r", - "z", - "w", - "s", - "c", - "t", - "k", - "y", - "d", - "p", - "m", - "u", - "l", - "j", - "ł", - "g", - "b", - "h", - "ą", - "ę", - "ó", - ], - "Spanish": [ - "e", - "a", - "o", - "n", - "s", - "r", - "i", - "l", - "d", - "t", - "c", - "u", - "m", - "p", - "b", - "g", - "v", - "f", - "y", - "ó", - "h", - "q", - "í", - "j", - "z", - "á", - ], - "Russian": [ - "о", - "а", - "е", - "и", - "н", - "с", - "т", - "р", - "в", - "л", - "к", - "м", - "д", - "п", - "у", - "г", - "я", - "ы", - "з", - "б", - "й", - "ь", - "ч", - "х", - "ж", - "ц", - ], - # Jap-Kanji - "Japanese": [ - "人", - "一", - "大", - "亅", - "丁", - "丨", - "竹", - "笑", - "口", - "日", - "今", - "二", - "彳", - "行", - "十", - "土", - "丶", - "寸", - "寺", - "時", - "乙", - "丿", - "乂", - "气", - "気", - "冂", - "巾", - "亠", - "市", - "目", - "儿", - "見", - "八", - "小", - "凵", - "県", - "月", - "彐", - "門", - "間", - "木", - "東", - "山", - "出", - "本", - "中", - "刀", - "分", - "耳", - "又", - "取", - "最", - "言", - "田", - "心", - "思", - "刂", - "前", - "京", - "尹", - "事", - "生", - "厶", - "云", - "会", - "未", - "来", - "白", - "冫", - "楽", - "灬", - "馬", - "尸", - "尺", - "駅", - "明", - "耂", - "者", - "了", - "阝", - "都", - "高", - "卜", - "占", - "厂", - "广", - "店", - "子", - "申", - "奄", - "亻", - "俺", - "上", - "方", - "冖", - "学", - "衣", - "艮", - "食", - "自", - ], - # Jap-Katakana - "Japanese—": [ - "ー", - "ン", - "ス", - "・", - "ル", - "ト", - "リ", - "イ", - "ア", - "ラ", - "ッ", - "ク", - "ド", - "シ", - "レ", - "ジ", - "タ", - "フ", - "ロ", - "カ", - "テ", - "マ", - "ィ", - "グ", - "バ", - "ム", - "プ", - "オ", - "コ", - "デ", - "ニ", - "ウ", - "メ", - "サ", - "ビ", - "ナ", - "ブ", - "ャ", - "エ", - "ュ", - "チ", - "キ", - "ズ", - "ダ", - "パ", - "ミ", - "ェ", - "ョ", - "ハ", - "セ", - "ベ", - "ガ", - "モ", - "ツ", - "ネ", - "ボ", - "ソ", - "ノ", - "ァ", - "ヴ", - "ワ", - "ポ", - "ペ", - "ピ", - "ケ", - "ゴ", - "ギ", - "ザ", - "ホ", - "ゲ", - "ォ", - "ヤ", - "ヒ", - "ユ", - "ヨ", - "ヘ", - "ゼ", - "ヌ", - "ゥ", - "ゾ", - "ヶ", - "ヂ", - "ヲ", - "ヅ", - "ヵ", - "ヱ", - "ヰ", - "ヮ", - "ヽ", - "゠", - "ヾ", - "ヷ", - "ヿ", - "ヸ", - "ヹ", - "ヺ", - ], - # Jap-Hiragana - "Japanese——": [ - "の", - "に", - "る", - "た", - "と", - "は", - "し", - "い", - "を", - "で", - "て", - "が", - "な", - "れ", - "か", - "ら", - "さ", - "っ", - "り", - "す", - "あ", - "も", - "こ", - "ま", - "う", - "く", - "よ", - "き", - "ん", - "め", - "お", - "け", - "そ", - "つ", - "だ", - "や", - "え", - "ど", - "わ", - "ち", - "み", - "せ", - "じ", - "ば", - "へ", - "び", - "ず", - "ろ", - "ほ", - "げ", - "む", - "べ", - "ひ", - "ょ", - "ゆ", - "ぶ", - "ご", - "ゃ", - "ね", - "ふ", - "ぐ", - "ぎ", - "ぼ", - "ゅ", - "づ", - "ざ", - "ぞ", - "ぬ", - "ぜ", - "ぱ", - "ぽ", - "ぷ", - "ぴ", - "ぃ", - "ぁ", - "ぇ", - "ぺ", - "ゞ", - "ぢ", - "ぉ", - "ぅ", - "ゐ", - "ゝ", - "ゑ", - "゛", - "゜", - "ゎ", - "ゔ", - "゚", - "ゟ", - "゙", - "ゕ", - "ゖ", - ], - "Portuguese": [ - "a", - "e", - "o", - "s", - "i", - "r", - "d", - "n", - "t", - "m", - "u", - "c", - "l", - "p", - "g", - "v", - "b", - "f", - "h", - "ã", - "q", - "é", - "ç", - "á", - "z", - "í", - ], - "Swedish": [ - "e", - "a", - "n", - "r", - "t", - "s", - "i", - "l", - "d", - "o", - "m", - "k", - "g", - "v", - "h", - "f", - "u", - "p", - "ä", - "c", - "b", - "ö", - "å", - "y", - "j", - "x", - ], - "Chinese": [ - "的", - "一", - "是", - "不", - "了", - "在", - "人", - "有", - "我", - 
"他", - "这", - "个", - "们", - "中", - "来", - "上", - "大", - "为", - "和", - "国", - "地", - "到", - "以", - "说", - "时", - "要", - "就", - "出", - "会", - "可", - "也", - "你", - "对", - "生", - "能", - "而", - "子", - "那", - "得", - "于", - "着", - "下", - "自", - "之", - "年", - "过", - "发", - "后", - "作", - "里", - "用", - "道", - "行", - "所", - "然", - "家", - "种", - "事", - "成", - "方", - "多", - "经", - "么", - "去", - "法", - "学", - "如", - "都", - "同", - "现", - "当", - "没", - "动", - "面", - "起", - "看", - "定", - "天", - "分", - "还", - "进", - "好", - "小", - "部", - "其", - "些", - "主", - "样", - "理", - "心", - "她", - "本", - "前", - "开", - "但", - "因", - "只", - "从", - "想", - "实", - ], - "Ukrainian": [ - "о", - "а", - "н", - "і", - "и", - "р", - "в", - "т", - "е", - "с", - "к", - "л", - "у", - "д", - "м", - "п", - "з", - "я", - "ь", - "б", - "г", - "й", - "ч", - "х", - "ц", - "ї", - ], - "Norwegian": [ - "e", - "r", - "n", - "t", - "a", - "s", - "i", - "o", - "l", - "d", - "g", - "k", - "m", - "v", - "f", - "p", - "u", - "b", - "h", - "å", - "y", - "j", - "ø", - "c", - "æ", - "w", - ], - "Finnish": [ - "a", - "i", - "n", - "t", - "e", - "s", - "l", - "o", - "u", - "k", - "ä", - "m", - "r", - "v", - "j", - "h", - "p", - "y", - "d", - "ö", - "g", - "c", - "b", - "f", - "w", - "z", - ], - "Vietnamese": [ - "n", - "h", - "t", - "i", - "c", - "g", - "a", - "o", - "u", - "m", - "l", - "r", - "à", - "đ", - "s", - "e", - "v", - "p", - "b", - "y", - "ư", - "d", - "á", - "k", - "ộ", - "ế", - ], - "Czech": [ - "o", - "e", - "a", - "n", - "t", - "s", - "i", - "l", - "v", - "r", - "k", - "d", - "u", - "m", - "p", - "í", - "c", - "h", - "z", - "á", - "y", - "j", - "b", - "ě", - "é", - "ř", - ], - "Hungarian": [ - "e", - "a", - "t", - "l", - "s", - "n", - "k", - "r", - "i", - "o", - "z", - "á", - "é", - "g", - "m", - "b", - "y", - "v", - "d", - "h", - "u", - "p", - "j", - "ö", - "f", - "c", - ], - "Korean": [ - "이", - "다", - "에", - "의", - "는", - "로", - "하", - "을", - "가", - "고", - "지", - "서", - "한", - "은", - "기", - "으", - "년", - "대", - "사", - "시", - "를", - "리", - "도", - "인", - "스", - "일", - ], - "Indonesian": [ - "a", - "n", - "e", - "i", - "r", - "t", - "u", - "s", - "d", - "k", - "m", - "l", - "g", - "p", - "b", - "o", - "h", - "y", - "j", - "c", - "w", - "f", - "v", - "z", - "x", - "q", - ], - "Turkish": [ - "a", - "e", - "i", - "n", - "r", - "l", - "ı", - "k", - "d", - "t", - "s", - "m", - "y", - "u", - "o", - "b", - "ü", - "ş", - "v", - "g", - "z", - "h", - "c", - "p", - "ç", - "ğ", - ], - "Romanian": [ - "e", - "i", - "a", - "r", - "n", - "t", - "u", - "l", - "o", - "c", - "s", - "d", - "p", - "m", - "ă", - "f", - "v", - "î", - "g", - "b", - "ș", - "ț", - "z", - "h", - "â", - "j", - ], - "Farsi": [ - "ا", - "ی", - "ر", - "د", - "ن", - "ه", - "و", - "م", - "ت", - "ب", - "س", - "ل", - "ک", - "ش", - "ز", - "ف", - "گ", - "ع", - "خ", - "ق", - "ج", - "آ", - "پ", - "ح", - "ط", - "ص", - ], - "Arabic": [ - "ا", - "ل", - "ي", - "م", - "و", - "ن", - "ر", - "ت", - "ب", - "ة", - "ع", - "د", - "س", - "ف", - "ه", - "ك", - "ق", - "أ", - "ح", - "ج", - "ش", - "ط", - "ص", - "ى", - "خ", - "إ", - ], - "Danish": [ - "e", - "r", - "n", - "t", - "a", - "i", - "s", - "d", - "l", - "o", - "g", - "m", - "k", - "f", - "v", - "u", - "b", - "h", - "p", - "å", - "y", - "ø", - "æ", - "c", - "j", - "w", - ], - "Serbian": [ - "а", - "и", - "о", - "е", - "н", - "р", - "с", - "у", - "т", - "к", - "ј", - "в", - "д", - "м", - "п", - "л", - "г", - "з", - "б", - "a", - "i", - "e", - "o", - "n", - "ц", - "ш", - ], - "Lithuanian": [ - "i", - "a", - "s", - "o", - "r", - "e", - "t", - "n", - "u", 
- "k", - "m", - "l", - "p", - "v", - "d", - "j", - "g", - "ė", - "b", - "y", - "ų", - "š", - "ž", - "c", - "ą", - "į", - ], - "Slovene": [ - "e", - "a", - "i", - "o", - "n", - "r", - "s", - "l", - "t", - "j", - "v", - "k", - "d", - "p", - "m", - "u", - "z", - "b", - "g", - "h", - "č", - "c", - "š", - "ž", - "f", - "y", - ], - "Slovak": [ - "o", - "a", - "e", - "n", - "i", - "r", - "v", - "t", - "s", - "l", - "k", - "d", - "m", - "p", - "u", - "c", - "h", - "j", - "b", - "z", - "á", - "y", - "ý", - "í", - "č", - "é", - ], - "Hebrew": [ - "י", - "ו", - "ה", - "ל", - "ר", - "ב", - "ת", - "מ", - "א", - "ש", - "נ", - "ע", - "ם", - "ד", - "ק", - "ח", - "פ", - "ס", - "כ", - "ג", - "ט", - "צ", - "ן", - "ז", - "ך", - ], - "Bulgarian": [ - "а", - "и", - "о", - "е", - "н", - "т", - "р", - "с", - "в", - "л", - "к", - "д", - "п", - "м", - "з", - "г", - "я", - "ъ", - "у", - "б", - "ч", - "ц", - "й", - "ж", - "щ", - "х", - ], - "Croatian": [ - "a", - "i", - "o", - "e", - "n", - "r", - "j", - "s", - "t", - "u", - "k", - "l", - "v", - "d", - "m", - "p", - "g", - "z", - "b", - "c", - "č", - "h", - "š", - "ž", - "ć", - "f", - ], - "Hindi": [ - "क", - "र", - "स", - "न", - "त", - "म", - "ह", - "प", - "य", - "ल", - "व", - "ज", - "द", - "ग", - "ब", - "श", - "ट", - "अ", - "ए", - "थ", - "भ", - "ड", - "च", - "ध", - "ष", - "इ", - ], - "Estonian": [ - "a", - "i", - "e", - "s", - "t", - "l", - "u", - "n", - "o", - "k", - "r", - "d", - "m", - "v", - "g", - "p", - "j", - "h", - "ä", - "b", - "õ", - "ü", - "f", - "c", - "ö", - "y", - ], - "Thai": [ - "า", - "น", - "ร", - "อ", - "ก", - "เ", - "ง", - "ม", - "ย", - "ล", - "ว", - "ด", - "ท", - "ส", - "ต", - "ะ", - "ป", - "บ", - "ค", - "ห", - "แ", - "จ", - "พ", - "ช", - "ข", - "ใ", - ], - "Greek": [ - "α", - "τ", - "ο", - "ι", - "ε", - "ν", - "ρ", - "σ", - "κ", - "η", - "π", - "ς", - "υ", - "μ", - "λ", - "ί", - "ό", - "ά", - "γ", - "έ", - "δ", - "ή", - "ω", - "χ", - "θ", - "ύ", - ], - "Tamil": [ - "க", - "த", - "ப", - "ட", - "ர", - "ம", - "ல", - "ன", - "வ", - "ற", - "ய", - "ள", - "ச", - "ந", - "இ", - "ண", - "அ", - "ஆ", - "ழ", - "ங", - "எ", - "உ", - "ஒ", - "ஸ", - ], - "Kazakh": [ - "а", - "ы", - "е", - "н", - "т", - "р", - "л", - "і", - "д", - "с", - "м", - "қ", - "к", - "о", - "б", - "и", - "у", - "ғ", - "ж", - "ң", - "з", - "ш", - "й", - "п", - "г", - "ө", - ], -} diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/dateutil/zoneinfo/__init__.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/dateutil/zoneinfo/__init__.py deleted file mode 100644 index 34f11ad66c88047f2c049a4cdcc937b4b78ea6d6..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/dateutil/zoneinfo/__init__.py +++ /dev/null @@ -1,167 +0,0 @@ -# -*- coding: utf-8 -*- -import warnings -import json - -from tarfile import TarFile -from pkgutil import get_data -from io import BytesIO - -from dateutil.tz import tzfile as _tzfile - -__all__ = ["get_zonefile_instance", "gettz", "gettz_db_metadata"] - -ZONEFILENAME = "dateutil-zoneinfo.tar.gz" -METADATA_FN = 'METADATA' - - -class tzfile(_tzfile): - def __reduce__(self): - return (gettz, (self._filename,)) - - -def getzoneinfofile_stream(): - try: - return BytesIO(get_data(__name__, ZONEFILENAME)) - except IOError as e: # TODO switch to FileNotFoundError? 
- warnings.warn("I/O error({0}): {1}".format(e.errno, e.strerror)) - return None - - -class ZoneInfoFile(object): - def __init__(self, zonefile_stream=None): - if zonefile_stream is not None: - with TarFile.open(fileobj=zonefile_stream) as tf: - self.zones = {zf.name: tzfile(tf.extractfile(zf), filename=zf.name) - for zf in tf.getmembers() - if zf.isfile() and zf.name != METADATA_FN} - # deal with links: They'll point to their parent object. Less - # waste of memory - links = {zl.name: self.zones[zl.linkname] - for zl in tf.getmembers() if - zl.islnk() or zl.issym()} - self.zones.update(links) - try: - metadata_json = tf.extractfile(tf.getmember(METADATA_FN)) - metadata_str = metadata_json.read().decode('UTF-8') - self.metadata = json.loads(metadata_str) - except KeyError: - # no metadata in tar file - self.metadata = None - else: - self.zones = {} - self.metadata = None - - def get(self, name, default=None): - """ - Wrapper for :func:`ZoneInfoFile.zones.get`. This is a convenience method - for retrieving zones from the zone dictionary. - - :param name: - The name of the zone to retrieve. (Generally IANA zone names) - - :param default: - The value to return in the event of a missing key. - - .. versionadded:: 2.6.0 - - """ - return self.zones.get(name, default) - - -# The current API has gettz as a module function, although in fact it taps into -# a stateful class. So as a workaround for now, without changing the API, we -# will create a new "global" class instance the first time a user requests a -# timezone. Ugly, but adheres to the api. -# -# TODO: Remove after deprecation period. -_CLASS_ZONE_INSTANCE = [] - - -def get_zonefile_instance(new_instance=False): - """ - This is a convenience function which provides a :class:`ZoneInfoFile` - instance using the data provided by the ``dateutil`` package. By default, it - caches a single instance of the ZoneInfoFile object and returns that. - - :param new_instance: - If ``True``, a new instance of :class:`ZoneInfoFile` is instantiated and - used as the cached instance for the next call. Otherwise, new instances - are created only as necessary. - - :return: - Returns a :class:`ZoneInfoFile` object. - - .. versionadded:: 2.6 - """ - if new_instance: - zif = None - else: - zif = getattr(get_zonefile_instance, '_cached_instance', None) - - if zif is None: - zif = ZoneInfoFile(getzoneinfofile_stream()) - - get_zonefile_instance._cached_instance = zif - - return zif - - -def gettz(name): - """ - This retrieves a time zone from the local zoneinfo tarball that is packaged - with dateutil. - - :param name: - An IANA-style time zone name, as found in the zoneinfo file. - - :return: - Returns a :class:`dateutil.tz.tzfile` time zone object. - - .. warning:: - It is generally inadvisable to use this function, and it is only - provided for API compatibility with earlier versions. This is *not* - equivalent to ``dateutil.tz.gettz()``, which selects an appropriate - time zone based on the inputs, favoring system zoneinfo. This is ONLY - for accessing the dateutil-specific zoneinfo (which may be out of - date compared to the system zoneinfo). - - .. deprecated:: 2.6 - If you need to use a specific zoneinfofile over the system zoneinfo, - instantiate a :class:`dateutil.zoneinfo.ZoneInfoFile` object and call - :func:`dateutil.zoneinfo.ZoneInfoFile.get(name)` instead. - - Use :func:`get_zonefile_instance` to retrieve an instance of the - dateutil-provided zoneinfo. 
- """ - warnings.warn("zoneinfo.gettz() will be removed in future versions, " - "to use the dateutil-provided zoneinfo files, instantiate a " - "ZoneInfoFile object and use ZoneInfoFile.zones.get() " - "instead. See the documentation for details.", - DeprecationWarning) - - if len(_CLASS_ZONE_INSTANCE) == 0: - _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream())) - return _CLASS_ZONE_INSTANCE[0].zones.get(name) - - -def gettz_db_metadata(): - """ Get the zonefile metadata - - See `zonefile_metadata`_ - - :returns: - A dictionary with the database metadata - - .. deprecated:: 2.6 - See deprecation warning in :func:`zoneinfo.gettz`. To get metadata, - query the attribute ``zoneinfo.ZoneInfoFile.metadata``. - """ - warnings.warn("zoneinfo.gettz_db_metadata() will be removed in future " - "versions, to use the dateutil-provided zoneinfo files, " - "ZoneInfoFile object and query the 'metadata' attribute " - "instead. See the documentation for details.", - DeprecationWarning) - - if len(_CLASS_ZONE_INSTANCE) == 0: - _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream())) - return _CLASS_ZONE_INSTANCE[0].metadata diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/markdown_it/rules_inline/fragments_join.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/markdown_it/rules_inline/fragments_join.py deleted file mode 100644 index f795c1364b8ac098b7a17f34cd31d7070280cf36..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/markdown_it/rules_inline/fragments_join.py +++ /dev/null @@ -1,43 +0,0 @@ -from .state_inline import StateInline - - -def fragments_join(state: StateInline) -> None: - """ - Clean up tokens after emphasis and strikethrough postprocessing: - merge adjacent text nodes into one and re-calculate all token levels - - This is necessary because initially emphasis delimiter markers (``*, _, ~``) - are treated as their own separate text tokens. Then emphasis rule either - leaves them as text (needed to merge with adjacent text) or turns them - into opening/closing tags (which messes up levels inside). - """ - level = 0 - maximum = len(state.tokens) - - curr = last = 0 - while curr < maximum: - # re-calculate levels after emphasis/strikethrough turns some text nodes - # into opening/closing tags - if state.tokens[curr].nesting < 0: - level -= 1 # closing tag - state.tokens[curr].level = level - if state.tokens[curr].nesting > 0: - level += 1 # opening tag - - if ( - state.tokens[curr].type == "text" - and curr + 1 < maximum - and state.tokens[curr + 1].type == "text" - ): - # collapse two adjacent text nodes - state.tokens[curr + 1].content = ( - state.tokens[curr].content + state.tokens[curr + 1].content - ) - else: - if curr != last: - state.tokens[last] = state.tokens[curr] - last += 1 - curr += 1 - - if curr != last: - del state.tokens[last:] diff --git a/spaces/declare-lab/tango/diffusers/examples/textual_inversion/textual_inversion.py b/spaces/declare-lab/tango/diffusers/examples/textual_inversion/textual_inversion.py deleted file mode 100644 index 42ea9c946c47aba12c2207ef6a57d868f05ad86b..0000000000000000000000000000000000000000 --- a/spaces/declare-lab/tango/diffusers/examples/textual_inversion/textual_inversion.py +++ /dev/null @@ -1,875 +0,0 @@ -#!/usr/bin/env python -# coding=utf-8 -# Copyright 2023 The HuggingFace Inc. team. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and - -import argparse -import logging -import math -import os -import random -import warnings -from pathlib import Path - -import numpy as np -import PIL -import torch -import torch.nn.functional as F -import torch.utils.checkpoint -import transformers -from accelerate import Accelerator -from accelerate.logging import get_logger -from accelerate.utils import ProjectConfiguration, set_seed -from huggingface_hub import create_repo, upload_folder - -# TODO: remove and import from diffusers.utils when the new version of diffusers is released -from packaging import version -from PIL import Image -from torch.utils.data import Dataset -from torchvision import transforms -from tqdm.auto import tqdm -from transformers import CLIPTextModel, CLIPTokenizer - -import diffusers -from diffusers import ( - AutoencoderKL, - DDPMScheduler, - DiffusionPipeline, - DPMSolverMultistepScheduler, - StableDiffusionPipeline, - UNet2DConditionModel, -) -from diffusers.optimization import get_scheduler -from diffusers.utils import check_min_version, is_wandb_available -from diffusers.utils.import_utils import is_xformers_available - - -if is_wandb_available(): - import wandb - -if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"): - PIL_INTERPOLATION = { - "linear": PIL.Image.Resampling.BILINEAR, - "bilinear": PIL.Image.Resampling.BILINEAR, - "bicubic": PIL.Image.Resampling.BICUBIC, - "lanczos": PIL.Image.Resampling.LANCZOS, - "nearest": PIL.Image.Resampling.NEAREST, - } -else: - PIL_INTERPOLATION = { - "linear": PIL.Image.LINEAR, - "bilinear": PIL.Image.BILINEAR, - "bicubic": PIL.Image.BICUBIC, - "lanczos": PIL.Image.LANCZOS, - "nearest": PIL.Image.NEAREST, - } -# ------------------------------------------------------------------------------ - - -# Will error if the minimal version of diffusers is not installed. Remove at your own risks. -check_min_version("0.15.0.dev0") - -logger = get_logger(__name__) - - -def log_validation(text_encoder, tokenizer, unet, vae, args, accelerator, weight_dtype, epoch): - logger.info( - f"Running validation... \n Generating {args.num_validation_images} images with prompt:" - f" {args.validation_prompt}." 
- ) - # create pipeline (note: unet and vae are loaded again in float32) - pipeline = DiffusionPipeline.from_pretrained( - args.pretrained_model_name_or_path, - text_encoder=accelerator.unwrap_model(text_encoder), - tokenizer=tokenizer, - unet=unet, - vae=vae, - revision=args.revision, - torch_dtype=weight_dtype, - ) - pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config) - pipeline = pipeline.to(accelerator.device) - pipeline.set_progress_bar_config(disable=True) - - # run inference - generator = None if args.seed is None else torch.Generator(device=accelerator.device).manual_seed(args.seed) - images = [] - for _ in range(args.num_validation_images): - with torch.autocast("cuda"): - image = pipeline(args.validation_prompt, num_inference_steps=25, generator=generator).images[0] - images.append(image) - - for tracker in accelerator.trackers: - if tracker.name == "tensorboard": - np_images = np.stack([np.asarray(img) for img in images]) - tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC") - if tracker.name == "wandb": - tracker.log( - { - "validation": [ - wandb.Image(image, caption=f"{i}: {args.validation_prompt}") for i, image in enumerate(images) - ] - } - ) - - del pipeline - torch.cuda.empty_cache() - - -def save_progress(text_encoder, placeholder_token_id, accelerator, args, save_path): - logger.info("Saving embeddings") - learned_embeds = accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[placeholder_token_id] - learned_embeds_dict = {args.placeholder_token: learned_embeds.detach().cpu()} - torch.save(learned_embeds_dict, save_path) - - -def parse_args(): - parser = argparse.ArgumentParser(description="Simple example of a training script.") - parser.add_argument( - "--save_steps", - type=int, - default=500, - help="Save learned_embeds.bin every X updates steps.", - ) - parser.add_argument( - "--only_save_embeds", - action="store_true", - default=False, - help="Save only the embeddings for the new concept.", - ) - parser.add_argument( - "--pretrained_model_name_or_path", - type=str, - default=None, - required=True, - help="Path to pretrained model or model identifier from huggingface.co/models.", - ) - parser.add_argument( - "--revision", - type=str, - default=None, - required=False, - help="Revision of pretrained model identifier from huggingface.co/models.", - ) - parser.add_argument( - "--tokenizer_name", - type=str, - default=None, - help="Pretrained tokenizer name or path if not the same as model_name", - ) - parser.add_argument( - "--train_data_dir", type=str, default=None, required=True, help="A folder containing the training data." - ) - parser.add_argument( - "--placeholder_token", - type=str, - default=None, - required=True, - help="A token to use as a placeholder for the concept.", - ) - parser.add_argument( - "--initializer_token", type=str, default=None, required=True, help="A token to use as initializer word." 
- ) - parser.add_argument("--learnable_property", type=str, default="object", help="Choose between 'object' and 'style'") - parser.add_argument("--repeats", type=int, default=100, help="How many times to repeat the training data.") - parser.add_argument( - "--output_dir", - type=str, - default="text-inversion-model", - help="The output directory where the model predictions and checkpoints will be written.", - ) - parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") - parser.add_argument( - "--resolution", - type=int, - default=512, - help=( - "The resolution for input images, all the images in the train/validation dataset will be resized to this" - " resolution" - ), - ) - parser.add_argument( - "--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution." - ) - parser.add_argument( - "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader." - ) - parser.add_argument("--num_train_epochs", type=int, default=100) - parser.add_argument( - "--max_train_steps", - type=int, - default=5000, - help="Total number of training steps to perform. If provided, overrides num_train_epochs.", - ) - parser.add_argument( - "--gradient_accumulation_steps", - type=int, - default=1, - help="Number of updates steps to accumulate before performing a backward/update pass.", - ) - parser.add_argument( - "--gradient_checkpointing", - action="store_true", - help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", - ) - parser.add_argument( - "--learning_rate", - type=float, - default=1e-4, - help="Initial learning rate (after the potential warmup period) to use.", - ) - parser.add_argument( - "--scale_lr", - action="store_true", - default=False, - help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", - ) - parser.add_argument( - "--lr_scheduler", - type=str, - default="constant", - help=( - 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' - ' "constant", "constant_with_warmup"]' - ), - ) - parser.add_argument( - "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." - ) - parser.add_argument( - "--dataloader_num_workers", - type=int, - default=0, - help=( - "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process." - ), - ) - parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") - parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") - parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") - parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") - parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") - parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") - parser.add_argument( - "--hub_model_id", - type=str, - default=None, - help="The name of the repository to keep in sync with the local `output_dir`.", - ) - parser.add_argument( - "--logging_dir", - type=str, - default="logs", - help=( - "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. 
Will default to" - " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." - ), - ) - parser.add_argument( - "--mixed_precision", - type=str, - default="no", - choices=["no", "fp16", "bf16"], - help=( - "Whether to use mixed precision. Choose" - "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." - "and an Nvidia Ampere GPU." - ), - ) - parser.add_argument( - "--allow_tf32", - action="store_true", - help=( - "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see" - " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" - ), - ) - parser.add_argument( - "--report_to", - type=str, - default="tensorboard", - help=( - 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' - ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' - ), - ) - parser.add_argument( - "--validation_prompt", - type=str, - default=None, - help="A prompt that is used during validation to verify that the model is learning.", - ) - parser.add_argument( - "--num_validation_images", - type=int, - default=4, - help="Number of images that should be generated during validation with `validation_prompt`.", - ) - parser.add_argument( - "--validation_steps", - type=int, - default=100, - help=( - "Run validation every X steps. Validation consists of running the prompt" - " `args.validation_prompt` multiple times: `args.num_validation_images`" - " and logging the images." - ), - ) - parser.add_argument( - "--validation_epochs", - type=int, - default=None, - help=( - "Deprecated in favor of validation_steps. Run validation every X epochs. Validation consists of running the prompt" - " `args.validation_prompt` multiple times: `args.num_validation_images`" - " and logging the images." - ), - ) - parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") - parser.add_argument( - "--checkpointing_steps", - type=int, - default=500, - help=( - "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming" - " training using `--resume_from_checkpoint`." - ), - ) - parser.add_argument( - "--checkpoints_total_limit", - type=int, - default=None, - help=( - "Max number of checkpoints to store. Passed as `total_limit` to the `Accelerator` `ProjectConfiguration`." - " See Accelerator::save_state https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.save_state" - " for more docs" - ), - ) - parser.add_argument( - "--resume_from_checkpoint", - type=str, - default=None, - help=( - "Whether training should be resumed from a previous checkpoint. Use a path saved by" - ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' - ), - ) - parser.add_argument( - "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." 
- ) - - args = parser.parse_args() - env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) - if env_local_rank != -1 and env_local_rank != args.local_rank: - args.local_rank = env_local_rank - - if args.train_data_dir is None: - raise ValueError("You must specify a train data directory.") - - return args - - -imagenet_templates_small = [ - "a photo of a {}", - "a rendering of a {}", - "a cropped photo of the {}", - "the photo of a {}", - "a photo of a clean {}", - "a photo of a dirty {}", - "a dark photo of the {}", - "a photo of my {}", - "a photo of the cool {}", - "a close-up photo of a {}", - "a bright photo of the {}", - "a cropped photo of a {}", - "a photo of the {}", - "a good photo of the {}", - "a photo of one {}", - "a close-up photo of the {}", - "a rendition of the {}", - "a photo of the clean {}", - "a rendition of a {}", - "a photo of a nice {}", - "a good photo of a {}", - "a photo of the nice {}", - "a photo of the small {}", - "a photo of the weird {}", - "a photo of the large {}", - "a photo of a cool {}", - "a photo of a small {}", -] - -imagenet_style_templates_small = [ - "a painting in the style of {}", - "a rendering in the style of {}", - "a cropped painting in the style of {}", - "the painting in the style of {}", - "a clean painting in the style of {}", - "a dirty painting in the style of {}", - "a dark painting in the style of {}", - "a picture in the style of {}", - "a cool painting in the style of {}", - "a close-up painting in the style of {}", - "a bright painting in the style of {}", - "a cropped painting in the style of {}", - "a good painting in the style of {}", - "a close-up painting in the style of {}", - "a rendition in the style of {}", - "a nice painting in the style of {}", - "a small painting in the style of {}", - "a weird painting in the style of {}", - "a large painting in the style of {}", -] - - -class TextualInversionDataset(Dataset): - def __init__( - self, - data_root, - tokenizer, - learnable_property="object", # [object, style] - size=512, - repeats=100, - interpolation="bicubic", - flip_p=0.5, - set="train", - placeholder_token="*", - center_crop=False, - ): - self.data_root = data_root - self.tokenizer = tokenizer - self.learnable_property = learnable_property - self.size = size - self.placeholder_token = placeholder_token - self.center_crop = center_crop - self.flip_p = flip_p - - self.image_paths = [os.path.join(self.data_root, file_path) for file_path in os.listdir(self.data_root)] - - self.num_images = len(self.image_paths) - self._length = self.num_images - - if set == "train": - self._length = self.num_images * repeats - - self.interpolation = { - "linear": PIL_INTERPOLATION["linear"], - "bilinear": PIL_INTERPOLATION["bilinear"], - "bicubic": PIL_INTERPOLATION["bicubic"], - "lanczos": PIL_INTERPOLATION["lanczos"], - }[interpolation] - - self.templates = imagenet_style_templates_small if learnable_property == "style" else imagenet_templates_small - self.flip_transform = transforms.RandomHorizontalFlip(p=self.flip_p) - - def __len__(self): - return self._length - - def __getitem__(self, i): - example = {} - image = Image.open(self.image_paths[i % self.num_images]) - - if not image.mode == "RGB": - image = image.convert("RGB") - - placeholder_string = self.placeholder_token - text = random.choice(self.templates).format(placeholder_string) - - example["input_ids"] = self.tokenizer( - text, - padding="max_length", - truncation=True, - max_length=self.tokenizer.model_max_length, - return_tensors="pt", - ).input_ids[0] - - # default to 
score-sde preprocessing - img = np.array(image).astype(np.uint8) - - if self.center_crop: - crop = min(img.shape[0], img.shape[1]) - ( - h, - w, - ) = ( - img.shape[0], - img.shape[1], - ) - img = img[(h - crop) // 2 : (h + crop) // 2, (w - crop) // 2 : (w + crop) // 2] - - image = Image.fromarray(img) - image = image.resize((self.size, self.size), resample=self.interpolation) - - image = self.flip_transform(image) - image = np.array(image).astype(np.uint8) - image = (image / 127.5 - 1.0).astype(np.float32) - - example["pixel_values"] = torch.from_numpy(image).permute(2, 0, 1) - return example - - -def main(): - args = parse_args() - logging_dir = os.path.join(args.output_dir, args.logging_dir) - - accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit) - - accelerator = Accelerator( - gradient_accumulation_steps=args.gradient_accumulation_steps, - mixed_precision=args.mixed_precision, - log_with=args.report_to, - logging_dir=logging_dir, - project_config=accelerator_project_config, - ) - - if args.report_to == "wandb": - if not is_wandb_available(): - raise ImportError("Make sure to install wandb if you want to use it for logging during training.") - - # Make one log on every process with the configuration for debugging. - logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - level=logging.INFO, - ) - logger.info(accelerator.state, main_process_only=False) - if accelerator.is_local_main_process: - transformers.utils.logging.set_verbosity_warning() - diffusers.utils.logging.set_verbosity_info() - else: - transformers.utils.logging.set_verbosity_error() - diffusers.utils.logging.set_verbosity_error() - - # If passed along, set the training seed now. - if args.seed is not None: - set_seed(args.seed) - - # Handle the repository creation - if accelerator.is_main_process: - if args.output_dir is not None: - os.makedirs(args.output_dir, exist_ok=True) - - if args.push_to_hub: - repo_id = create_repo( - repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token - ).repo_id - - # Load tokenizer - if args.tokenizer_name: - tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name) - elif args.pretrained_model_name_or_path: - tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer") - - # Load scheduler and models - noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") - text_encoder = CLIPTextModel.from_pretrained( - args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision - ) - vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision) - unet = UNet2DConditionModel.from_pretrained( - args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision - ) - - # Add the placeholder token in tokenizer - num_added_tokens = tokenizer.add_tokens(args.placeholder_token) - if num_added_tokens == 0: - raise ValueError( - f"The tokenizer already contains the token {args.placeholder_token}. Please pass a different" - " `placeholder_token` that is not already in the tokenizer." 
- ) - - # Convert the initializer_token, placeholder_token to ids - token_ids = tokenizer.encode(args.initializer_token, add_special_tokens=False) - # Check if initializer_token is a single token or a sequence of tokens - if len(token_ids) > 1: - raise ValueError("The initializer token must be a single token.") - - initializer_token_id = token_ids[0] - placeholder_token_id = tokenizer.convert_tokens_to_ids(args.placeholder_token) - - # Resize the token embeddings as we are adding new special tokens to the tokenizer - text_encoder.resize_token_embeddings(len(tokenizer)) - - # Initialise the newly added placeholder token with the embeddings of the initializer token - token_embeds = text_encoder.get_input_embeddings().weight.data - token_embeds[placeholder_token_id] = token_embeds[initializer_token_id] - - # Freeze vae and unet - vae.requires_grad_(False) - unet.requires_grad_(False) - # Freeze all parameters except for the token embeddings in text encoder - text_encoder.text_model.encoder.requires_grad_(False) - text_encoder.text_model.final_layer_norm.requires_grad_(False) - text_encoder.text_model.embeddings.position_embedding.requires_grad_(False) - - if args.gradient_checkpointing: - # Keep unet in train mode if we are using gradient checkpointing to save memory. - # The dropout cannot be != 0 so it doesn't matter if we are in eval or train mode. - unet.train() - text_encoder.gradient_checkpointing_enable() - unet.enable_gradient_checkpointing() - - if args.enable_xformers_memory_efficient_attention: - if is_xformers_available(): - import xformers - - xformers_version = version.parse(xformers.__version__) - if xformers_version == version.parse("0.0.16"): - logger.warn( - "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." - ) - unet.enable_xformers_memory_efficient_attention() - else: - raise ValueError("xformers is not available. Make sure it is installed correctly") - - # Enable TF32 for faster training on Ampere GPUs, - # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices - if args.allow_tf32: - torch.backends.cuda.matmul.allow_tf32 = True - - if args.scale_lr: - args.learning_rate = ( - args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes - ) - - # Initialize the optimizer - optimizer = torch.optim.AdamW( - text_encoder.get_input_embeddings().parameters(), # only optimize the embeddings - lr=args.learning_rate, - betas=(args.adam_beta1, args.adam_beta2), - weight_decay=args.adam_weight_decay, - eps=args.adam_epsilon, - ) - - # Dataset and DataLoaders creation: - train_dataset = TextualInversionDataset( - data_root=args.train_data_dir, - tokenizer=tokenizer, - size=args.resolution, - placeholder_token=args.placeholder_token, - repeats=args.repeats, - learnable_property=args.learnable_property, - center_crop=args.center_crop, - set="train", - ) - train_dataloader = torch.utils.data.DataLoader( - train_dataset, batch_size=args.train_batch_size, shuffle=True, num_workers=args.dataloader_num_workers - ) - if args.validation_epochs is not None: - warnings.warn( - f"FutureWarning: You are doing logging with validation_epochs={args.validation_epochs}." 
- " Deprecated validation_epochs in favor of `validation_steps`" - f"Setting `args.validation_steps` to {args.validation_epochs * len(train_dataset)}", - FutureWarning, - stacklevel=2, - ) - args.validation_steps = args.validation_epochs * len(train_dataset) - - # Scheduler and math around the number of training steps. - overrode_max_train_steps = False - num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) - if args.max_train_steps is None: - args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch - overrode_max_train_steps = True - - lr_scheduler = get_scheduler( - args.lr_scheduler, - optimizer=optimizer, - num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps, - num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, - ) - - # Prepare everything with our `accelerator`. - text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( - text_encoder, optimizer, train_dataloader, lr_scheduler - ) - - # For mixed precision training we cast the unet and vae weights to half-precision - # as these models are only used for inference, keeping weights in full precision is not required. - weight_dtype = torch.float32 - if accelerator.mixed_precision == "fp16": - weight_dtype = torch.float16 - elif accelerator.mixed_precision == "bf16": - weight_dtype = torch.bfloat16 - - # Move vae and unet to device and cast to weight_dtype - unet.to(accelerator.device, dtype=weight_dtype) - vae.to(accelerator.device, dtype=weight_dtype) - - # We need to recalculate our total training steps as the size of the training dataloader may have changed. - num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) - if overrode_max_train_steps: - args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch - # Afterwards we recalculate our number of training epochs - args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) - - # We need to initialize the trackers we use, and also store our configuration. - # The trackers initializes automatically on the main process. - if accelerator.is_main_process: - accelerator.init_trackers("textual_inversion", config=vars(args)) - - # Train! - total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps - - logger.info("***** Running training *****") - logger.info(f" Num examples = {len(train_dataset)}") - logger.info(f" Num Epochs = {args.num_train_epochs}") - logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") - logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") - logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") - logger.info(f" Total optimization steps = {args.max_train_steps}") - global_step = 0 - first_epoch = 0 - # Potentially load in the weights and states from a previous save - if args.resume_from_checkpoint: - if args.resume_from_checkpoint != "latest": - path = os.path.basename(args.resume_from_checkpoint) - else: - # Get the most recent checkpoint - dirs = os.listdir(args.output_dir) - dirs = [d for d in dirs if d.startswith("checkpoint")] - dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) - path = dirs[-1] if len(dirs) > 0 else None - - if path is None: - accelerator.print( - f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." 
- ) - args.resume_from_checkpoint = None - else: - accelerator.print(f"Resuming from checkpoint {path}") - accelerator.load_state(os.path.join(args.output_dir, path)) - global_step = int(path.split("-")[1]) - - resume_global_step = global_step * args.gradient_accumulation_steps - first_epoch = global_step // num_update_steps_per_epoch - resume_step = resume_global_step % (num_update_steps_per_epoch * args.gradient_accumulation_steps) - - # Only show the progress bar once on each machine. - progress_bar = tqdm(range(global_step, args.max_train_steps), disable=not accelerator.is_local_main_process) - progress_bar.set_description("Steps") - - # keep original embeddings as reference - orig_embeds_params = accelerator.unwrap_model(text_encoder).get_input_embeddings().weight.data.clone() - - for epoch in range(first_epoch, args.num_train_epochs): - text_encoder.train() - for step, batch in enumerate(train_dataloader): - # Skip steps until we reach the resumed step - if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step: - if step % args.gradient_accumulation_steps == 0: - progress_bar.update(1) - continue - - with accelerator.accumulate(text_encoder): - # Convert images to latent space - latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample().detach() - latents = latents * vae.config.scaling_factor - - # Sample noise that we'll add to the latents - noise = torch.randn_like(latents) - bsz = latents.shape[0] - # Sample a random timestep for each image - timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device) - timesteps = timesteps.long() - - # Add noise to the latents according to the noise magnitude at each timestep - # (this is the forward diffusion process) - noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) - - # Get the text embedding for conditioning - encoder_hidden_states = text_encoder(batch["input_ids"])[0].to(dtype=weight_dtype) - - # Predict the noise residual - model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample - - # Get the target for loss depending on the prediction type - if noise_scheduler.config.prediction_type == "epsilon": - target = noise - elif noise_scheduler.config.prediction_type == "v_prediction": - target = noise_scheduler.get_velocity(latents, noise, timesteps) - else: - raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") - - loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") - - accelerator.backward(loss) - - optimizer.step() - lr_scheduler.step() - optimizer.zero_grad() - - # Let's make sure we don't update any embedding weights besides the newly added token - index_no_updates = torch.arange(len(tokenizer)) != placeholder_token_id - with torch.no_grad(): - accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[ - index_no_updates - ] = orig_embeds_params[index_no_updates] - - # Checks if the accelerator has performed an optimization step behind the scenes - if accelerator.sync_gradients: - progress_bar.update(1) - global_step += 1 - if global_step % args.save_steps == 0: - save_path = os.path.join(args.output_dir, f"learned_embeds-steps-{global_step}.bin") - save_progress(text_encoder, placeholder_token_id, accelerator, args, save_path) - - if accelerator.is_main_process: - if global_step % args.checkpointing_steps == 0: - save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") - accelerator.save_state(save_path) - logger.info(f"Saved state 
to {save_path}") - - if args.validation_prompt is not None and global_step % args.validation_steps == 0: - log_validation(text_encoder, tokenizer, unet, vae, args, accelerator, weight_dtype, epoch) - - logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} - progress_bar.set_postfix(**logs) - accelerator.log(logs, step=global_step) - - if global_step >= args.max_train_steps: - break - # Create the pipeline using using the trained modules and save it. - accelerator.wait_for_everyone() - if accelerator.is_main_process: - if args.push_to_hub and args.only_save_embeds: - logger.warn("Enabling full model saving because --push_to_hub=True was specified.") - save_full_model = True - else: - save_full_model = not args.only_save_embeds - if save_full_model: - pipeline = StableDiffusionPipeline.from_pretrained( - args.pretrained_model_name_or_path, - text_encoder=accelerator.unwrap_model(text_encoder), - vae=vae, - unet=unet, - tokenizer=tokenizer, - ) - pipeline.save_pretrained(args.output_dir) - # Save the newly trained embeddings - save_path = os.path.join(args.output_dir, "learned_embeds.bin") - save_progress(text_encoder, placeholder_token_id, accelerator, args, save_path) - - if args.push_to_hub: - upload_folder( - repo_id=repo_id, - folder_path=args.output_dir, - commit_message="End of training", - ignore_patterns=["step_*", "epoch_*"], - ) - - accelerator.end_training() - - -if __name__ == "__main__": - main() diff --git a/spaces/declare-lab/tango/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py b/spaces/declare-lab/tango/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py deleted file mode 100644 index a0befdae73c4d1c152c0e5180d78d6e708aadc48..0000000000000000000000000000000000000000 --- a/spaces/declare-lab/tango/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py +++ /dev/null @@ -1,762 +0,0 @@ -# Copyright 2023 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import inspect -from typing import Any, Callable, Dict, List, Optional, Union - -import numpy as np -import PIL -import torch -from packaging import version -from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer - -from ...configuration_utils import FrozenDict -from ...image_processor import VaeImageProcessor -from ...loaders import TextualInversionLoaderMixin -from ...models import AutoencoderKL, UNet2DConditionModel -from ...schedulers import KarrasDiffusionSchedulers -from ...utils import ( - PIL_INTERPOLATION, - deprecate, - is_accelerate_available, - is_accelerate_version, - logging, - randn_tensor, - replace_example_docstring, -) -from ..pipeline_utils import DiffusionPipeline -from . 
import StableDiffusionPipelineOutput -from .safety_checker import StableDiffusionSafetyChecker - - -logger = logging.get_logger(__name__) # pylint: disable=invalid-name - -EXAMPLE_DOC_STRING = """ - Examples: - ```py - >>> import requests - >>> import torch - >>> from PIL import Image - >>> from io import BytesIO - - >>> from diffusers import StableDiffusionImg2ImgPipeline - - >>> device = "cuda" - >>> model_id_or_path = "runwayml/stable-diffusion-v1-5" - >>> pipe = StableDiffusionImg2ImgPipeline.from_pretrained(model_id_or_path, torch_dtype=torch.float16) - >>> pipe = pipe.to(device) - - >>> url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg" - - >>> response = requests.get(url) - >>> init_image = Image.open(BytesIO(response.content)).convert("RGB") - >>> init_image = init_image.resize((768, 512)) - - >>> prompt = "A fantasy landscape, trending on artstation" - - >>> images = pipe(prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5).images - >>> images[0].save("fantasy_landscape.png") - ``` -""" - - -def preprocess(image): - if isinstance(image, torch.Tensor): - return image - elif isinstance(image, PIL.Image.Image): - image = [image] - - if isinstance(image[0], PIL.Image.Image): - w, h = image[0].size - w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 - - image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image] - image = np.concatenate(image, axis=0) - image = np.array(image).astype(np.float32) / 255.0 - image = image.transpose(0, 3, 1, 2) - image = 2.0 * image - 1.0 - image = torch.from_numpy(image) - elif isinstance(image[0], torch.Tensor): - image = torch.cat(image, dim=0) - return image - - -class StableDiffusionImg2ImgPipeline(DiffusionPipeline, TextualInversionLoaderMixin): - r""" - Pipeline for text-guided image to image generation using Stable Diffusion. - - This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the - library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) - - Args: - vae ([`AutoencoderKL`]): - Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. - text_encoder ([`CLIPTextModel`]): - Frozen text-encoder. Stable Diffusion uses the text portion of - [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically - the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. - tokenizer (`CLIPTokenizer`): - Tokenizer of class - [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). - unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. - scheduler ([`SchedulerMixin`]): - A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of - [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. - safety_checker ([`StableDiffusionSafetyChecker`]): - Classification module that estimates whether generated images could be considered offensive or harmful. - Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. - feature_extractor ([`CLIPImageProcessor`]): - Model that extracts features from generated images to be used as inputs for the `safety_checker`. 
- """ - _optional_components = ["safety_checker", "feature_extractor"] - - def __init__( - self, - vae: AutoencoderKL, - text_encoder: CLIPTextModel, - tokenizer: CLIPTokenizer, - unet: UNet2DConditionModel, - scheduler: KarrasDiffusionSchedulers, - safety_checker: StableDiffusionSafetyChecker, - feature_extractor: CLIPImageProcessor, - requires_safety_checker: bool = True, - ): - super().__init__() - - if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: - deprecation_message = ( - f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" - f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " - "to update the config accordingly as leaving `steps_offset` might led to incorrect results" - " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," - " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" - " file" - ) - deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) - new_config = dict(scheduler.config) - new_config["steps_offset"] = 1 - scheduler._internal_dict = FrozenDict(new_config) - - if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: - deprecation_message = ( - f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." - " `clip_sample` should be set to False in the configuration file. Please make sure to update the" - " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" - " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" - " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" - ) - deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) - new_config = dict(scheduler.config) - new_config["clip_sample"] = False - scheduler._internal_dict = FrozenDict(new_config) - - if safety_checker is None and requires_safety_checker: - logger.warning( - f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" - " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" - " results in services or applications open to the public. Both the diffusers team and Hugging Face" - " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" - " it only for use-cases that involve analyzing network behavior or auditing its results. For more" - " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." - ) - - if safety_checker is not None and feature_extractor is None: - raise ValueError( - "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" - " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." - ) - - is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( - version.parse(unet.config._diffusers_version).base_version - ) < version.parse("0.9.0.dev0") - is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 - if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: - deprecation_message = ( - "The configuration file of the unet has set the default `sample_size` to smaller than" - " 64 which seems highly unlikely. 
If your checkpoint is a fine-tuned version of any of the" - " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" - " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" - " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" - " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" - " in the config might lead to incorrect results in future versions. If you have downloaded this" - " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" - " the `unet/config.json` file" - ) - deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) - new_config = dict(unet.config) - new_config["sample_size"] = 64 - unet._internal_dict = FrozenDict(new_config) - self.register_modules( - vae=vae, - text_encoder=text_encoder, - tokenizer=tokenizer, - unet=unet, - scheduler=scheduler, - safety_checker=safety_checker, - feature_extractor=feature_extractor, - ) - self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) - - self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) - self.register_to_config( - requires_safety_checker=requires_safety_checker, - ) - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_sequential_cpu_offload - def enable_sequential_cpu_offload(self, gpu_id=0): - r""" - Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet, - text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a - `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called. - Note that offloading happens on a submodule basis. Memory savings are higher than with - `enable_model_cpu_offload`, but performance is lower. - """ - if is_accelerate_available() and is_accelerate_version(">=", "0.14.0"): - from accelerate import cpu_offload - else: - raise ImportError("`enable_sequential_cpu_offload` requires `accelerate v0.14.0` or higher") - - device = torch.device(f"cuda:{gpu_id}") - - if self.device.type != "cpu": - self.to("cpu", silence_dtype_warnings=True) - torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) - - for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]: - cpu_offload(cpu_offloaded_model, device) - - if self.safety_checker is not None: - cpu_offload(self.safety_checker, execution_device=device, offload_buffers=True) - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_model_cpu_offload - def enable_model_cpu_offload(self, gpu_id=0): - r""" - Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared - to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward` - method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with - `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`. 
- """ - if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"): - from accelerate import cpu_offload_with_hook - else: - raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.") - - device = torch.device(f"cuda:{gpu_id}") - - if self.device.type != "cpu": - self.to("cpu", silence_dtype_warnings=True) - torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) - - hook = None - for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]: - _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook) - - if self.safety_checker is not None: - _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook) - - # We'll offload the last model manually. - self.final_offload_hook = hook - - @property - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device - def _execution_device(self): - r""" - Returns the device on which the pipeline's models will be executed. After calling - `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module - hooks. - """ - if not hasattr(self.unet, "_hf_hook"): - return self.device - for module in self.unet.modules(): - if ( - hasattr(module, "_hf_hook") - and hasattr(module._hf_hook, "execution_device") - and module._hf_hook.execution_device is not None - ): - return torch.device(module._hf_hook.execution_device) - return self.device - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt - def _encode_prompt( - self, - prompt, - device, - num_images_per_prompt, - do_classifier_free_guidance, - negative_prompt=None, - prompt_embeds: Optional[torch.FloatTensor] = None, - negative_prompt_embeds: Optional[torch.FloatTensor] = None, - ): - r""" - Encodes the prompt into text encoder hidden states. - - Args: - prompt (`str` or `List[str]`, *optional*): - prompt to be encoded - device: (`torch.device`): - torch device - num_images_per_prompt (`int`): - number of images that should be generated per prompt - do_classifier_free_guidance (`bool`): - whether to use classifier free guidance or not - negative_prompt (`str` or `List[str]`, *optional*): - The prompt or prompts not to guide the image generation. If not defined, one has to pass - `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is - less than `1`). - prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not - provided, text embeddings will be generated from `prompt` input argument. - negative_prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt - weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input - argument. 
- """ - if prompt is not None and isinstance(prompt, str): - batch_size = 1 - elif prompt is not None and isinstance(prompt, list): - batch_size = len(prompt) - else: - batch_size = prompt_embeds.shape[0] - - if prompt_embeds is None: - # textual inversion: procecss multi-vector tokens if necessary - if isinstance(self, TextualInversionLoaderMixin): - prompt = self.maybe_convert_prompt(prompt, self.tokenizer) - - text_inputs = self.tokenizer( - prompt, - padding="max_length", - max_length=self.tokenizer.model_max_length, - truncation=True, - return_tensors="pt", - ) - text_input_ids = text_inputs.input_ids - untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids - - if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( - text_input_ids, untruncated_ids - ): - removed_text = self.tokenizer.batch_decode( - untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] - ) - logger.warning( - "The following part of your input was truncated because CLIP can only handle sequences up to" - f" {self.tokenizer.model_max_length} tokens: {removed_text}" - ) - - if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: - attention_mask = text_inputs.attention_mask.to(device) - else: - attention_mask = None - - prompt_embeds = self.text_encoder( - text_input_ids.to(device), - attention_mask=attention_mask, - ) - prompt_embeds = prompt_embeds[0] - - prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) - - bs_embed, seq_len, _ = prompt_embeds.shape - # duplicate text embeddings for each generation per prompt, using mps friendly method - prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) - prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) - - # get unconditional embeddings for classifier free guidance - if do_classifier_free_guidance and negative_prompt_embeds is None: - uncond_tokens: List[str] - if negative_prompt is None: - uncond_tokens = [""] * batch_size - elif type(prompt) is not type(negative_prompt): - raise TypeError( - f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" - f" {type(prompt)}." - ) - elif isinstance(negative_prompt, str): - uncond_tokens = [negative_prompt] - elif batch_size != len(negative_prompt): - raise ValueError( - f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" - f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" - " the batch size of `prompt`." 
- ) - else: - uncond_tokens = negative_prompt - - # textual inversion: procecss multi-vector tokens if necessary - if isinstance(self, TextualInversionLoaderMixin): - uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) - - max_length = prompt_embeds.shape[1] - uncond_input = self.tokenizer( - uncond_tokens, - padding="max_length", - max_length=max_length, - truncation=True, - return_tensors="pt", - ) - - if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: - attention_mask = uncond_input.attention_mask.to(device) - else: - attention_mask = None - - negative_prompt_embeds = self.text_encoder( - uncond_input.input_ids.to(device), - attention_mask=attention_mask, - ) - negative_prompt_embeds = negative_prompt_embeds[0] - - if do_classifier_free_guidance: - # duplicate unconditional embeddings for each generation per prompt, using mps friendly method - seq_len = negative_prompt_embeds.shape[1] - - negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) - - negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) - negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) - - # For classifier free guidance, we need to do two forward passes. - # Here we concatenate the unconditional and text embeddings into a single batch - # to avoid doing two forward passes - prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) - - return prompt_embeds - - def run_safety_checker(self, image, device, dtype): - feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") - safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) - image, has_nsfw_concept = self.safety_checker( - images=image, clip_input=safety_checker_input.pixel_values.to(dtype) - ) - return image, has_nsfw_concept - - def decode_latents(self, latents): - latents = 1 / self.vae.config.scaling_factor * latents - image = self.vae.decode(latents).sample - image = (image / 2 + 0.5).clamp(0, 1) - return image - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs - def prepare_extra_step_kwargs(self, generator, eta): - # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature - # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 - # and should be between [0, 1] - - accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) - extra_step_kwargs = {} - if accepts_eta: - extra_step_kwargs["eta"] = eta - - # check if the scheduler accepts generator - accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) - if accepts_generator: - extra_step_kwargs["generator"] = generator - return extra_step_kwargs - - def check_inputs( - self, prompt, strength, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None - ): - if strength < 0 or strength > 1: - raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") - - if (callback_steps is None) or ( - callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) - ): - raise ValueError( - f"`callback_steps` has to be a positive integer but is {callback_steps} of type" - f" {type(callback_steps)}." - ) - - if prompt is not None and prompt_embeds is not None: - raise ValueError( - f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" - " only forward one of the two." - ) - elif prompt is None and prompt_embeds is None: - raise ValueError( - "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." - ) - elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): - raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") - - if negative_prompt is not None and negative_prompt_embeds is not None: - raise ValueError( - f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" - f" {negative_prompt_embeds}. Please make sure to only forward one of the two." - ) - - if prompt_embeds is not None and negative_prompt_embeds is not None: - if prompt_embeds.shape != negative_prompt_embeds.shape: - raise ValueError( - "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" - f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" - f" {negative_prompt_embeds.shape}." - ) - - def get_timesteps(self, num_inference_steps, strength, device): - # get the original timestep using init_timestep - init_timestep = min(int(num_inference_steps * strength), num_inference_steps) - - t_start = max(num_inference_steps - init_timestep, 0) - timesteps = self.scheduler.timesteps[t_start:] - - return timesteps, num_inference_steps - t_start - - def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None): - if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): - raise ValueError( - f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" - ) - - image = image.to(device=device, dtype=dtype) - - batch_size = batch_size * num_images_per_prompt - if isinstance(generator, list) and len(generator) != batch_size: - raise ValueError( - f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" - f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
- ) - - if isinstance(generator, list): - init_latents = [ - self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size) - ] - init_latents = torch.cat(init_latents, dim=0) - else: - init_latents = self.vae.encode(image).latent_dist.sample(generator) - - init_latents = self.vae.config.scaling_factor * init_latents - - if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: - # expand init_latents for batch_size - deprecation_message = ( - f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial" - " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" - " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" - " your script to pass as many initial images as text prompts to suppress this warning." - ) - deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False) - additional_image_per_prompt = batch_size // init_latents.shape[0] - init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) - elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: - raise ValueError( - f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." - ) - else: - init_latents = torch.cat([init_latents], dim=0) - - shape = init_latents.shape - noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) - - # get latents - init_latents = self.scheduler.add_noise(init_latents, noise, timestep) - latents = init_latents - - return latents - - @torch.no_grad() - @replace_example_docstring(EXAMPLE_DOC_STRING) - def __call__( - self, - prompt: Union[str, List[str]] = None, - image: Union[torch.FloatTensor, PIL.Image.Image] = None, - strength: float = 0.8, - num_inference_steps: Optional[int] = 50, - guidance_scale: Optional[float] = 7.5, - negative_prompt: Optional[Union[str, List[str]]] = None, - num_images_per_prompt: Optional[int] = 1, - eta: Optional[float] = 0.0, - generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, - prompt_embeds: Optional[torch.FloatTensor] = None, - negative_prompt_embeds: Optional[torch.FloatTensor] = None, - output_type: Optional[str] = "pil", - return_dict: bool = True, - callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, - callback_steps: int = 1, - cross_attention_kwargs: Optional[Dict[str, Any]] = None, - ): - r""" - Function invoked when calling the pipeline for generation. - - Args: - prompt (`str` or `List[str]`, *optional*): - The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. - instead. - image (`torch.FloatTensor` or `PIL.Image.Image`): - `Image`, or tensor representing an image batch, that will be used as the starting point for the - process. - strength (`float`, *optional*, defaults to 0.8): - Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` - will be used as a starting point, adding more noise to it the larger the `strength`. The number of - denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will - be maximum and the denoising process will run for the full number of iterations specified in - `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. - num_inference_steps (`int`, *optional*, defaults to 50): - The number of denoising steps. 
More denoising steps usually lead to a higher quality image at the - expense of slower inference. This parameter will be modulated by `strength`. - guidance_scale (`float`, *optional*, defaults to 7.5): - Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). - `guidance_scale` is defined as `w` of equation 2. of [Imagen - Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > - 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, - usually at the expense of lower image quality. - negative_prompt (`str` or `List[str]`, *optional*): - The prompt or prompts not to guide the image generation. If not defined, one has to pass - `negative_prompt_embeds`. instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` - is less than `1`). - num_images_per_prompt (`int`, *optional*, defaults to 1): - The number of images to generate per prompt. - eta (`float`, *optional*, defaults to 0.0): - Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to - [`schedulers.DDIMScheduler`], will be ignored for others. - generator (`torch.Generator`, *optional*): - One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) - to make generation deterministic. - prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not - provided, text embeddings will be generated from `prompt` input argument. - negative_prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt - weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input - argument. - output_type (`str`, *optional*, defaults to `"pil"`): - The output format of the generate image. Choose between - [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. - return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a - plain tuple. - callback (`Callable`, *optional*): - A function that will be called every `callback_steps` steps during inference. The function will be - called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. - callback_steps (`int`, *optional*, defaults to 1): - The frequency at which the `callback` function will be called. If not specified, the callback will be - called at every step. - cross_attention_kwargs (`dict`, *optional*): - A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under - `self.processor` in - [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py). - Examples: - - Returns: - [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: - [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. - When returning a tuple, the first element is a list with the generated images, and the second element is a - list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" - (nsfw) content, according to the `safety_checker`. - """ - # 1. Check inputs. 
Raise error if not correct - self.check_inputs(prompt, strength, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds) - - # 2. Define call parameters - if prompt is not None and isinstance(prompt, str): - batch_size = 1 - elif prompt is not None and isinstance(prompt, list): - batch_size = len(prompt) - else: - batch_size = prompt_embeds.shape[0] - device = self._execution_device - # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) - # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` - # corresponds to doing no classifier free guidance. - do_classifier_free_guidance = guidance_scale > 1.0 - - # 3. Encode input prompt - prompt_embeds = self._encode_prompt( - prompt, - device, - num_images_per_prompt, - do_classifier_free_guidance, - negative_prompt, - prompt_embeds=prompt_embeds, - negative_prompt_embeds=negative_prompt_embeds, - ) - - # 4. Preprocess image - image = self.image_processor.preprocess(image) - - # 5. set timesteps - self.scheduler.set_timesteps(num_inference_steps, device=device) - timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) - latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) - - # 6. Prepare latent variables - latents = self.prepare_latents( - image, latent_timestep, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator - ) - - # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline - extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) - - # 8. Denoising loop - num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order - with self.progress_bar(total=num_inference_steps) as progress_bar: - for i, t in enumerate(timesteps): - # expand the latents if we are doing classifier free guidance - latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents - latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) - - # predict the noise residual - noise_pred = self.unet( - latent_model_input, - t, - encoder_hidden_states=prompt_embeds, - cross_attention_kwargs=cross_attention_kwargs, - ).sample - - # perform guidance - if do_classifier_free_guidance: - noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) - noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) - - # compute the previous noisy sample x_t -> x_t-1 - latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample - - # call the callback, if provided - if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): - progress_bar.update() - if callback is not None and i % callback_steps == 0: - callback(i, t, latents) - - if output_type not in ["latent", "pt", "np", "pil"]: - deprecation_message = ( - f"the output_type {output_type} is outdated. 
Please make sure to set it to one of these instead: " - "`pil`, `np`, `pt`, `latent`" - ) - deprecate("Unsupported output_type", "1.0.0", deprecation_message, standard_warn=False) - output_type = "np" - - if output_type == "latent": - image = latents - has_nsfw_concept = None - - else: - image = self.decode_latents(latents) - - if self.safety_checker is not None: - image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) - else: - has_nsfw_concept = False - - image = self.image_processor.postprocess(image, output_type=output_type) - - # Offload last model to CPU - if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: - self.final_offload_hook.offload() - - if not return_dict: - return (image, has_nsfw_concept) - - return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/spaces/deepwisdom/MetaGPT/metagpt/document_store/qdrant_store.py b/spaces/deepwisdom/MetaGPT/metagpt/document_store/qdrant_store.py deleted file mode 100644 index 98b82cf872ae0514487f88dbeadb7682da36f877..0000000000000000000000000000000000000000 --- a/spaces/deepwisdom/MetaGPT/metagpt/document_store/qdrant_store.py +++ /dev/null @@ -1,129 +0,0 @@ -from dataclasses import dataclass -from typing import List - -from qdrant_client import QdrantClient -from qdrant_client.models import Filter, PointStruct, VectorParams - -from metagpt.document_store.base_store import BaseStore - - -@dataclass -class QdrantConnection: - """ - Args: - url: qdrant url - host: qdrant host - port: qdrant port - memory: qdrant service use memory mode - api_key: qdrant cloud api_key - """ - url: str = None - host: str = None - port: int = None - memory: bool = False - api_key: str = None - - -class QdrantStore(BaseStore): - def __init__(self, connect: QdrantConnection): - if connect.memory: - self.client = QdrantClient(":memory:") - elif connect.url: - self.client = QdrantClient(url=connect.url, api_key=connect.api_key) - elif connect.host and connect.port: - self.client = QdrantClient( - host=connect.host, port=connect.port, api_key=connect.api_key - ) - else: - raise Exception("please check QdrantConnection.") - - def create_collection( - self, - collection_name: str, - vectors_config: VectorParams, - force_recreate=False, - **kwargs, - ): - """ - create a collection - Args: - collection_name: collection name - vectors_config: VectorParams object,detail in https://github.com/qdrant/qdrant-client - force_recreate: default is False, if True, will delete exists collection,then create it - **kwargs: - - Returns: - - """ - try: - self.client.get_collection(collection_name) - if force_recreate: - res = self.client.recreate_collection( - collection_name, vectors_config=vectors_config, **kwargs - ) - return res - return True - except: # noqa: E722 - return self.client.recreate_collection( - collection_name, vectors_config=vectors_config, **kwargs - ) - - def has_collection(self, collection_name: str): - try: - self.client.get_collection(collection_name) - return True - except: # noqa: E722 - return False - - def delete_collection(self, collection_name: str, timeout=60): - res = self.client.delete_collection(collection_name, timeout=timeout) - if not res: - raise Exception(f"Delete collection {collection_name} failed.") - - def add(self, collection_name: str, points: List[PointStruct]): - """ - add some vector data to qdrant - Args: - collection_name: collection name - points: list of PointStruct object, about PointStruct detail in 
https://github.com/qdrant/qdrant-client - - Returns: NoneX - - """ - # self.client.upload_records() - self.client.upsert( - collection_name, - points, - ) - - def search( - self, - collection_name: str, - query: List[float], - query_filter: Filter = None, - k=10, - return_vector=False, - ): - """ - vector search - Args: - collection_name: qdrant collection name - query: input vector - query_filter: Filter object, detail in https://github.com/qdrant/qdrant-client - k: return the most similar k pieces of data - return_vector: whether return vector - - Returns: list of dict - - """ - hits = self.client.search( - collection_name=collection_name, - query_vector=query, - query_filter=query_filter, - limit=k, - with_vectors=return_vector, - ) - return [hit.__dict__ for hit in hits] - - def write(self, *args, **kwargs): - pass diff --git a/spaces/diacanFperku/AutoGPT/Bypassing Kaspersky Endpoint Security 11.md b/spaces/diacanFperku/AutoGPT/Bypassing Kaspersky Endpoint Security 11.md deleted file mode 100644 index 23dbf6eacbd00b4b0766c2f46ae624c599e45e96..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Bypassing Kaspersky Endpoint Security 11.md +++ /dev/null @@ -1,33 +0,0 @@ -
      -

      How to Bypass Kaspersky Endpoint Security 11 for Windows

      -

      Kaspersky Endpoint Security 11 for Windows is a comprehensive security solution that protects your business from cyberthreats. It offers advanced features such as anti-malware, firewall, encryption, application control, device control, web control, patch management, vulnerability assessment, and more.

      -

      Bypassing Kaspersky Endpoint Security 11


      DOWNLOAD https://gohhs.com/2uFTiw



      -

      However, sometimes you may need to bypass Kaspersky Endpoint Security 11 for Windows for various reasons. For example, you may want to install third-party security software that is incompatible with Kaspersky Endpoint Security 11 for Windows, or you may need to perform troubleshooting tasks that require temporarily disabling Kaspersky Endpoint Security 11 for Windows.

      -

      In this article, we will show you how to bypass Kaspersky Endpoint Security 11 for Windows in two ways: by skipping the check for incompatible software during the installation process, and by stopping incompatible applications from being removed automatically.

      -

      How to Skip the Check for Incompatible Software During the Installation Process

      -

      Before installation, Kaspersky Endpoint Security 11 for Windows performs a search for incompatible software. If incompatible software is detected, the installation stops. To resume the installation, you should remove the detected software.

      -

      If you are sure that the detected application will not cause any compatibility issues, you can skip the check for incompatible software by following these steps:

      -
        -
      1. Download the latest version of Kaspersky Endpoint Security 11 for Windows from here.
      2. Run the installation file as an administrator.
      3. On the Welcome screen, click Next.
      4. On the License Agreement screen, read the agreement and select I accept the terms of the license agreement. Click Next.
      5. On the Installation Options screen, select Custom installation. Click Next.
      6. On the Custom Installation screen, click Advanced settings.
      7. On the Advanced Settings screen, select Skip check for incompatible software. Click OK.
      8. Click Next to continue with the installation process.
      -
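      If you install Kaspersky Endpoint Security 11 for Windows silently (for example, from a deployment script) rather than through the wizard, you can usually get the same effect with the installer's command-line properties. The /s and /p switches and the EULA, PRIVACYPOLICY, and SKIPPRODUCTCHECK properties used below are based on Kaspersky's documented setup parameters as we understand them; treat them as assumptions and verify them against the documentation for your exact build before relying on them. A minimal Python sketch of such a scripted installation:

```python
import subprocess

# Hypothetical path to the extracted KES 11 distribution -- adjust for your environment.
SETUP_EXE = r"C:\KES11\setup.exe"

# /s runs the installer silently; each /p passes an MSI-style property.
# EULA=1 and PRIVACYPOLICY=1 are assumed to accept the license and privacy policy
# in unattended mode, and SKIPPRODUCTCHECK=1 is assumed (per Kaspersky's setup
# parameters) to disable the search for incompatible software -- verify the exact
# property names for your build.
cmd = [
    SETUP_EXE,
    "/s",
    "/pEULA=1",
    "/pPRIVACYPOLICY=1",
    "/pSKIPPRODUCTCHECK=1",
]

result = subprocess.run(cmd, check=False)
print(f"Installer exited with code {result.returncode}")
```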

      How to Stop Incompatible Applications from Being Removed Automatically

      -

      If Kaspersky Endpoint Security 11 for Windows is already installed on your computer and you want to install an additional third-party security solution, check that solution's documentation for compatibility with security software from other vendors before installing it.

      -

      -

      If you install an incompatible application, Kaspersky Endpoint Security 11 for Windows may remove it automatically or prompt you to remove it manually. To prevent this from happening, you can stop incompatible applications from being removed automatically by following these steps:

      -
        -
      1. Open Kaspersky Endpoint Security 11 for Windows.
      2. Click Settings in the lower-left corner of the main window.
      3. Select General Settings in the left pane.
      4. In the right pane, scroll down to the Incompatible Software Detection and Removal section.
      5. Clear the checkbox next to Remove incompatible applications automatically without user confirmation. Click Save.
      -

      Note that this option does not affect the detection of incompatible software. Kaspersky Endpoint Security 11 for Windows will still notify you about any incompatible applications on your computer. You can decide whether to keep them or remove them manually.
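      If you script the installation instead of using the interface, the removal behavior also appears to have an install-time counterpart: the SKIPPRODUCTUNINSTALL=1 setup property. As with the earlier sketch, this property name is taken from Kaspersky's setup parameters as we understand them and should be verified against the documentation for your build. A minimal sketch:

```python
import subprocess

# Hypothetical installer path -- adjust for your environment.
# SKIPPRODUCTUNINSTALL=1 is assumed (per Kaspersky's setup parameters) to disable
# the automatic removal of incompatible software during a silent installation --
# verify the property name before relying on it.
subprocess.run(
    [r"C:\KES11\setup.exe", "/s", "/pSKIPPRODUCTUNINSTALL=1"],
    check=False,
)
```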

      d5da3c52bf
      -
      -
      \ No newline at end of file diff --git a/spaces/diagaiwei/ir_chinese_medqa/app.py b/spaces/diagaiwei/ir_chinese_medqa/app.py deleted file mode 100644 index 4e3357e4132b477b6db3e4dab04611db695f0c9b..0000000000000000000000000000000000000000 --- a/spaces/diagaiwei/ir_chinese_medqa/app.py +++ /dev/null @@ -1,62 +0,0 @@ -import random -import gradio as gr -from colbert.data import Queries -from colbert.infra import Run, RunConfig, ColBERTConfig -from colbert import Searcher - - -# def init(): -searcher = None -with Run().context(RunConfig(nranks=1, experiment="medqa")): - config = ColBERTConfig( - root="./experiments", - ) - searcher = Searcher(index="medqa_idx", config=config) - - - -def search(query): - results = searcher.search(query, k=5) - responses=[] - # idx = 0 - for passage_id, _, _ in zip(*results): - responses.append(searcher.collection[passage_id]) - # idx = idx+1 - return responses - - - - - -def chat(question): - # history = history or [] - # message = message.lower() - - # if message.startswith("how many"): - # response = random.randint(1, 10) - # elif message.startswith("how"): - # response = random.choice(["Great", "Good", "Okay", "Bad"]) - # elif message.startswith("where"): - # response = random.choice(["Here", "There", "Somewhere"]) - # else: - # response = "I don't know" - responses = search(question) - # history.append((message, response)) - return responses - - -title = "基于ColBERT的中文健康问题QA模型" -description = "用中文输入健康问题,比如 '高血压吃什么药物?', 程序返回5条跟问题最相关的回答。" - - - -chatbot = gr.Chatbot().style(color_map=("green", "pink")) -demo = gr.Interface( - chat, - inputs=gr.Textbox(lines=2, placeholder="输入你的问题"), - title = title, - description=description, - outputs =["text", "text","text","text","text"] -) -if __name__ == "__main__": - demo.launch() diff --git a/spaces/diego2554/RemBG_super/rembg/commands/s_command.py b/spaces/diego2554/RemBG_super/rembg/commands/s_command.py deleted file mode 100644 index 4fba1ce3e71e0e55fd2d9d81a4b35b5637c1be37..0000000000000000000000000000000000000000 --- a/spaces/diego2554/RemBG_super/rembg/commands/s_command.py +++ /dev/null @@ -1,285 +0,0 @@ -import json -import os -import webbrowser -from typing import Optional, Tuple, cast - -import aiohttp -import click -import gradio as gr -import uvicorn -from asyncer import asyncify -from fastapi import Depends, FastAPI, File, Form, Query -from fastapi.middleware.cors import CORSMiddleware -from starlette.responses import Response - -from .._version import get_versions -from ..bg import remove -from ..session_factory import new_session -from ..sessions import sessions_names -from ..sessions.base import BaseSession - - -@click.command( - name="s", - help="for a http server", -) -@click.option( - "-p", - "--port", - default=5000, - type=int, - show_default=True, - help="port", -) -@click.option( - "-l", - "--log_level", - default="info", - type=str, - show_default=True, - help="log level", -) -@click.option( - "-t", - "--threads", - default=None, - type=int, - show_default=True, - help="number of worker threads", -) -def s_command(port: int, log_level: str, threads: int) -> None: - sessions: dict[str, BaseSession] = {} - tags_metadata = [ - { - "name": "Background Removal", - "description": "Endpoints that perform background removal with different image sources.", - "externalDocs": { - "description": "GitHub Source", - "url": "https://github.com/danielgatis/rembg", - }, - }, - ] - app = FastAPI( - title="Rembg", - description="Rembg is a tool to remove images background. 
That is it.", - version=get_versions()["version"], - contact={ - "name": "Daniel Gatis", - "url": "https://github.com/danielgatis", - "email": "danielgatis@gmail.com", - }, - license_info={ - "name": "MIT License", - "url": "https://github.com/danielgatis/rembg/blob/main/LICENSE.txt", - }, - openapi_tags=tags_metadata, - docs_url="/api", - ) - - app.add_middleware( - CORSMiddleware, - allow_credentials=True, - allow_origins=["*"], - allow_methods=["*"], - allow_headers=["*"], - ) - - class CommonQueryParams: - def __init__( - self, - model: str = Query( - description="Model to use when processing image", - regex=r"(" + "|".join(sessions_names) + ")", - default="u2net", - ), - a: bool = Query(default=False, description="Enable Alpha Matting"), - af: int = Query( - default=240, - ge=0, - le=255, - description="Alpha Matting (Foreground Threshold)", - ), - ab: int = Query( - default=10, - ge=0, - le=255, - description="Alpha Matting (Background Threshold)", - ), - ae: int = Query( - default=10, ge=0, description="Alpha Matting (Erode Structure Size)" - ), - om: bool = Query(default=False, description="Only Mask"), - ppm: bool = Query(default=False, description="Post Process Mask"), - bgc: Optional[str] = Query(default=None, description="Background Color"), - extras: Optional[str] = Query( - default=None, description="Extra parameters as JSON" - ), - ): - self.model = model - self.a = a - self.af = af - self.ab = ab - self.ae = ae - self.om = om - self.ppm = ppm - self.extras = extras - self.bgc = ( - cast(Tuple[int, int, int, int], tuple(map(int, bgc.split(",")))) - if bgc - else None - ) - - class CommonQueryPostParams: - def __init__( - self, - model: str = Form( - description="Model to use when processing image", - regex=r"(" + "|".join(sessions_names) + ")", - default="u2net", - ), - a: bool = Form(default=False, description="Enable Alpha Matting"), - af: int = Form( - default=240, - ge=0, - le=255, - description="Alpha Matting (Foreground Threshold)", - ), - ab: int = Form( - default=10, - ge=0, - le=255, - description="Alpha Matting (Background Threshold)", - ), - ae: int = Form( - default=10, ge=0, description="Alpha Matting (Erode Structure Size)" - ), - om: bool = Form(default=False, description="Only Mask"), - ppm: bool = Form(default=False, description="Post Process Mask"), - bgc: Optional[str] = Query(default=None, description="Background Color"), - extras: Optional[str] = Query( - default=None, description="Extra parameters as JSON" - ), - ): - self.model = model - self.a = a - self.af = af - self.ab = ab - self.ae = ae - self.om = om - self.ppm = ppm - self.extras = extras - self.bgc = ( - cast(Tuple[int, int, int, int], tuple(map(int, bgc.split(",")))) - if bgc - else None - ) - - def im_without_bg(content: bytes, commons: CommonQueryParams) -> Response: - kwargs = {} - - if commons.extras: - try: - kwargs.update(json.loads(commons.extras)) - except Exception: - pass - - return Response( - remove( - content, - session=sessions.setdefault(commons.model, new_session(commons.model)), - alpha_matting=commons.a, - alpha_matting_foreground_threshold=commons.af, - alpha_matting_background_threshold=commons.ab, - alpha_matting_erode_size=commons.ae, - only_mask=commons.om, - post_process_mask=commons.ppm, - bgcolor=commons.bgc, - **kwargs, - ), - media_type="image/png", - ) - - @app.on_event("startup") - def startup(): - try: - webbrowser.open(f"http://localhost:{port}") - except Exception: - pass - - if threads is not None: - from anyio import CapacityLimiter - from anyio.lowlevel 
import RunVar - - RunVar("_default_thread_limiter").set(CapacityLimiter(threads)) - - @app.get( - path="/api/remove", - tags=["Background Removal"], - summary="Remove from URL", - description="Removes the background from an image obtained by retrieving an URL.", - ) - async def get_index( - url: str = Query( - default=..., description="URL of the image that has to be processed." - ), - commons: CommonQueryParams = Depends(), - ): - async with aiohttp.ClientSession() as session: - async with session.get(url) as response: - file = await response.read() - return await asyncify(im_without_bg)(file, commons) - - @app.post( - path="/api/remove", - tags=["Background Removal"], - summary="Remove from Stream", - description="Removes the background from an image sent within the request itself.", - ) - async def post_index( - file: bytes = File( - default=..., - description="Image file (byte stream) that has to be processed.", - ), - commons: CommonQueryPostParams = Depends(), - ): - return await asyncify(im_without_bg)(file, commons) # type: ignore - - def gr_app(app): - def inference(input_path, model): - output_path = "output.png" - with open(input_path, "rb") as i: - with open(output_path, "wb") as o: - input = i.read() - output = remove(input, session=new_session(model)) - o.write(output) - return os.path.join(output_path) - - interface = gr.Interface( - inference, - [ - gr.components.Image(type="filepath", label="Input"), - gr.components.Dropdown( - [ - "u2net", - "u2netp", - "u2net_human_seg", - "u2net_cloth_seg", - "silueta", - "isnet-general-use", - "isnet-anime", - ], - value="u2net", - label="Models", - ), - ], - gr.components.Image(type="filepath", label="Output"), - ) - - interface.queue(concurrency_count=3) - app = gr.mount_gradio_app(app, interface, path="/") - return app - - print(f"To access the API documentation, go to http://localhost:{port}/api") - print(f"To access the UI, go to http://localhost:{port}") - - uvicorn.run(gr_app(app), host="0.0.0.0", port=port, log_level=log_level) diff --git a/spaces/digitalxingtong/Miiu-Bert-Vits2/models.py b/spaces/digitalxingtong/Miiu-Bert-Vits2/models.py deleted file mode 100644 index d4afe44d883691610c5903e602a3ca245fcb3a5c..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Miiu-Bert-Vits2/models.py +++ /dev/null @@ -1,707 +0,0 @@ -import copy -import math -import torch -from torch import nn -from torch.nn import functional as F - -import commons -import modules -import attentions -import monotonic_align - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm - -from commons import init_weights, get_padding -from text import symbols, num_tones, num_languages -class DurationDiscriminator(nn.Module): #vits2 - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0): - super().__init__() - - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.gin_channels = gin_channels - - self.drop = nn.Dropout(p_dropout) - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.norm_1 = modules.LayerNorm(filter_channels) - self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.norm_2 = modules.LayerNorm(filter_channels) - self.dur_proj = nn.Conv1d(1, filter_channels, 1) - - self.pre_out_conv_1 = nn.Conv1d(2*filter_channels, filter_channels, 
kernel_size, padding=kernel_size//2) - self.pre_out_norm_1 = modules.LayerNorm(filter_channels) - self.pre_out_conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.pre_out_norm_2 = modules.LayerNorm(filter_channels) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, in_channels, 1) - - self.output_layer = nn.Sequential( - nn.Linear(filter_channels, 1), - nn.Sigmoid() - ) - - def forward_probability(self, x, x_mask, dur, g=None): - dur = self.dur_proj(dur) - x = torch.cat([x, dur], dim=1) - x = self.pre_out_conv_1(x * x_mask) - x = torch.relu(x) - x = self.pre_out_norm_1(x) - x = self.drop(x) - x = self.pre_out_conv_2(x * x_mask) - x = torch.relu(x) - x = self.pre_out_norm_2(x) - x = self.drop(x) - x = x * x_mask - x = x.transpose(1, 2) - output_prob = self.output_layer(x) - return output_prob - - def forward(self, x, x_mask, dur_r, dur_hat, g=None): - x = torch.detach(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.conv_1(x * x_mask) - x = torch.relu(x) - x = self.norm_1(x) - x = self.drop(x) - x = self.conv_2(x * x_mask) - x = torch.relu(x) - x = self.norm_2(x) - x = self.drop(x) - - output_probs = [] - for dur in [dur_r, dur_hat]: - output_prob = self.forward_probability(x, x_mask, dur, g) - output_probs.append(output_prob) - - return output_probs - -class TransformerCouplingBlock(nn.Module): - def __init__(self, - channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - n_flows=4, - gin_channels=0, - share_parameter=False - ): - - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - - self.wn = attentions.FFT(hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout, isflow = True, gin_channels = self.gin_channels) if share_parameter else None - - for i in range(n_flows): - self.flows.append( - modules.TransformerCouplingLayer(channels, hidden_channels, kernel_size, n_layers, n_heads, p_dropout, filter_channels, mean_only=True, wn_sharing_parameter=self.wn, gin_channels = self.gin_channels)) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - -class StochasticDurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0): - super().__init__() - filter_channels = in_channels # it needs to be removed from future version. 
- self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.log_flow = modules.Log() - self.flows = nn.ModuleList() - self.flows.append(modules.ElementwiseAffine(2)) - for i in range(n_flows): - self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.flows.append(modules.Flip()) - - self.post_pre = nn.Conv1d(1, filter_channels, 1) - self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - self.post_flows = nn.ModuleList() - self.post_flows.append(modules.ElementwiseAffine(2)) - for i in range(4): - self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.post_flows.append(modules.Flip()) - - self.pre = nn.Conv1d(in_channels, filter_channels, 1) - self.proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, filter_channels, 1) - - def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0): - x = torch.detach(x) - x = self.pre(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.convs(x, x_mask) - x = self.proj(x) * x_mask - - if not reverse: - flows = self.flows - assert w is not None - - logdet_tot_q = 0 - h_w = self.post_pre(w) - h_w = self.post_convs(h_w, x_mask) - h_w = self.post_proj(h_w) * x_mask - e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask - z_q = e_q - for flow in self.post_flows: - z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w)) - logdet_tot_q += logdet_q - z_u, z1 = torch.split(z_q, [1, 1], 1) - u = torch.sigmoid(z_u) * x_mask - z0 = (w - u) * x_mask - logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1, 2]) - logq = torch.sum(-0.5 * (math.log(2 * math.pi) + (e_q ** 2)) * x_mask, [1, 2]) - logdet_tot_q - - logdet_tot = 0 - z0, logdet = self.log_flow(z0, x_mask) - logdet_tot += logdet - z = torch.cat([z0, z1], 1) - for flow in flows: - z, logdet = flow(z, x_mask, g=x, reverse=reverse) - logdet_tot = logdet_tot + logdet - nll = torch.sum(0.5 * (math.log(2 * math.pi) + (z ** 2)) * x_mask, [1, 2]) - logdet_tot - return nll + logq # [b] - else: - flows = list(reversed(self.flows)) - flows = flows[:-2] + [flows[-1]] # remove a useless vflow - z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale - for flow in flows: - z = flow(z, x_mask, g=x, reverse=reverse) - z0, z1 = torch.split(z, [1, 1], 1) - logw = z0 - return logw - - -class DurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0): - super().__init__() - - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.gin_channels = gin_channels - - self.drop = nn.Dropout(p_dropout) - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2) - self.norm_1 = modules.LayerNorm(filter_channels) - self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size // 2) - self.norm_2 = modules.LayerNorm(filter_channels) - self.proj = nn.Conv1d(filter_channels, 1, 1) - - if gin_channels != 0: - self.cond = 
nn.Conv1d(gin_channels, in_channels, 1) - - def forward(self, x, x_mask, g=None): - x = torch.detach(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.conv_1(x * x_mask) - x = torch.relu(x) - x = self.norm_1(x) - x = self.drop(x) - x = self.conv_2(x * x_mask) - x = torch.relu(x) - x = self.norm_2(x) - x = self.drop(x) - x = self.proj(x * x_mask) - return x * x_mask - - -class TextEncoder(nn.Module): - def __init__(self, - n_vocab, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - gin_channels=0): - super().__init__() - self.n_vocab = n_vocab - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.gin_channels = gin_channels - self.emb = nn.Embedding(len(symbols), hidden_channels) - nn.init.normal_(self.emb.weight, 0.0, hidden_channels ** -0.5) - self.tone_emb = nn.Embedding(num_tones, hidden_channels) - nn.init.normal_(self.tone_emb.weight, 0.0, hidden_channels ** -0.5) - self.language_emb = nn.Embedding(num_languages, hidden_channels) - nn.init.normal_(self.language_emb.weight, 0.0, hidden_channels ** -0.5) - self.bert_proj = nn.Conv1d(1024, hidden_channels, 1) - - self.encoder = attentions.Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - gin_channels=self.gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, tone, language, bert, g=None): - x = (self.emb(x)+ self.tone_emb(tone)+ self.language_emb(language)+self.bert_proj(bert).transpose(1,2)) * math.sqrt(self.hidden_channels) # [b, t, h] - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - - x = self.encoder(x * x_mask, x_mask, g=g) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return x, m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, - gin_channels=gin_channels, mean_only=True)) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - -class PosteriorEncoder(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN(hidden_channels, 
kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - -class Generator(torch.nn.Module): - def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, - upsample_initial_channel, upsample_kernel_sizes, gin_channels=0): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3) - resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append(weight_norm( - ConvTranspose1d(upsample_initial_channel // (2 ** i), upsample_initial_channel // (2 ** (i + 1)), - k, u, padding=(k - u) // 2))) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - print('Removing weight norm...') - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - 
x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - -class ReferenceEncoder(nn.Module): - ''' - inputs --- [N, Ty/r, n_mels*r] mels - outputs --- [N, ref_enc_gru_size] - ''' - - def __init__(self, spec_channels, gin_channels=0): - - super().__init__() - self.spec_channels = spec_channels - ref_enc_filters = [32, 32, 64, 64, 128, 128] - K = len(ref_enc_filters) - filters = [1] + ref_enc_filters - convs = [weight_norm(nn.Conv2d(in_channels=filters[i], - out_channels=filters[i + 1], - kernel_size=(3, 3), - stride=(2, 2), - padding=(1, 1))) for i in range(K)] - self.convs = nn.ModuleList(convs) - # self.wns = nn.ModuleList([weight_norm(num_features=ref_enc_filters[i]) for i in range(K)]) - - out_channels = self.calculate_channels(spec_channels, 3, 2, 1, K) - self.gru = nn.GRU(input_size=ref_enc_filters[-1] * out_channels, - hidden_size=256 // 2, - batch_first=True) - self.proj = nn.Linear(128, gin_channels) - - def forward(self, inputs, mask=None): - N = inputs.size(0) - out = inputs.view(N, 1, -1, self.spec_channels) # [N, 1, Ty, n_freqs] - for conv in self.convs: - out = conv(out) - # out = wn(out) - out = F.relu(out) # [N, 128, Ty//2^K, n_mels//2^K] - - out = out.transpose(1, 2) # [N, Ty//2^K, 128, n_mels//2^K] - T = out.size(1) - N = out.size(0) - out = out.contiguous().view(N, T, -1) # [N, Ty//2^K, 128*n_mels//2^K] - - self.gru.flatten_parameters() - memory, out = self.gru(out) # out --- [1, N, 128] - - return self.proj(out.squeeze(0)) - - def calculate_channels(self, L, kernel_size, stride, pad, n_convs): - for i in range(n_convs): - L = (L - kernel_size + 2 * pad) // stride + 1 - return L - - -class SynthesizerTrn(nn.Module): - """ - Synthesizer for Training - """ - - def __init__(self, - n_vocab, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - 
resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - n_speakers=256, - gin_channels=256, - use_sdp=True, - n_flow_layer = 4, - n_layers_trans_flow = 3, - flow_share_parameter = False, - use_transformer_flow = True, - **kwargs): - - super().__init__() - self.n_vocab = n_vocab - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.n_speakers = n_speakers - self.gin_channels = gin_channels - self.n_layers_trans_flow = n_layers_trans_flow - self.use_spk_conditioned_encoder = kwargs.get("use_spk_conditioned_encoder", True) - self.use_sdp = use_sdp - self.use_noise_scaled_mas = kwargs.get("use_noise_scaled_mas", False) - self.mas_noise_scale_initial = kwargs.get("mas_noise_scale_initial", 0.01) - self.noise_scale_delta = kwargs.get("noise_scale_delta", 2e-6) - self.current_mas_noise_scale = self.mas_noise_scale_initial - if self.use_spk_conditioned_encoder and gin_channels > 0: - self.enc_gin_channels = gin_channels - self.enc_p = TextEncoder(n_vocab, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - gin_channels=self.enc_gin_channels) - self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, - upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels) - self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, - gin_channels=gin_channels) - if use_transformer_flow: - self.flow = TransformerCouplingBlock(inter_channels, hidden_channels, filter_channels, n_heads, n_layers_trans_flow, 5, p_dropout, n_flow_layer, gin_channels=gin_channels,share_parameter= flow_share_parameter) - else: - self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, n_flow_layer, gin_channels=gin_channels) - self.sdp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels) - self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels) - - if n_speakers >= 1: - self.emb_g = nn.Embedding(n_speakers, gin_channels) - else: - self.ref_enc = ReferenceEncoder(spec_channels, gin_channels) - - def forward(self, x, x_lengths, y, y_lengths, sid, tone, language, bert): - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = self.ref_enc(y.transpose(1,2)).unsqueeze(-1) - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert,g=g) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - - with torch.no_grad(): - # negative cross-entropy - s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t] - neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s] - neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), - s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent4 = 
torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s] - neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4 - if self.use_noise_scaled_mas: - epsilon = torch.std(neg_cent) * torch.randn_like(neg_cent) * self.current_mas_noise_scale - neg_cent = neg_cent + epsilon - - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach() - - w = attn.sum(2) - - l_length_sdp = self.sdp(x, x_mask, w, g=g) - l_length_sdp = l_length_sdp / torch.sum(x_mask) - - logw_ = torch.log(w + 1e-6) * x_mask - logw = self.dp(x, x_mask, g=g) - l_length_dp = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(x_mask) # for averaging - - l_length = l_length_dp + l_length_sdp - - # expand prior - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) - - z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size) - o = self.dec(z_slice, g=g) - return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q), (x, logw, logw_) - - def infer(self, x, x_lengths, sid, tone, language, bert, noise_scale=.667, length_scale=1, noise_scale_w=0.8, max_len=None, sdp_ratio=0,y=None): - #x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert) - # g = self.gst(y) - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = self.ref_enc(y.transpose(1,2)).unsqueeze(-1) - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert,g=g) - logw = self.sdp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) * (sdp_ratio) + self.dp(x, x_mask, g=g) * (1 - sdp_ratio) - w = torch.exp(logw) * x_mask * length_scale - w_ceil = torch.ceil(w) - y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() - y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype) - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = commons.generate_path(w_ceil, attn_mask) - - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, - 2) # [b, t', t], [b, t, d] -> [b, d, t'] - - z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale - z = self.flow(z_p, y_mask, g=g, reverse=True) - o = self.dec((z * y_mask)[:, :, :max_len], g=g) - return o, attn, y_mask, (z, z_p, m_p, logs_p) diff --git a/spaces/digitalxingtong/Nailv-read-Bert-Vits2/text/symbols.py b/spaces/digitalxingtong/Nailv-read-Bert-Vits2/text/symbols.py deleted file mode 100644 index 9dfae4e633829f20c4fd767b1c7a9198911ed801..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Nailv-read-Bert-Vits2/text/symbols.py +++ /dev/null @@ -1,51 +0,0 @@ -punctuation = ['!', '?', '…', ",", ".", "'", '-'] -pu_symbols = punctuation + ["SP", "UNK"] -pad = '_' - -# chinese -zh_symbols = ['E', 'En', 'a', 'ai', 'an', 'ang', 'ao', 'b', 'c', 'ch', 'd', 'e', 'ei', 'en', 'eng', 'er', 'f', 'g', 'h', - 'i', 'i0', 'ia', 'ian', 'iang', 'iao', 'ie', 'in', 'ing', 'iong', 'ir', 'iu', 'j', 'k', 'l', 'm', 'n', 'o', - 'ong', - 'ou', 'p', 'q', 'r', 's', 'sh', 't', 'u', 'ua', 'uai', 'uan', 'uang', 'ui', 'un', 'uo', 'v', 'van', 've', 'vn', - 'w', 'x', 'y', 'z', 'zh', - "AA", "EE", "OO"] -num_zh_tones = 6 - -# japanese -ja_symbols = ['I', 'N', 'U', 'a', 'b', 'by', 'ch', 'cl', 'd', 'dy', 'e', 'f', 'g', 'gy', 
'h', 'hy', 'i', 'j', 'k', 'ky', - 'm', 'my', 'n', 'ny', 'o', 'p', 'py', 'r', 'ry', 's', 'sh', 't', 'ts', 'u', 'V', 'w', 'y', 'z'] -num_ja_tones = 1 - -# English -en_symbols = ['aa', 'ae', 'ah', 'ao', 'aw', 'ay', 'b', 'ch', 'd', 'dh', 'eh', 'er', 'ey', 'f', 'g', 'hh', 'ih', 'iy', - 'jh', 'k', 'l', 'm', 'n', 'ng', 'ow', 'oy', 'p', 'r', 's', - 'sh', 't', 'th', 'uh', 'uw', 'V', 'w', 'y', 'z', 'zh'] -num_en_tones = 4 - -# combine all symbols -normal_symbols = sorted(set(zh_symbols + ja_symbols + en_symbols)) -symbols = [pad] + normal_symbols + pu_symbols -sil_phonemes_ids = [symbols.index(i) for i in pu_symbols] - -# combine all tones -num_tones = num_zh_tones + num_ja_tones + num_en_tones - -# language maps -language_id_map = { - 'ZH': 0, - "JA": 1, - "EN": 2 -} -num_languages = len(language_id_map.keys()) - -language_tone_start_map = { - 'ZH': 0, - "JA": num_zh_tones, - "EN": num_zh_tones + num_ja_tones -} - -if __name__ == '__main__': - a = set(zh_symbols) - b = set(en_symbols) - print(sorted(a&b)) - diff --git a/spaces/dmeck/RVC-Speakers/vits/text/__init__.py b/spaces/dmeck/RVC-Speakers/vits/text/__init__.py deleted file mode 100644 index 43d013185c214d3d3e459a3fd895ab0891c91611..0000000000000000000000000000000000000000 --- a/spaces/dmeck/RVC-Speakers/vits/text/__init__.py +++ /dev/null @@ -1,57 +0,0 @@ -""" from https://github.com/keithito/tacotron """ -from vits.text import cleaners -from vits.text.symbols import symbols - - -# Mappings from symbol to numeric ID and vice versa: -_symbol_to_id = {s: i for i, s in enumerate(symbols)} -_id_to_symbol = {i: s for i, s in enumerate(symbols)} - - -def text_to_sequence(text, symbols, cleaner_names): - '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. - Args: - text: string to convert to a sequence - cleaner_names: names of the cleaner functions to run the text through - Returns: - List of integers corresponding to the symbols in the text - ''' - _symbol_to_id = {s: i for i, s in enumerate(symbols)} - sequence = [] - - clean_text = _clean_text(text, cleaner_names) - for symbol in clean_text: - if symbol not in _symbol_to_id.keys(): - continue - symbol_id = _symbol_to_id[symbol] - sequence += [symbol_id] - return sequence, clean_text - - -def cleaned_text_to_sequence(cleaned_text): - '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. 
- Args: - text: string to convert to a sequence - Returns: - List of integers corresponding to the symbols in the text - ''' - sequence = [_symbol_to_id[symbol] for symbol in cleaned_text if symbol in _symbol_to_id.keys()] - return sequence - - -def sequence_to_text(sequence): - '''Converts a sequence of IDs back to a string''' - result = '' - for symbol_id in sequence: - s = _id_to_symbol[symbol_id] - result += s - return result - - -def _clean_text(text, cleaner_names): - for name in cleaner_names: - cleaner = getattr(cleaners, name) - if not cleaner: - raise Exception('Unknown cleaner: %s' % name) - text = cleaner(text) - return text diff --git a/spaces/ds520/bingo/src/components/chat-image.tsx b/spaces/ds520/bingo/src/components/chat-image.tsx deleted file mode 100644 index 05ecc9771eada27a0f2d160bb01cba170d37bb09..0000000000000000000000000000000000000000 --- a/spaces/ds520/bingo/src/components/chat-image.tsx +++ /dev/null @@ -1,170 +0,0 @@ -import { - useEffect, - useState, - useCallback, - ChangeEvent, - ClipboardEvent, - MouseEventHandler, - FormEvent, - useRef -} from "react" -import Image from 'next/image' -import PasteIcon from '@/assets/images/paste.svg' -import UploadIcon from '@/assets/images/upload.svg' -import CameraIcon from '@/assets/images/camera.svg' -import { useBing } from '@/lib/hooks/use-bing' -import { cn } from '@/lib/utils' - -interface ChatImageProps extends Pick, 'uploadImage'> {} - -const preventDefault: MouseEventHandler = (event) => { - event.nativeEvent.stopImmediatePropagation() -} - -const toBase64 = (file: File): Promise => new Promise((resolve, reject) => { - const reader = new FileReader() - reader.readAsDataURL(file) - reader.onload = () => resolve(reader.result as string) - reader.onerror = reject -}) - -export function ChatImage({ children, uploadImage }: React.PropsWithChildren) { - const videoRef = useRef(null) - const canvasRef = useRef(null) - const mediaStream = useRef() - const [panel, setPanel] = useState('none') - - const upload = useCallback((url: string) => { - if (url) { - uploadImage(url) - } - setPanel('none') - }, [panel]) - - const onUpload = useCallback(async (event: ChangeEvent) => { - const file = event.target.files?.[0] - if (file) { - const fileDataUrl = await toBase64(file) - if (fileDataUrl) { - upload(fileDataUrl) - } - } - }, []) - - const onPaste = useCallback((event: ClipboardEvent) => { - const pasteUrl = event.clipboardData.getData('text') ?? 
'' - upload(pasteUrl) - }, []) - - const onEnter = useCallback((event: FormEvent) => { - event.preventDefault() - event.stopPropagation() - // @ts-ignore - const inputUrl = event.target.elements.image.value - if (inputUrl) { - upload(inputUrl) - } - }, []) - - const openVideo: MouseEventHandler = async (event) => { - event.stopPropagation() - setPanel('camera-mode') - } - - const onCapture = () => { - if (canvasRef.current && videoRef.current) { - const canvas = canvasRef.current - canvas.width = videoRef.current!.videoWidth - canvas.height = videoRef.current!.videoHeight - canvas.getContext('2d')?.drawImage(videoRef.current, 0, 0, canvas.width, canvas.height) - const cameraUrl = canvas.toDataURL('image/jpeg') - upload(cameraUrl) - } - } - - useEffect(() => { - const handleBlur = () => { - if (panel !== 'none') { - setPanel('none') - } - } - document.addEventListener('click', handleBlur) - return () => { - document.removeEventListener('click', handleBlur) - } - }, [panel]) - - useEffect(() => { - if (panel === 'camera-mode') { - navigator.mediaDevices.getUserMedia({ video: true, audio: false }) - .then(videoStream => { - mediaStream.current = videoStream - if (videoRef.current) { - videoRef.current.srcObject = videoStream - } - }) - } else { - if (mediaStream.current) { - mediaStream.current.getTracks().forEach(function(track) { - track.stop() - }) - mediaStream.current = undefined - } - } - }, [panel]) - - return ( -
      -
      panel === 'none' ? setPanel('normal') : setPanel('none')}>{children}
      -
      -
      -
      -

      添加图像

      -
      -
      - paste -
      - e.stopPropagation()} - /> -
      -
      -
      - - -
      -
      - {panel === 'camera-mode' &&
      -
      -
      -
      -
      -
      -
      -
      } -
      -
      - ) -} diff --git a/spaces/dukecsxu/hotdogclassifier/app.py b/spaces/dukecsxu/hotdogclassifier/app.py deleted file mode 100644 index a6ca33b71945f3ff595d30c4bce8047190fecf34..0000000000000000000000000000000000000000 --- a/spaces/dukecsxu/hotdogclassifier/app.py +++ /dev/null @@ -1,20 +0,0 @@ -import streamlit as st -from transformers import pipeline -from PIL import Image - -pipeline = pipeline(task="image-classification", model="julien-c/hotdog-not-hotdog") - -st.title("Hot Dog? Or Not?") - -file_name = st.file_uploader("Upload a hot dog candidate image") - -if file_name is not None: - col1, col2 = st.columns(2) - - image = Image.open(file_name) - col1.image(image, use_column_width=True) - predictions = pipeline(image) - - col2.header("Probabilities") - for p in predictions: - col2.subheader(f"{ p['label'] }: { round(p['score'] * 100, 1)}%") \ No newline at end of file diff --git a/spaces/eaedk/agri-tech-fastapi-with-GUI/Dockerfile b/spaces/eaedk/agri-tech-fastapi-with-GUI/Dockerfile deleted file mode 100644 index e3bb773f480f1c6419c683bc9192df51580a854d..0000000000000000000000000000000000000000 --- a/spaces/eaedk/agri-tech-fastapi-with-GUI/Dockerfile +++ /dev/null @@ -1,14 +0,0 @@ -FROM python:3.9 -#-slim - -WORKDIR /app - -COPY requirements.txt ./ - -RUN pip install -r requirements.txt - -EXPOSE 7860 - -COPY . . - -CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"] \ No newline at end of file diff --git a/spaces/eaglev/whales/app.py b/spaces/eaglev/whales/app.py deleted file mode 100644 index 29bd7a76ca34b4f55b1a1c2e4cc7c0b7640cc3eb..0000000000000000000000000000000000000000 --- a/spaces/eaglev/whales/app.py +++ /dev/null @@ -1,32 +0,0 @@ -from fastai.vision.all import * -import gradio as gr -import skimage - -learn = load_learner('model.pkl') - -categories = ('dolphin', 'whale') - -def classify_image(img): - pred, idx, probs = learn.predict(img) - return(dict(zip(categories, map(float, probs)))) - - -image = gr.inputs.Image(shape=(224, 224), type="pil") -label = gr.outputs.Label() -examples = ['dolphin1.jpeg', 'whale1.jpg', 'dolphin2.jpg', 'whale2.jpeg', 'whale3.jpeg'] -title = "Classification of Whales vs. Dolphins" -description = "A whale vs. dolphin classifier trained with resnet18 fine-tuned on images from duckduckgo searches. Application of the fastai Deep Learning course." -interpretation = 'default' -enable_queue = True - -iface = gr.Interface( - fn=classify_image, - inputs=image, - outputs=label, - examples=examples, - title=title, - description=description, - interpretation=interpretation, - enable_queue=enable_queue -) -iface.launch(inline=False) diff --git a/spaces/epexVfeibi/Imagedeblurr/ActivationAutoCADPID2017keygen [BETTER].md b/spaces/epexVfeibi/Imagedeblurr/ActivationAutoCADPID2017keygen [BETTER].md deleted file mode 100644 index 916d54fe4bccf4ddd4841fc03bbc4a2da48c00d2..0000000000000000000000000000000000000000 --- a/spaces/epexVfeibi/Imagedeblurr/ActivationAutoCADPID2017keygen [BETTER].md +++ /dev/null @@ -1,6 +0,0 @@ -

      activationAutoCADPID2017keygen


      DOWNLOAD ››› https://jinyurl.com/2uErab



      -
      - d5da3c52bf
      -
      -
      -

      diff --git a/spaces/erbanku/gpt-academic/crazy_functions/crazy_functions_test.py b/spaces/erbanku/gpt-academic/crazy_functions/crazy_functions_test.py deleted file mode 100644 index 6020fa2ffc3cdcb288f03e55ff37313b0be78222..0000000000000000000000000000000000000000 --- a/spaces/erbanku/gpt-academic/crazy_functions/crazy_functions_test.py +++ /dev/null @@ -1,130 +0,0 @@ -""" -这是什么? - 这个文件用于函数插件的单元测试 - 运行方法 python crazy_functions/crazy_functions_test.py -""" - -def validate_path(): - import os, sys - dir_name = os.path.dirname(__file__) - root_dir_assume = os.path.abspath(os.path.dirname(__file__) + '/..') - os.chdir(root_dir_assume) - sys.path.append(root_dir_assume) - -validate_path() # validate path so you can run from base directory -from colorful import * -from toolbox import get_conf, ChatBotWithCookies -proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY = \ - get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY') - -llm_kwargs = { - 'api_key': API_KEY, - 'llm_model': LLM_MODEL, - 'top_p':1.0, - 'max_length': None, - 'temperature':1.0, -} -plugin_kwargs = { } -chatbot = ChatBotWithCookies(llm_kwargs) -history = [] -system_prompt = "Serve me as a writing and programming assistant." -web_port = 1024 - - -def test_解析一个Python项目(): - from crazy_functions.解析项目源代码 import 解析一个Python项目 - txt = "crazy_functions/test_project/python/dqn" - for cookies, cb, hist, msg in 解析一个Python项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - print(cb) - -def test_解析一个Cpp项目(): - from crazy_functions.解析项目源代码 import 解析一个C项目 - txt = "crazy_functions/test_project/cpp/cppipc" - for cookies, cb, hist, msg in 解析一个C项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - print(cb) - -def test_Latex英文润色(): - from crazy_functions.Latex全文润色 import Latex英文润色 - txt = "crazy_functions/test_project/latex/attention" - for cookies, cb, hist, msg in Latex英文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - print(cb) - -def test_Markdown中译英(): - from crazy_functions.批量Markdown翻译 import Markdown中译英 - txt = "README.md" - for cookies, cb, hist, msg in Markdown中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - print(cb) - -def test_批量翻译PDF文档(): - from crazy_functions.批量翻译PDF文档_多线程 import 批量翻译PDF文档 - txt = "crazy_functions/test_project/pdf_and_word" - for cookies, cb, hist, msg in 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - print(cb) - -def test_谷歌检索小助手(): - from crazy_functions.谷歌检索小助手 import 谷歌检索小助手 - txt = "https://scholar.google.com/scholar?hl=en&as_sdt=0%2C5&q=auto+reinforcement+learning&btnG=" - for cookies, cb, hist, msg in 谷歌检索小助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - print(cb) - -def test_总结word文档(): - from crazy_functions.总结word文档 import 总结word文档 - txt = "crazy_functions/test_project/pdf_and_word" - for cookies, cb, hist, msg in 总结word文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - print(cb) - -def test_下载arxiv论文并翻译摘要(): - from crazy_functions.下载arxiv论文翻译摘要 import 下载arxiv论文并翻译摘要 - txt = "1812.10695" - for cookies, cb, hist, msg in 下载arxiv论文并翻译摘要(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - print(cb) - -def test_联网回答问题(): - from crazy_functions.联网的ChatGPT import 连接网络回答问题 - # txt = "“我们称之为高效”是什么梗?" 
- # >> 从第0份、第1份、第2份搜索结果可以看出,“我们称之为高效”是指在游戏社区中,用户们用来形容一些游戏策略或行为非常高效且能够带来好的效果的用语。这个用语最初可能是在群星(Stellaris)这个游戏里面流行起来的,后来也传播到了其他游戏中,比如巨像(Titan)等游戏。其中第1份搜索结果中的一篇文章也指出,“我们称之为高效”这 一用语来源于群星(Stellaris)游戏中的一个情节。 - # txt = "为什么说枪毙P社玩家没有一个冤枉的?" - # >> 它们都是关于一个知乎用户所发的帖子,引用了一群游戏玩家对于需要对P社玩家进行枪毙的讨论,这个话题的本质是玩家们对于P 社游戏中的政治与历史元素的不同看法,以及其中不少玩家以极端立场宣扬的想法和言论,因此有人就以枪毙这些玩家来回应此类言论。但是这个话题本身并没有实质内容,只是一个玩笑或者恶搞,并不应该被当做真实的态度或者观点,因此这种说法没有实际意义。 - # txt = "谁是应急食品?" - # >> '根据以上搜索结果可以得知,应急食品是“原神”游戏中的角色派蒙的外号。' - # txt = "道路千万条,安全第一条。后面两句是?" - # >> '行车不规范,亲人两行泪。' - # txt = "What is in the canister?" - # >> Rainbow Six Siege 游戏中 Smoke 的 Canister 中装有何种物质相关的官方信息。 - # txt = "失败的man是什么?" - # >> 根据第1份搜索结果,可以得知失败的man是指一位在B站购买了蜘蛛侠COS服后穿上后被网友嘲笑的UP主,而“失败的man”是蜘蛛侠英文名“spiderman”的谐音梗,并且网友们还 给这位UP主起了“苍蝇侠”的外号。因此,失败的man是指这位UP主在穿上蜘蛛侠COS服后被网友嘲笑的情况。 - # txt = "老六是什么,起源于哪里?" - # >> 老六是网络流行语,最初起源于游戏《CSGO》,指游戏中玩家中独来独往、游离于队伍之外的“自由人”或玩得比较菜或者玩得比较阴险的人 ,后来逐渐演变成指玩得比较阴险的玩家。 - # txt = "罗小黑战记因为什么经常被吐槽?" - # >> 3. 更新速度。罗小黑战记的更新时间不定,时而快时而慢,给观众留下了等待的时间过长的印象。 - # txt = "沙特、伊朗最近的关系如何?" - # >> 最近在中国的斡旋下,沙特和伊朗于3月10日达成了恢复两国外交关系的协议,这表明两国关系已经重新回到正常化状态。 - # txt = "You should have gone for the head. What does that mean?" - # >> The phrase "You should have gone for the head" is a quote from the Marvel movies, Avengers: Infinity War and Avengers: Endgame. It was spoken by the character Thanos in Infinity War and by Thor in Endgame. - txt = "AutoGPT是什么?" - # >> AutoGPT是一个基于GPT-4语言模型的开源应用程序。它可以根据用户需求自主执行任务,包括事件分析、营销方案撰写、代码编程、数学运算等等,并完全不需要用户插手。它可以自己思考,给出实现的步骤和实现细节,甚至可以自问自答执 行任务。最近它在GitHub上爆火,成为了业内最热门的项目之一。 - # txt = "钟离带什么圣遗物?" - for cookies, cb, hist, msg in 连接网络回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - print("当前问答:", cb[-1][-1].replace("\n"," ")) - for i, it in enumerate(cb): print亮蓝(it[0]); print亮黄(it[1]) - -def test_解析ipynb文件(): - from crazy_functions.解析JupyterNotebook import 解析ipynb文件 - txt = "crazy_functions/test_samples" - for cookies, cb, hist, msg in 解析ipynb文件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - print(cb) - - -# test_解析一个Python项目() -# test_Latex英文润色() -# test_Markdown中译英() -# test_批量翻译PDF文档() -# test_谷歌检索小助手() -# test_总结word文档() -# test_下载arxiv论文并翻译摘要() -# test_解析一个Cpp项目() -# test_联网回答问题() -test_解析ipynb文件() - -input("程序完成,回车退出。") -print("退出。") \ No newline at end of file diff --git a/spaces/erbanku/gpt-academic/request_llm/edge_gpt.py b/spaces/erbanku/gpt-academic/request_llm/edge_gpt.py deleted file mode 100644 index bbf84000d84a42de80d3c051a24f06336af76aaf..0000000000000000000000000000000000000000 --- a/spaces/erbanku/gpt-academic/request_llm/edge_gpt.py +++ /dev/null @@ -1,409 +0,0 @@ -""" -======================================================================== -第一部分:来自EdgeGPT.py -https://github.com/acheong08/EdgeGPT -======================================================================== -""" - -import argparse -import asyncio -import json -import os -import random -import re -import ssl -import sys -import uuid -from enum import Enum -from typing import Generator -from typing import Literal -from typing import Optional -from typing import Union -import websockets.client as websockets - -DELIMITER = "\x1e" - - -# Generate random IP between range 13.104.0.0/14 -FORWARDED_IP = ( - f"13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}" -) - -HEADERS = { - "accept": "application/json", - "accept-language": "en-US,en;q=0.9", - "content-type": "application/json", - "sec-ch-ua": '"Not_A Brand";v="99", "Microsoft Edge";v="110", "Chromium";v="110"', - 
"sec-ch-ua-arch": '"x86"', - "sec-ch-ua-bitness": '"64"', - "sec-ch-ua-full-version": '"109.0.1518.78"', - "sec-ch-ua-full-version-list": '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"', - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-model": "", - "sec-ch-ua-platform": '"Windows"', - "sec-ch-ua-platform-version": '"15.0.0"', - "sec-fetch-dest": "empty", - "sec-fetch-mode": "cors", - "sec-fetch-site": "same-origin", - "x-ms-client-request-id": str(uuid.uuid4()), - "x-ms-useragent": "azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32", - "Referer": "https://www.bing.com/search?q=Bing+AI&showconv=1&FORM=hpcodx", - "Referrer-Policy": "origin-when-cross-origin", - "x-forwarded-for": FORWARDED_IP, -} - -HEADERS_INIT_CONVER = { - "authority": "edgeservices.bing.com", - "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7", - "accept-language": "en-US,en;q=0.9", - "cache-control": "max-age=0", - "sec-ch-ua": '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"', - "sec-ch-ua-arch": '"x86"', - "sec-ch-ua-bitness": '"64"', - "sec-ch-ua-full-version": '"110.0.1587.69"', - "sec-ch-ua-full-version-list": '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"', - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-model": '""', - "sec-ch-ua-platform": '"Windows"', - "sec-ch-ua-platform-version": '"15.0.0"', - "sec-fetch-dest": "document", - "sec-fetch-mode": "navigate", - "sec-fetch-site": "none", - "sec-fetch-user": "?1", - "upgrade-insecure-requests": "1", - "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69", - "x-edge-shopping-flag": "1", - "x-forwarded-for": FORWARDED_IP, -} - -def get_ssl_context(): - import certifi - ssl_context = ssl.create_default_context() - ssl_context.load_verify_locations(certifi.where()) - return ssl_context - - - -class NotAllowedToAccess(Exception): - pass - - -class ConversationStyle(Enum): - creative = "h3imaginative,clgalileo,gencontentv3" - balanced = "galileo" - precise = "h3precise,clgalileo" - - -CONVERSATION_STYLE_TYPE = Optional[ - Union[ConversationStyle, Literal["creative", "balanced", "precise"]] -] - - -def _append_identifier(msg: dict) -> str: - """ - Appends special character to end of message to identify end of message - """ - # Convert dict to json string - return json.dumps(msg) + DELIMITER - - -def _get_ran_hex(length: int = 32) -> str: - """ - Returns random hex string - """ - return "".join(random.choice("0123456789abcdef") for _ in range(length)) - - -class _ChatHubRequest: - """ - Request object for ChatHub - """ - - def __init__( - self, - conversation_signature: str, - client_id: str, - conversation_id: str, - invocation_id: int = 0, - ) -> None: - self.struct: dict = {} - - self.client_id: str = client_id - self.conversation_id: str = conversation_id - self.conversation_signature: str = conversation_signature - self.invocation_id: int = invocation_id - - def update( - self, - prompt, - conversation_style, - options, - ) -> None: - """ - Updates request object - """ - if options is None: - options = [ - "deepleo", - "enable_debug_commands", - "disable_emoji_spoken_text", - "enablemm", - ] - if conversation_style: - if not isinstance(conversation_style, ConversationStyle): - conversation_style = getattr(ConversationStyle, conversation_style) - options = [ - 
"nlu_direct_response_filter", - "deepleo", - "disable_emoji_spoken_text", - "responsible_ai_policy_235", - "enablemm", - conversation_style.value, - "dtappid", - "cricinfo", - "cricinfov2", - "dv3sugg", - ] - self.struct = { - "arguments": [ - { - "source": "cib", - "optionsSets": options, - "sliceIds": [ - "222dtappid", - "225cricinfo", - "224locals0", - ], - "traceId": _get_ran_hex(32), - "isStartOfSession": self.invocation_id == 0, - "message": { - "author": "user", - "inputMethod": "Keyboard", - "text": prompt, - "messageType": "Chat", - }, - "conversationSignature": self.conversation_signature, - "participant": { - "id": self.client_id, - }, - "conversationId": self.conversation_id, - }, - ], - "invocationId": str(self.invocation_id), - "target": "chat", - "type": 4, - } - self.invocation_id += 1 - - -class _Conversation: - """ - Conversation API - """ - - def __init__( - self, - cookies, - proxy, - ) -> None: - self.struct: dict = { - "conversationId": None, - "clientId": None, - "conversationSignature": None, - "result": {"value": "Success", "message": None}, - } - import httpx - self.proxy = proxy - proxy = ( - proxy - or os.environ.get("all_proxy") - or os.environ.get("ALL_PROXY") - or os.environ.get("https_proxy") - or os.environ.get("HTTPS_PROXY") - or None - ) - if proxy is not None and proxy.startswith("socks5h://"): - proxy = "socks5://" + proxy[len("socks5h://") :] - self.session = httpx.Client( - proxies=proxy, - timeout=30, - headers=HEADERS_INIT_CONVER, - ) - for cookie in cookies: - self.session.cookies.set(cookie["name"], cookie["value"]) - - # Send GET request - response = self.session.get( - url=os.environ.get("BING_PROXY_URL") - or "https://edgeservices.bing.com/edgesvc/turing/conversation/create", - ) - if response.status_code != 200: - response = self.session.get( - "https://edge.churchless.tech/edgesvc/turing/conversation/create", - ) - if response.status_code != 200: - print(f"Status code: {response.status_code}") - print(response.text) - print(response.url) - raise Exception("Authentication failed") - try: - self.struct = response.json() - except (json.decoder.JSONDecodeError, NotAllowedToAccess) as exc: - raise Exception( - "Authentication failed. 
You have not been accepted into the beta.", - ) from exc - if self.struct["result"]["value"] == "UnauthorizedRequest": - raise NotAllowedToAccess(self.struct["result"]["message"]) - - -class _ChatHub: - """ - Chat API - """ - - def __init__(self, conversation) -> None: - self.wss = None - self.request: _ChatHubRequest - self.loop: bool - self.task: asyncio.Task - print(conversation.struct) - self.request = _ChatHubRequest( - conversation_signature=conversation.struct["conversationSignature"], - client_id=conversation.struct["clientId"], - conversation_id=conversation.struct["conversationId"], - ) - - async def ask_stream( - self, - prompt: str, - wss_link: str, - conversation_style: CONVERSATION_STYLE_TYPE = None, - raw: bool = False, - options: dict = None, - ) -> Generator[str, None, None]: - """ - Ask a question to the bot - """ - if self.wss and not self.wss.closed: - await self.wss.close() - # Check if websocket is closed - self.wss = await websockets.connect( - wss_link, - extra_headers=HEADERS, - max_size=None, - ssl=get_ssl_context() - ) - await self._initial_handshake() - # Construct a ChatHub request - self.request.update( - prompt=prompt, - conversation_style=conversation_style, - options=options, - ) - # Send request - await self.wss.send(_append_identifier(self.request.struct)) - final = False - while not final: - objects = str(await self.wss.recv()).split(DELIMITER) - for obj in objects: - if obj is None or not obj: - continue - response = json.loads(obj) - if response.get("type") != 2 and raw: - yield False, response - elif response.get("type") == 1 and response["arguments"][0].get( - "messages", - ): - resp_txt = response["arguments"][0]["messages"][0]["adaptiveCards"][ - 0 - ]["body"][0].get("text") - yield False, resp_txt - elif response.get("type") == 2: - final = True - yield True, response - - async def _initial_handshake(self) -> None: - await self.wss.send(_append_identifier({"protocol": "json", "version": 1})) - await self.wss.recv() - - async def close(self) -> None: - """ - Close the connection - """ - if self.wss and not self.wss.closed: - await self.wss.close() - - -class NewbingChatbot: - """ - Combines everything to make it seamless - """ - - def __init__( - self, - cookies, - proxy - ) -> None: - if cookies is None: - cookies = {} - self.cookies = cookies - self.proxy = proxy - self.chat_hub: _ChatHub = _ChatHub( - _Conversation(self.cookies, self.proxy), - ) - - async def ask( - self, - prompt: str, - wss_link: str, - conversation_style: CONVERSATION_STYLE_TYPE = None, - options: dict = None, - ) -> dict: - """ - Ask a question to the bot - """ - async for final, response in self.chat_hub.ask_stream( - prompt=prompt, - conversation_style=conversation_style, - wss_link=wss_link, - options=options, - ): - if final: - return response - await self.chat_hub.wss.close() - return None - - async def ask_stream( - self, - prompt: str, - wss_link: str, - conversation_style: CONVERSATION_STYLE_TYPE = None, - raw: bool = False, - options: dict = None, - ) -> Generator[str, None, None]: - """ - Ask a question to the bot - """ - async for response in self.chat_hub.ask_stream( - prompt=prompt, - conversation_style=conversation_style, - wss_link=wss_link, - raw=raw, - options=options, - ): - yield response - - async def close(self) -> None: - """ - Close the connection - """ - await self.chat_hub.close() - - async def reset(self) -> None: - """ - Reset the conversation - """ - await self.close() - self.chat_hub = _ChatHub(_Conversation(self.cookies, self.proxy)) - - diff 
--git a/spaces/ezioruan/roop/roop/utilities.py b/spaces/ezioruan/roop/roop/utilities.py deleted file mode 100644 index 90c8d981f5f159a459ca0c08cc23dfac8d04c068..0000000000000000000000000000000000000000 --- a/spaces/ezioruan/roop/roop/utilities.py +++ /dev/null @@ -1,141 +0,0 @@ -import glob -import mimetypes -import os -import platform -import shutil -import ssl -import subprocess -import urllib -from pathlib import Path -from typing import List, Any -from tqdm import tqdm - -import roop.globals - -TEMP_FILE = 'temp.mp4' -TEMP_DIRECTORY = 'temp' - -# monkey patch ssl for mac -if platform.system().lower() == 'darwin': - ssl._create_default_https_context = ssl._create_unverified_context - - -def run_ffmpeg(args: List[str]) -> bool: - commands = ['ffmpeg', '-hide_banner', '-hwaccel', 'auto', '-loglevel', roop.globals.log_level] - commands.extend(args) - try: - subprocess.check_output(commands, stderr=subprocess.STDOUT) - return True - except Exception: - pass - return False - - -def detect_fps(target_path: str) -> float: - command = ['ffprobe', '-v', 'error', '-select_streams', 'v:0', '-show_entries', 'stream=r_frame_rate', '-of', 'default=noprint_wrappers=1:nokey=1', target_path] - output = subprocess.check_output(command).decode().strip().split('/') - try: - numerator, denominator = map(int, output) - return numerator / denominator - except Exception: - pass - return 30.0 - - -def extract_frames(target_path: str) -> None: - temp_directory_path = get_temp_directory_path(target_path) - run_ffmpeg(['-i', target_path, '-pix_fmt', 'rgb24', os.path.join(temp_directory_path, '%04d.png')]) - - -def create_video(target_path: str, fps: float = 30.0) -> None: - temp_output_path = get_temp_output_path(target_path) - temp_directory_path = get_temp_directory_path(target_path) - run_ffmpeg(['-r', str(fps), '-i', os.path.join(temp_directory_path, '%04d.png'), '-c:v', roop.globals.video_encoder, '-crf', str(roop.globals.video_quality), '-pix_fmt', 'yuv420p', '-vf', 'colorspace=bt709:iall=bt601-6-625:fast=1', '-y', temp_output_path]) - - -def restore_audio(target_path: str, output_path: str) -> None: - temp_output_path = get_temp_output_path(target_path) - done = run_ffmpeg(['-i', temp_output_path, '-i', target_path, '-c:v', 'copy', '-map', '0:v:0', '-map', '1:a:0', '-y', output_path]) - if not done: - move_temp(target_path, output_path) - - -def get_temp_frame_paths(target_path: str) -> List[str]: - temp_directory_path = get_temp_directory_path(target_path) - return glob.glob((os.path.join(glob.escape(temp_directory_path), '*.png'))) - - -def get_temp_directory_path(target_path: str) -> str: - target_name, _ = os.path.splitext(os.path.basename(target_path)) - target_directory_path = os.path.dirname(target_path) - return os.path.join(target_directory_path, TEMP_DIRECTORY, target_name) - - -def get_temp_output_path(target_path: str) -> str: - temp_directory_path = get_temp_directory_path(target_path) - return os.path.join(temp_directory_path, TEMP_FILE) - - -def normalize_output_path(source_path: str, target_path: str, output_path: str) -> Any: - if source_path and target_path: - source_name, _ = os.path.splitext(os.path.basename(source_path)) - target_name, target_extension = os.path.splitext(os.path.basename(target_path)) - if os.path.isdir(output_path): - return os.path.join(output_path, source_name + '-' + target_name + target_extension) - return output_path - - -def create_temp(target_path: str) -> None: - temp_directory_path = get_temp_directory_path(target_path) - 
Path(temp_directory_path).mkdir(parents=True, exist_ok=True) - - -def move_temp(target_path: str, output_path: str) -> None: - temp_output_path = get_temp_output_path(target_path) - if os.path.isfile(temp_output_path): - if os.path.isfile(output_path): - os.remove(output_path) - shutil.move(temp_output_path, output_path) - - -def clean_temp(target_path: str) -> None: - temp_directory_path = get_temp_directory_path(target_path) - parent_directory_path = os.path.dirname(temp_directory_path) - if not roop.globals.keep_frames and os.path.isdir(temp_directory_path): - shutil.rmtree(temp_directory_path) - if os.path.exists(parent_directory_path) and not os.listdir(parent_directory_path): - os.rmdir(parent_directory_path) - - -def has_image_extension(image_path: str) -> bool: - return image_path.lower().endswith(('png', 'jpg', 'jpeg', 'webp')) - - -def is_image(image_path: str) -> bool: - if image_path and os.path.isfile(image_path): - mimetype, _ = mimetypes.guess_type(image_path) - return bool(mimetype and mimetype.startswith('image/')) - return False - - -def is_video(video_path: str) -> bool: - if video_path and os.path.isfile(video_path): - mimetype, _ = mimetypes.guess_type(video_path) - return bool(mimetype and mimetype.startswith('video/')) - return False - - -def conditional_download(download_directory_path: str, urls: List[str]) -> None: - if not os.path.exists(download_directory_path): - os.makedirs(download_directory_path) - for url in urls: - download_file_path = os.path.join(download_directory_path, os.path.basename(url)) - if not os.path.exists(download_file_path): - request = urllib.request.urlopen(url) # type: ignore[attr-defined] - total = int(request.headers.get('Content-Length', 0)) - with tqdm(total=total, desc='Downloading', unit='B', unit_scale=True, unit_divisor=1024) as progress: - urllib.request.urlretrieve(url, download_file_path, reporthook=lambda count, block_size, total_size: progress.update(block_size)) # type: ignore[attr-defined] - - -def resolve_relative_path(path: str) -> str: - return os.path.abspath(os.path.join(os.path.dirname(__file__), path)) diff --git a/spaces/facebook/MusicGen/audiocraft/adversarial/__init__.py b/spaces/facebook/MusicGen/audiocraft/adversarial/__init__.py deleted file mode 100644 index 864058706fbfae13d7f7dc850cc411a2f27d1510..0000000000000000000000000000000000000000 --- a/spaces/facebook/MusicGen/audiocraft/adversarial/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. -"""Adversarial losses and discriminator architectures.""" - -# flake8: noqa -from .discriminators import ( - MultiPeriodDiscriminator, - MultiScaleDiscriminator, - MultiScaleSTFTDiscriminator -) -from .losses import ( - AdversarialLoss, - AdvLossType, - get_adv_criterion, - get_fake_criterion, - get_real_criterion, - FeatLossType, - FeatureMatchingLoss -) diff --git a/spaces/facebook/MusicGen/audiocraft/utils/profiler.py b/spaces/facebook/MusicGen/audiocraft/utils/profiler.py deleted file mode 100644 index b45b6d15910b50305c7b212c089ffad3c25b324d..0000000000000000000000000000000000000000 --- a/spaces/facebook/MusicGen/audiocraft/utils/profiler.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
- -import logging -import typing as tp - -import dora -import torch - - -logger = logging.getLogger(__name__) - - -class Profiler: - """Context manager wrapper for xformers profiler. - """ - def __init__(self, module: torch.nn.Module, enabled: bool = False): - self.profiler: tp.Optional[tp.Any] = None - if enabled: - from xformers.profiler import profile - output_dir = dora.get_xp().folder / 'profiler_data' - logger.info("Profiling activated, results with be saved to %s", output_dir) - self.profiler = profile(output_dir=output_dir, module=module) - - def step(self): - if self.profiler is not None: - self.profiler.step() # type: ignore - - def __enter__(self): - if self.profiler is not None: - return self.profiler.__enter__() # type: ignore - - def __exit__(self, exc_type, exc_value, exc_tb): - if self.profiler is not None: - return self.profiler.__exit__(exc_type, exc_value, exc_tb) # type: ignore diff --git a/spaces/falterWliame/Face_Mask_Detection/Best Service Cult Sampler V1.0 Hybrid.rar.rar ((NEW)).md b/spaces/falterWliame/Face_Mask_Detection/Best Service Cult Sampler V1.0 Hybrid.rar.rar ((NEW)).md deleted file mode 100644 index 5ae6fd5a101c00a6fb2de4e44c452368c0f3fdf0..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Best Service Cult Sampler V1.0 Hybrid.rar.rar ((NEW)).md +++ /dev/null @@ -1,9 +0,0 @@ - -

      When a product is perceived as "the best" by its target customers, they are likely to view that brand favorably. For example, if your company provides a great customer service experience, your clients will love your company and tell others about it. In the competitive industries that we work in, our reputation is very important and vital to our continued success. I've heard so many horror stories about other agencies that have messed up timelines, made people wait for hours on end, and so on. I can guarantee that you will get the most professional service from our team.

      -

      Best Service Cult Sampler v1.0 Hybrid.rar.rar


      Downloadhttps://urlca.com/2uDc7g



      -

      The 9th edition in the Best Service series, one of the best cult samplers ever released! I've included the band's releases up until the point when they decided to drop the electronic hip-hop style of their music and focus on more ambient styles. This is the first release in the series. Enjoy!

      -

      A taster of releases which will be coming to this series in the near future, this first release includes tracks by Samael, Thomas Paul, Kaiti (aka Kato), Tetsuo V, Ryuichi Sakamoto, D-Zone, Remo, and many, many more. It's a return to the "best service" spirit, which each of the releases will take from the latest developments in electronica, hip-hop and world music over the past few years.

      -

      Best Service Cult Sampler v1.0 1 dvd. SAMPLING MUSIC ARTIST X. COMEDY. STREET (2006. MUSIC RECORDING) === 152 Recording Songbook.zip. The material on this site is for distribution free of charge provided that this notice is not removed or altered.

      -

      899543212b
      -
      -
      \ No newline at end of file diff --git a/spaces/falterWliame/Face_Mask_Detection/Machine Type And Serial Number Are Invalid Lenovo Tabletl.md b/spaces/falterWliame/Face_Mask_Detection/Machine Type And Serial Number Are Invalid Lenovo Tabletl.md deleted file mode 100644 index 666c7506e435f9f9a868a338598ce653780c2f8b..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Machine Type And Serial Number Are Invalid Lenovo Tabletl.md +++ /dev/null @@ -1,10 +0,0 @@ -
      -

      Hello,
      I have tried many solutions but none worked. My Lenovo S850 shows the BIOS in flash mode, but when I insert my SIM it shows "invalid usbcard". I tried to change my SMBIOS, but it still shows "invalid usbcard".
      Please help
      Thanks in advance
      shyam

      -

      Machine Type And Serial Number Are Invalid Lenovo Tabletl


      DOWNLOAD ->>> https://urlca.com/2uDdya



      -

      First, power on the system, press Escape and go into setup. Depending on the model, you may instead need to press F1, F2, F12, or Del. Your system has a serial number, MTM or model number. If the system is an S62 and you're using Windows, open the Start Menu and type wmic into the Search box.
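      If you only need to read these values under Windows, the usual command-line queries (mentioned here as a general illustration, not as Lenovo-specific instructions) are:
      wmic csproduct get name,identifyingnumber
      wmic bios get serialnumber
      The first command returns the machine type/model together with the serial number, and the second returns the serial number alone. On recent Windows releases WMIC is deprecated, so you may have to use PowerShell's Get-CimInstance Win32_BIOS instead.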

      -

      Your serial number, MTM or model number may be stored in the system BIOS, which you can open by pressing F1, F2, F12, or Del during startup. The backup BIOS settings will also be lost, but you can restore the system using a factory reset.

      -

      An additional look at the CPU-Z report for a Lenovo system shows that the two values are identical. You can also enter setup mode by pressing F1, F2, F12, or Del; this utility is recommended as an alternative to the BIOS. In most cases, you can find your serial number on the system itself or in the relevant system folder. Lenovo keeps your serial number for legal reasons. To determine whether your serial number has been defaced, see the Determine Serial Number section.
      NOTE: If you modify the BIOS, the newly generated information may be incorrect or invalid. The correct information in the MTFD0040 file will be updated when you press F10. Find a Lenovo system box and open the manual.

      -

      The next step is to reinstall the software, after which you must change all of the required settings. The previous step will make your computer display information regarding the time and date you connect to the internet, and whether you have changed the BIOS settings. The system machine type and model (MTM), serial number, or system brand name in the BIOS menu can be corrected using a command line program via the system BIOS flash utility. The following steps are similar to the steps for repairing Windows.
      NOTE: If you modify the BIOS, the newly generated information may be incorrect or invalid.
      Restart the computer, and then immediately press the F10 key to enter the BIOS setup mode.
      You will need to enter the BIOS setup menu through the setup dialog.

      -

      899543212b
      -
      -
      \ No newline at end of file diff --git a/spaces/falterWliame/Face_Mask_Detection/Noteburner Itunes Drm Audio Converter Serial 47 ((HOT)).md b/spaces/falterWliame/Face_Mask_Detection/Noteburner Itunes Drm Audio Converter Serial 47 ((HOT)).md deleted file mode 100644 index 74a757d9f74a2535d45624ad75f5c42e7eada3e4..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Noteburner Itunes Drm Audio Converter Serial 47 ((HOT)).md +++ /dev/null @@ -1,6 +0,0 @@ -

      Noteburner Itunes Drm Audio Converter Serial 47


      Download →→→ https://urlca.com/2uDdEI



      -
      - 4d29de3e1b
      -
      -
      -

      diff --git a/spaces/falterWliame/Face_Mask_Detection/Principles Of Teaching 2 By Brenda Corpuz Pdf 417.md b/spaces/falterWliame/Face_Mask_Detection/Principles Of Teaching 2 By Brenda Corpuz Pdf 417.md deleted file mode 100644 index c6d7d57d2785853cb2c5a6807e9f56b5bb0d70af..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Principles Of Teaching 2 By Brenda Corpuz Pdf 417.md +++ /dev/null @@ -1,8 +0,0 @@ - -

      Economic reasons also drove this development. The rising costs of teaching and the decreasing number of qualified teachers had led to a loss of... challenges of teaching (2003: 16).
      f7225991cd

      -

      To teach involves problem solving. Few tasks in basic school... Teachers are not freely in... the teaching of teacher-centered instruction (Corpuz & Salandanan)... to promote the culture that should be taught (2003: 20).
      f7225991cd

      -

      principles of teaching 2 by brenda corpuz pdf 417


      DOWNLOADhttps://urlca.com/2uDcYe



      -

      51. Prioritize the elements in a lesson (2003: 41)... The teaching of content and the teaching of pedagogy are as different as the teaching of science. The teacher's art is to learn to judge for the learner what... the teaching of student-centred pedagogy: approaches and trends (2003: 41).
      f7225991cd

      -

      5. It is therefore essential to ask not only what happened, but why and who did it, in order to understand the forces that brought about a particular outcome. Principles of Teaching 2 by Brenda Corpuz. The Principles of Environmental Justice, drafted at the First People of Color Environmental Leadership Summit in 1991, responded directly to the conditions of environmental racism. These principles are rooted in holistic vision, self-determination, repair and redress, and a core belief that all people have the right to a healthy environment that enriches life. The Principles reflect the need to center in policymaking decisions the communities most impacted by environmental risks and harms and too long marginalized from the decisions that have shaped their health, welfare, and well-being.

      899543212b
      -
      -
      \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Become a Detective with Criminal Case The Free Hidden Object Game You Can Download Today.md b/spaces/fatiXbelha/sd/Become a Detective with Criminal Case The Free Hidden Object Game You Can Download Today.md deleted file mode 100644 index 446a0ce33a4adeacda462ec5772652661466b6b7..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Become a Detective with Criminal Case The Free Hidden Object Game You Can Download Today.md +++ /dev/null @@ -1,183 +0,0 @@ - -

      Criminal Case Free Download: How to Play the Popular Facebook Game on Your Android Device

      -

      If you are a fan of hidden object games and murder mysteries, you have probably heard of Criminal Case, one of the most popular Facebook games with over 100 million players. But did you know that you can also play this game on your Android device for free? In this article, we will show you how to download, install, and play Criminal Case on your smartphone, and why you should give it a try.

      -

      What is Criminal Case?

      -

      Criminal Case is a puzzle game that follows the standard rules of the hidden object genre: it challenges you to solve a series of murder cases using the clues you collect at the crime scenes and in the houses of the criminals you have to interrogate throughout the game.

      -

      criminal case free download


      Download - https://urllie.com/2uNyxH



      -

      The game was developed by Pretty Simple, a French studio, and was released on Facebook in 2012. Since then, it has become one of the most successful games on the social network, winning several awards and attracting millions of fans. In 2015, the game was also released for iOS and Android devices, allowing players to enjoy it on their mobile phones or tablets.

      -

      Criminal Case has several features that make it stand out from other hidden object games, such as:

      -
        -
      • A captivating storyline that takes you to different locations and scenarios, from a peaceful park to a haunted mansion.
      • -
      • A variety of characters that you can interact with, from suspects and witnesses to your fellow detectives and your loyal dog.
      • -
      • A realistic and detailed graphics that create an immersive atmosphere.
      • -
      • A dynamic gameplay that includes not only finding hidden objects, but also analyzing evidence, interrogating suspects, and making decisions that affect the outcome of the case.
      • -
      • A competitive ranking system that lets you compare your score with other players and earn rewards.
      • -
      • A social aspect that allows you to cooperate with your friends, send and receive gifts, and join a team.
      • -
      -

      How to Download Criminal Case for Free on Android

      -

      If you want to play Criminal Case on your Android device, you have two options: you can either download it from Uptodown or from Google Play Store. Both options are free and safe, but there are some differences between them.

      -

      Step-by-step guide to install the game from Uptodown

      -
        -
      1. Go to https://criminal-case.en.uptodown.com/android on your browser.
      2. -
      3. Tap on the green "Download" button and wait for the APK file to be downloaded.
      4. -
      5. Once the download is complete, tap on the notification or go to your downloads folder and tap on the file.
      6. -
      7. If you see a warning message saying that your device is set to block installation of apps obtained from unknown sources, go to your settings and enable this option.
      8. -
      9. Follow the instructions on the screen and tap on "Install" when prompted.
      10. -
      11. Wait for the installation process to finish and then tap on "Open" to launch the game.
      12. -
      -

      Step-by-step guide to install the game from Google Play Store

      -
        -
      1. Go to https://play.google.com/store/apps/details?id=com.prettys ygames.criminalcaseandroid&hl=en_US&gl=US on your browser or open the Google Play Store app on your device.
      2. -
      3. Tap on the green "Install" button and wait for the game to be downloaded and installed.
      4. -
      5. Once the installation is complete, tap on "Open" to launch the game.
      6. -
      -

      How to link your progress with your Facebook account

      -

      If you have already played Criminal Case on Facebook and you want to continue your progress on your Android device, you can easily link your accounts and sync your data. To do this, you need to:

      -
        -
      1. Open the game on your Android device and tap on the gear icon on the top right corner of the screen.
      2. -
      3. Tap on "Connect" and then on "Facebook".
      4. -
      5. Log in with your Facebook credentials and grant the necessary permissions.
      6. -
      7. Wait for the game to load your data and confirm that you want to continue with your existing profile.
      8. -
      9. Enjoy playing Criminal Case on your Android device with all your progress, friends, and rewards.
      10. -
      -

      How to Play Criminal Case on Android

      -

      Playing Criminal Case on Android is very similar to playing it on Facebook, but there are some differences in the interface and the controls. Here are some of the basics you need to know to enjoy the game on your smartphone.

      -

      The basics of the gameplay and the controls

      -

      The main objective of the game is to solve a series of murder cases by finding clues, analyzing evidence, interrogating suspects, and arresting the killer. Each case has several scenes that you need to explore by tapping on the objects that are listed at the bottom of the screen. The faster you find them, the higher your score will be. You can also use hints, tools, and boosters to help you in your investigation.

      -

      To navigate through the game, you can use the menu bar at the bottom of the screen, which has five icons: Home, Cases, Team, Shop, and More. You can also swipe left or right to access different sections of the game, such as your profile, your inbox, your achievements, and your settings.

      -

      criminal case game download for android
      -criminal case apk free download
      -criminal case app download
      -criminal case offline game free download
      -criminal case mod apk download
      -criminal case game download for pc
      -criminal case mystery of the past free download
      -criminal case game free download for windows 10
      -criminal case save the world free download
      -criminal case pacific bay free download
      -criminal case game free download full version
      -criminal case hidden object game free download
      -criminal case hack apk free download
      -criminal case game free download for laptop
      -criminal case unlimited energy apk free download
      -criminal case game online play free without downloading
      -criminal case the conspiracy free download
      -criminal case travel in time free download
      -criminal case game free download for windows 7
      -criminal case mod apk unlimited energy and money free download
      -criminal case game free download for android mobile
      -criminal case facebook game free download
      -criminal case game free download for mac
      -criminal case unlimited stars apk free download
      -criminal case game free online no download
      -criminal case adventure game free download
      -criminal case mod apk latest version free download
      -criminal case game setup free download for pc
      -criminal case hidden crimes free download
      -criminal case world edition game free download for pc
      -criminal case mod apk offline free download
      -criminal case game cheats free download
      -criminal case hack tool free download no survey no password
      -criminal case supernatural investigations free download
      -criminal case mod apk unlimited everything free download
      -criminal case game for pc windows 10 free download
      -criminal case hack online generator tool activation key free download
      -criminal case mysteries of the past mod apk free download
      -criminal case pacific bay mod apk unlimited energy and money free download
      -how to play criminal case game offline without internet connection for free on android devices
      -where can i find the best site to download the latest version of the popular hidden object adventure game Criminal Case for my smartphone or tablet device
      -what are some tips and tricks to solve the murder cases faster and easier in the addictive detective-themed puzzle game Criminal Case
      -how to get unlimited coins and cash in Criminal Case without spending any real money or using any third-party apps or tools
      -how to join a team and play with other players from around the world in Criminal Case and earn more rewards and bonuses
      -how to unlock new crime scenes and episodes in Criminal Case and explore different locations and themes
      -how to collect and analyze clues and evidence in Criminal Case and interrogate suspects and witnesses
      -how to feed and train your loyal dog companion in Criminal Case and use its special abilities to help you in your investigations
      -how to connect your Criminal Case account to Facebook and sync your progress across multiple devices
      -how to earn more stars and energy in Criminal Case and use them wisely to advance in the game
      -how to contact the customer support team of Pretty Simple, the developer of Criminal Case, if you have any questions or issues with the game

      -

      The different types of scenes and minigames

      -

      Criminal Case has several types of scenes that you need to complete in order to advance in the case. The most common ones are:

      -
        -
      • Hidden Object Scenes: These are the scenes where you need to find a list of objects in a cluttered environment. You can zoom in and out by pinching the screen, and tap on an object to select it. Some objects are hidden inside containers or behind other objects, so you need to tap on them first to reveal them. You can also use hints by tapping on the light bulb icon at the bottom right corner of the screen.
      • -
      • Puzzle Scenes: These are scenes where you need to solve a puzzle by rearranging pieces, connecting wires, matching symbols, or finding differences. You can drag and drop pieces by tapping and holding them, and rotate them by tapping twice. You can also use hints by tapping on the light bulb icon at the bottom right corner of the screen.
      • -
      • Action Scenes: These are scenes where you need to perform an action by tapping, swiping, or tilting your device. For example, you may need to chase a suspect, shoot a target, or dodge an obstacle. You can also use tools by tapping on their icons at the bottom of the screen.
      • -
      -

      Besides these scenes, there are also some minigames that you need to play in order to analyze evidence or interrogate suspects. These minigames include:

      -
        -
      • Evidence Analysis: These are minigames where you need to compare fingerprints, DNA samples, blood types, or other clues by tapping on them or dragging them to their corresponding slots. You can also use hints by tapping on the light bulb icon at the bottom right corner of the screen.
      • -
      • Suspect Interrogation: These are minigames where you need to question a suspect by choosing one of three options: Good Cop, Bad Cop, or Accuse. Depending on your choice, you will get different reactions from the suspect and different information about the case. You can also use tools by tapping on their icons at the bottom of the screen.
      • -
      -

      The rewards and the ranking system

      -

      As you play Criminal Case on Android, you will earn various rewards that will help you in your investigation. Some of these rewards are:

      -
        -
      • Stars: These are the main currency of the game that you can use to unlock new scenes, analyze evidence, interrogate suspects, or buy items from the shop. You can earn stars by completing scenes or minigames, or by exchanging cards with your friends.
      • -
      • Coins: These are the secondary currency of the game that you can use to buy hints, tools, boosters, or outfits from the shop. You can earn coins by completing scenes or minigames, or by receiving gifts from your friends.
      • -
      • Energy: This is the resource that you need to play scenes or minigames. You have a maximum of 110 energy points, and each scene or minigame costs 20 energy points to play. You can replenish your energy by waiting for it to regenerate over time, by eating snacks, by receiving gifts from your friends, or by buying energy packs from the shop.
      • -
      • Lucky Cards: These are cards that you can collect by playing scenes or minigames, or by receiving gifts from your friends. You can exchange these cards for stars, coins, energy, hints, tools, boosters, or outfits.
      • -
      • Achievements: These are goals that you can complete by playing the game and earning rewards. You can view your achievements by tapping on the trophy icon on the top left corner of the screen.
      • -
      -

      Besides these rewards, you can also improve your ranking by earning points and badges. You can earn points by completing scenes or minigames, and you can earn badges by solving cases, arresting killers, or reaching milestones. You can view your ranking by tapping on the badge icon on the top left corner of the screen.

      -

      The tips and tricks to solve the cases faster and easier

      -

      Criminal Case is a fun and challenging game that requires attention, memory, and logic skills. However, there are some tips and tricks that can help you solve the cases faster and easier. Here are some of them:

      -
        -
      • Play regularly and frequently. The more you play, the more familiar you will be with the scenes and the clues, and the faster you will find them.
      • -
      • Use hints, tools, and boosters wisely. These are helpful items that can make your investigation easier, but they are also limited and costly. Use them only when you really need them, and save them for the harder scenes or minigames.
      • -
      • Play with your friends. Criminal Case is a social game that allows you to cooperate with your friends, send and receive gifts, and join a team. Playing with your friends can help you earn more rewards, get more clues, and have more fun.
      • -
      • Follow the official pages and groups of Criminal Case. These are sources of information and updates about the game, such as new cases, events, features, or tips. You can also interact with other players and developers there.
      • -
      -

      Why You Should Play Criminal Case on Android

      -

      Criminal Case is a game that has many advantages and benefits for its players. Here are some of the reasons why you should play Criminal Case on Android:

      -

      The benefits of playing the game on your smartphone

      -

      Playing Criminal Case on your Android device has some benefits that you cannot get from playing it on Facebook, such as:

      -
        -
      • You can play it anytime and anywhere. You don't need a computer or a browser to access the game. You just need your smartphone and an internet connection.
      • -
      • You can enjoy a better graphics and sound quality. The game is optimized for mobile devices, so it has a higher resolution and a smoother performance.
      • -
      • You can use touch controls instead of mouse clicks. This makes the gameplay more intuitive and interactive.
      • -
      -

      The challenges and the fun of solving murder mysteries

      -

      Criminal Case is a game that offers a lot of challenges and fun for its players. Some of these are:

      -
        -
      • You can test your skills and knowledge in different fields, such as forensics, psychology, history, geography, or culture.
      • -
      • You can immerse yourself in a captivating storyline that has twists and turns, suspense and drama, humor and romance.
      • -
      • You can explore different locations and scenarios that have their own atmosphere and style.
      • -
      • You can meet different characters that have their own personality and backstory.
      • -
      -

      The social aspect of the game and how to interact with your friends

      -

      Criminal Case is a game that has a strong social aspect that allows you to interact with your friends in different ways. Some of these are:

      -
        -
      • You can cooperate with your friends by sending and receiving gifts, such as energy, coins, stars, lucky cards, hints, tools, or boosters. You can also exchange lucky cards with them to earn more rewards.
      • -
      • You can compete with your friends by comparing your scores and ranks, and challenging them to beat your records. You can also join a team and participate in team events and tournaments.
      • -
      • You can chat with your friends by sending them messages, stickers, or emojis. You can also comment on their posts, like their achievements, or invite them to play new cases.
      • -
      -

      Conclusion

      -

      Criminal Case is a game that has something for everyone. Whether you are looking for a challenge, some fun, or a social experience, you can find it in this game. You can also play it on your Android device for free and enjoy its features and benefits. So what are you waiting for? Download Criminal Case today and join the millions of players who are already hooked on this game. You won't regret it!

      -

      FAQs

      -

      Here are some of the frequently asked questions about Criminal Case:

      - - - - - - - - - - - - - - - - - - - - - - - - - -
      QuestionAnswer
      How many cases are there in Criminal Case?There are currently six seasons in Criminal Case, each with a different theme and location. The first season has 56 cases, the second season has 60 cases, the third season has 59 cases, the fourth season has 56 cases, the fifth season has 60 cases, and the sixth season has 30 cases so far. The developers are constantly adding new cases and seasons to the game.
      How can I get more energy in Criminal Case?You can get more energy in Criminal Case by waiting for it to regenerate over time, by eating snacks, by receiving gifts from your friends, or by buying energy packs from the shop. You can also get free energy by watching ads or completing offers.
      How can I get more stars in Criminal Case?You can get more stars in Criminal Case by completing scenes or minigames, or by exchanging cards with your friends. You can also get free stars by watching ads or completing offers.
      How can I get more coins in Criminal Case?You can get more coins in Criminal Case by completing scenes or minigames, or by receiving gifts from your friends. You can also get free coins by watching ads or completing offers.
      How can I change my avatar or outfit in Criminal Case?You can change your avatar or outfit in Criminal Case by tapping on the gear icon on the top right corner of the screen, and then tapping on "Profile". You can choose from different avatars and outfits that you can buy with coins or stars.

      401be4b1e0
      -
      -
      \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Download Rogue Agents MOD APK and Enjoy Unlimited Money and All Characters.md b/spaces/fatiXbelha/sd/Download Rogue Agents MOD APK and Enjoy Unlimited Money and All Characters.md deleted file mode 100644 index 24d42cd92cac1953773e18e8a091e3ce1b294aa2..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Download Rogue Agents MOD APK and Enjoy Unlimited Money and All Characters.md +++ /dev/null @@ -1,156 +0,0 @@ -
      -

      Rogue Agents Mod APK Unlimited Money: A Guide for Gamers

      -

      If you are looking for a thrilling and action-packed game that will keep you on the edge of your seat, then you should try Rogue Agents. This is a third-person cover-based shooter game that pits you against other players in various modes and maps. You can customize your character, choose your weapons, and use different skills and gadgets to gain an advantage over your enemies. But what if you want to enjoy the game without any limitations or restrictions? Well, there is a way to do that. You can use Rogue Agents Mod APK Unlimited Money, a modified version of the game that gives you unlimited money and free shopping. In this article, we will tell you everything you need to know about this mod apk, how to download and install it, and some tips and tricks to improve your game.

      -

      rogue agents mod apk unlimited money


      Download Zip === https://urllie.com/2uNIqW



      -

      What is Rogue Agents?

      -

      A third-person cover-based shooter game

      -

      Rogue Agents is a game developed by Midnight Games, a studio based in Brazil. It is available for both Android and iOS devices, and it has been downloaded over 5 million times on Google Play Store. The game is inspired by popular titles like Tom Clancy's The Division and Splinter Cell. The game has an open beta status, which means that it is still in development and may have some bugs and glitches. However, the game is updated regularly with new features and improvements.

      -

      Features of the game

      -

      Some of the features of Rogue Agents are:

      -
        -
      • Multiple game modes: You can play solo or with your friends in different modes like Team Deathmatch, Free for All, Capture the Flag, King of the Hill, and more.
      • -
      • Various maps: You can explore different environments like urban areas, industrial zones, military bases, and more.
      • -
      • Character customization: You can choose from different characters with different abilities and outfits. You can also unlock new skins and accessories as you progress in the game.
      • -
      • Weapon selection: You can equip yourself with different weapons like assault rifles, sniper rifles, shotguns, pistols, grenades, and more. You can also upgrade your weapons with attachments like scopes, silencers, magazines, etc.
      • -
      • Skill system: You can use different skills and gadgets to enhance your performance in the game. For example, you can use a drone to scout the area, a shield to protect yourself, a medkit to heal yourself or your teammates, etc.
      • -
      • Graphics and sound: The game has realistic graphics and sound effects that create an immersive gaming experience.
      • -
      -

      What is Rogue Agents Mod APK Unlimited Money?

      -

      A modified version of the game that gives unlimited money and free shopping

      -

      Rogue Agents Mod APK Unlimited Money is a modified version of the original game that gives you unlimited money and free shopping. This means that you can buy anything you want in the game without spending any real money. You can also upgrade your weapons and skills without any limitations. This way, you can enjoy the game without any hassle or frustration.

      -

      Benefits of using the mod apk

      -

      Some of the benefits of using Rogue Agents Mod APK Unlimited Money are:

      -
        -
      • You can unlock all the characters, weapons, skins, accessories, and skills in the game.
      • -
      • You can customize your character according to your preference and style.
      • -
      • You can have an edge over your opponents in the game.
      • -
      • You can have more fun and excitement in the game.
      • -
      -

      How to download and install Rogue Agents Mod APK Unlimited Money?

      -

      Steps to download and install the mod apk

      -

To download and install Rogue Agents Mod APK Unlimited Money on your Android device, follow the steps below; a short scripted sketch of the same flow appears right after the numbered steps.

      -

      rogue agents mod apk unlimited money and gems
      -rogue agents mod apk unlimited money download
      -rogue agents mod apk unlimited money latest version
      -rogue agents mod apk unlimited money and gold
      -rogue agents mod apk unlimited money and diamonds
      -rogue agents mod apk unlimited money free
      -rogue agents mod apk unlimited money hack
      -rogue agents mod apk unlimited money offline
      -rogue agents mod apk unlimited money and coins
      -rogue agents mod apk unlimited money no root
      -rogue agents mod apk unlimited money and ammo
      -rogue agents mod apk unlimited money android 1
      -rogue agents mod apk unlimited money 2023
      -rogue agents mod apk unlimited money and energy
      -rogue agents mod apk unlimited money and weapons
      -rogue agents mod apk unlimited money and skins
      -rogue agents mod apk unlimited money and health
      -rogue agents mod apk unlimited money and premium
      -rogue agents mod apk unlimited money and vip
      -rogue agents mod apk unlimited money and keys
      -rogue agents mod apk unlimited money and credits
      -rogue agents mod apk unlimited money and unlock all
      -rogue agents mod apk unlimited money and one shot kill
      -rogue agents mod apk unlimited money and rapid fire
      -rogue agents mod apk unlimited money and god mode
      -rogue agents mod apk unlimited money and anti ban
      -rogue agents mod apk unlimited money and no ads
      -rogue agents mod apk unlimited money and all levels unlocked
      -rogue agents mod apk unlimited money and all characters unlocked
      -rogue agents mod apk unlimited money and all missions unlocked
      -rogue agents mod apk unlimited money and all modes unlocked
      -rogue agents mod apk unlimited money and all maps unlocked
      -rogue agents mod apk unlimited money and all items unlocked
      -rogue agents mod apk unlimited money and all upgrades unlocked
      -rogue agents mod apk unlimited money and all features unlocked
      -rogue agents mod apk unlimited money and high damage
      -rogue agents mod apk unlimited money and infinite bullets
      -rogue agents mod apk unlimited money and auto aim
      -rogue agents mod apk unlimited money and wall hack
      -rogue agents mod apk unlimited money and speed hack

      -
        -
1. Download the mod apk file from a reliable source. You can use the link below to download it directly. [Download Rogue Agents Mod APK Unlimited Money](^1^)
2. Go to your device settings and tap on Security. Then, enable the option to install unknown apps from your browser. This will allow you to install the mod apk file.
3. Open your file manager app and locate the downloaded mod apk file. Tap on it and follow the instructions to install it.
4. If the game requires an OBB file, you will need to download it separately and copy it to the Android/OBB folder on your device. You can use the link below to download the OBB file. [Download Rogue Agents OBB File](^2^)
5. Once the installation is complete, you can launch the game and enjoy unlimited money and free shopping.
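For readers who prefer to sideload from a computer, here is a minimal sketch of the same flow using Python and adb. It assumes adb is installed and USB debugging is enabled on the phone; the file names and the package id are placeholders rather than the real ones, since they depend on the build you actually downloaded.

```python
import subprocess

# Placeholder names: substitute the files you downloaded and the game's real package id.
APK_PATH = "rogue-agents-mod.apk"
OBB_PATH = "main.1.com.example.rogueagents.obb"
PACKAGE = "com.example.rogueagents"  # hypothetical package id

def run(cmd):
    # Echo each command, then fail loudly if it does not succeed.
    print("+", " ".join(cmd))
    subprocess.run(cmd, check=True)

# Steps 1-3 of the list above: install (sideload) the APK over USB.
run(["adb", "install", "-r", APK_PATH])

# Step 4: copy the OBB expansion file into the folder the game reads from.
run(["adb", "shell", "mkdir", "-p", f"/sdcard/Android/obb/{PACKAGE}"])
run(["adb", "push", OBB_PATH, f"/sdcard/Android/obb/{PACKAGE}/"])
```

If the install step fails with a signature error, uninstall any Play Store copy of the game first, because a modded build is signed with a different key.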

      Tips and tricks to improve your game

      -

      Here are some tips and tricks to help you play better and have more fun in Rogue Agents:

      -
        -
      • Use cover wisely. The game is based on cover-based shooting, so you need to use cover to avoid enemy fire and plan your attacks. You can also switch between different covers by tapping on them.
      • -
      • Use your skills and gadgets strategically. You have a limited number of skills and gadgets that you can use in each match, so you need to use them wisely. For example, you can use a drone to scout the area, a shield to protect yourself, a medkit to heal yourself or your teammates, etc.
      • -
      • Choose your weapons carefully. You have a primary weapon, a secondary weapon, and a melee weapon that you can use in the game. You can also upgrade your weapons with attachments like scopes, silencers, magazines, etc. You need to choose your weapons according to your playstyle and the map.
      • -
      • Customize your character. You can choose from different characters with different abilities and outfits. You can also unlock new skins and accessories as you progress in the game. You can customize your character according to your preference and style.
      • -
      • Play with your friends. You can invite your friends to join your team or create a private room to play with them. You can also chat with them using voice or text messages. Playing with your friends can make the game more fun and challenging.
      • -
      -

      Conclusion

      -

      Rogue Agents is a thrilling and action-packed game that will keep you on the edge of your seat. You can play solo or with your friends in different modes and maps. You can customize your character, choose your weapons, and use different skills and gadgets to gain an advantage over your enemies. But if you want to enjoy the game without any limitations or restrictions, you can use Rogue Agents Mod APK Unlimited Money, a modified version of the game that gives you unlimited money and free shopping. You can download and install it easily by following the steps above. So, what are you waiting for? Download Rogue Agents Mod APK Unlimited Money now and have fun!

      -

      FAQs

      -

      Here are some frequently asked questions about Rogue Agents Mod APK Unlimited Money:

Question: Is Rogue Agents Mod APK Unlimited Money safe to use?
Answer: Yes, it is safe to use as long as you download it from a trusted source. However, you should always be careful when installing unknown apps on your device.

Question: Do I need to root my device to use it?
Answer: No, you do not need to root your device. You just need to enable the option to install unknown apps from your browser.

Question: Will I get banned for using it?
Answer: No, you will not get banned. The mod apk does not interfere with the game servers or other players' accounts.

Question: Can I update it?
Answer: No, you cannot update the mod apk directly. If you want to update the game, you will need to uninstall the mod apk and install the original game from the Google Play Store.

Question: Can I play online with other players?
Answer: Yes, you can play online with other players. However, you may encounter some compatibility issues or glitches if the other players are using a different version of the game.

      401be4b1e0
      -
      -
      \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/FNF vs Pou Rap Your Way to Victory on Android.md b/spaces/fatiXbelha/sd/FNF vs Pou Rap Your Way to Victory on Android.md deleted file mode 100644 index 7c3062cfe9f9b156616a51be83e028942315b441..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/FNF vs Pou Rap Your Way to Victory on Android.md +++ /dev/null @@ -1,126 +0,0 @@ - -

      Fnf vs Pou Apk: A Fun and Funky Musical Game

      -

      If you are looking for a new and exciting musical game to play on your Android device, you might want to check out fnf vs pou apk. This is a fan-made mod of the popular rhythm game Friday Night Funkin' (FNF), where you have to compete in freestyle music battles against various opponents. In this mod, you will face Pou, the cute and cuddly alien pet from the famous mobile app game. Are you ready to funk with Pou?

      -

      What is fnf vs pou apk?

      -

      Fnf vs pou apk is a mod of Friday Night Funkin', a game created by ninjamuffin99, PhantomArcade3K, Evilsk8r, and Kawai Sprite. The game features Boyfriend, a blue-haired rapper who wants to impress his Girlfriend by winning musical duels against her parents and other characters. The game is inspired by rhythm games like Dance Dance Revolution and PaRappa the Rapper, as well as Newgrounds culture and 90s hip hop.

      -

      fnf vs pou apk


      Download File 🗸🗸🗸 https://urllie.com/2uNzB1



      -

      In fnf vs pou apk, Boyfriend will have to face Pou, a brown blob-like creature who used to be one of the most popular virtual pets on the internet. Pou has grown up and has challenged Boyfriend to a musical battle, so you have to help him defeat this brown poop. The mod features four original songs: Sky Jump, Food Drop, Memory Hill, and Drive Murder Kill Pou. The mod was developed by Leunam, who is also the author, musician, programmer, and artist of the project.

      -

      Why is fnf vs pou apk popular?

      -

      Fnf vs pou apk is popular because it combines two well-known and beloved games: Friday Night Funkin' and Pou. Both games have millions of fans around the world, who enjoy their simple yet addictive gameplay and charming graphics. Fnf vs pou apk offers a fresh and funny twist on the original fnf game, with Pou as a hilarious and unexpected antagonist. The mod also has catchy and upbeat songs that will make you want to dance along.

      -

      Another reason why fnf vs pou apk is popular is that it is easy to play and accessible to anyone. You don't need to download or install anything to play the game, as it is available online on various websites. You can also play it on any device that supports HTML5 technology, such as your browser, phone, tablet, or laptop. You can also choose between different modes and difficulty levels, depending on your preference and skill.

      -

      What are the features of fnf vs pou apk?

      -

      Fnf vs pou apk has many features that make it an enjoyable and entertaining game. Some of these features are:

      -
        -
      • A unique and original storyline that pits Boyfriend against Pou in a musical showdown.
      • -
      • Four new songs that are catchy, funky, and fun to play.
      • -
      • A variety of modes to choose from: story mode, free play mode, or practice mode.
      • -
      • A range of difficulty levels to suit your ability: easy, normal, hard, or extreme.
      • -
      • A colorful and cartoonish graphics style that matches the tone of the game.
      • -
      • A simple and intuitive gameplay that requires only four arrow keys to play.
      • -
      • A lively and humorous dialogue between Boyfriend and Pou that adds personality to the game.
      • -
      -

      How to play fnf vs pou apk?

      -

      Playing fnf vs pou apk is very easy and straightforward. Here are the steps you need to follow:

      -
1. Download and install the game from a reliable source. You can find the link to the official website of the mod in the references section below. Alternatively, you can play the game online on various websites that host fnf mods, such as GameBanana or Newgrounds. Make sure you have a stable internet connection and a compatible device.
2. Choose a mode and a character. You can play the game in story mode, where you will follow the plot and face Pou in four different songs. You can also play in free play mode, where you can choose any song and any character to play as, or practice your skills in practice mode, where you can adjust the speed and the notes of the song. You can play as Boyfriend or Pou, depending on your preference.
3. Press the arrow keys in rhythm with the music. Once the game starts, you will see a set of arrows on the screen that correspond to the four arrow keys on your keyboard. You have to press the right key at the right time, following the beat and the melody of the song. The more accurate you are, the more points you will score and the more likely you will win. If you miss too many notes or press the wrong keys, you will lose your health and eventually lose the game.
4. Win the musical battles against Pou. To win each song, you have to fill up your progress bar more than Pou's by the end of the song. You can do this by hitting more notes than him and avoiding mistakes. You can also use some special moves, such as holding down a key for a long note or pressing multiple keys at once for a combo. Be careful, though, as Pou will also try to distract you with his cute and funny expressions and actions.

      Tips and tricks for fnf vs pou apk

      -

      If you want to improve your performance and enjoy fnf vs pou apk more, here are some tips and tricks that you can use:

      -

      fnf vs pou mod download
      -fnf vs pou game online
      -fnf vs pou android apk
      -fnf vs pou full week
      -fnf vs pou music
      -fnf vs pou unblocked
      -fnf vs pou free play
      -fnf vs pou hard mode
      -fnf vs pou itch.io
      -fnf vs pou song lyrics
      -fnf vs pou rap battle
      -fnf vs pou fan art
      -fnf vs pou remix
      -fnf vs pou gameplay
      -fnf vs pou chart editor
      -fnf vs pou custom skin
      -fnf vs pou animation
      -fnf vs pou reaction
      -fnf vs pou friday night funkin
      -fnf vs pou kade engine
      -fnf vs pou mobile version
      -fnf vs pou new update
      -fnf vs pou cheats
      -fnf vs pou tips and tricks
      -fnf vs pou wiki
      -fnf vs pou story mode
      -fnf vs pou characters
      -fnf vs pou sprites
      -fnf vs pou background
      -fnf vs pou voice acting
      -fnf vs pou memes
      -fnf vs pou ost download
      -fnf vs pou scratch project
      -fnf vs pou roblox game
      -fnf vs pou discord server
      -fnf vs pou youtube video
      -fnf vs pou review and rating
      -fnf vs pou mod apk latest version
      -fnf vs pou how to install on pc
      -fnf vs pou no ads apk
      -fnf vs pou hacked apk unlimited money and health
      -fnf vs pou best strategy and guide
      -fnf vs pou all endings and secrets
      -fnf vs pou crossover with other games or characters
      -fnf vs pou fan made sequel or prequel
      -fnf vs pou alternative download link or mirror site
      -fnf vs pou compatible devices and requirements

      -
        -
      • Practice in free play mode. This mode allows you to play any song and any character without any pressure or consequences. You can use this mode to familiarize yourself with the songs, the characters, and the gameplay. You can also experiment with different difficulty levels and settings to find what suits you best.
      • -
      • Adjust the difficulty level. The game has four difficulty levels: easy, normal, hard, and extreme. Each level has a different number of notes, speed, and complexity. You can choose the level that matches your skill and comfort level. If you find a level too easy or too hard, you can always change it in the options menu.
      • -
      • Collect coins and unlock items. As you play fnf vs pou apk, you will earn coins that you can use to buy items from the shop. These items include different outfits, accessories, backgrounds, and music tracks for your Pou and your Boyfriend. You can use these items to customize your characters and make them look more stylish and cool.
      • -
      • Customize your Pou and your Boyfriend. Besides buying items from the shop, you can also customize your Pou and your Boyfriend by changing their colors, eyes, mouths, hairstyles, hats, glasses, etc. You can access these options by tapping on your character in the main menu or in free play mode. You can create your own unique look for your Pou and your Boyfriend and show them off to your friends.
      • -
      -

      Comparison of fnf vs pou apk with other games

      -

      Fnf vs pou apk is not the only game that you can play if you like musical games or virtual pets. There are many other games that are similar or related to fnf vs pou apk in some way. Here is a comparison of fnf vs pou apk with some of these games:

      -
Fnf vs pou apk vs original fnf game
Similarities: Both are rhythm games that feature Boyfriend as the main character. Both have colorful graphics and catchy songs. Both have story mode, free play mode, and practice mode.
Differences: Fnf vs pou apk has Pou as an opponent, while fnf has various characters from Newgrounds. Fnf vs pou apk has four original songs, while fnf has dozens of songs from different genres. Fnf vs pou apk has coins and items to collect and customize your characters, while fnf does not.

Fnf vs pou apk vs other fnf mods
Similarities: Both are fan-made modifications of fnf that add new content to the game. Both have new songs, characters, graphics, and dialogue. Both are playable online or offline on various devices.
Differences: Fnf vs pou apk has Pou as an opponent, while other fnf mods have different characters from various media and genres. Fnf vs pou apk has a humorous and lighthearted tone, while other fnf mods have different tones, such as horror, romance, or parody. Fnf vs pou apk has coins and items to collect and customize your characters, while other fnf mods may not have this feature.

Fnf vs pou apk vs pou game
Similarities: Both feature Pou as a main character. Both have cute and cartoonish graphics. Both have coins and items to collect and customize your Pou.
Differences: Fnf vs pou apk is a rhythm game, while pou game is a virtual pet game. Fnf vs pou apk has Boyfriend as an opponent, while pou game has no opponents. Fnf vs pou apk has musical battles, while pou game has mini-games and activities.
      -

      Conclusion

      -

      Fnf vs pou apk is a fun and funky musical game that you can play on your Android device. It is a mod of Friday Night Funkin', where you have to help Boyfriend win musical battles against Pou, the adorable alien pet. The game has four original songs, a variety of modes and difficulty levels, and a lot of coins and items to collect and customize your characters. The game also has a unique and humorous storyline, colorful graphics, and simple gameplay. If you like musical games or virtual pets, you should definitely give fnf vs pou apk a try. You will have a blast with Pou!

      -

      Do you want to download fnf vs pou apk and play it right now? You can find the link to the official website of the mod in the references section below. You can also play the game online on various websites that host fnf mods. Just make sure you have a compatible device and a good internet connection. Then, you can start funking with Pou!

      -

      FAQs

      -

      Here are some frequently asked questions about fnf vs pou apk:

      -
        -
      1. Is fnf vs pou apk safe to download and play?
      2. -

        Yes, fnf vs pou apk is safe to download and play, as long as you get it from a reliable source. The official website of the mod is , where you can find the latest version of the game. You can also play the game online on websites that host fnf mods, such as GameBanana or Newgrounds. However, you should always be careful when downloading or playing any game online, as there may be some risks of malware or viruses.

        -
      3. How can I update fnf vs pou apk?
      4. -

        You can update fnf vs pou apk by visiting the official website of the mod and downloading the latest version of the game. You can also check for updates on the mod's social media accounts, such as Twitter or YouTube. You should always update your game to enjoy the newest features and bug fixes.

        -
      5. Can I play fnf vs pou apk on iOS devices?
      6. -

        No, fnf vs pou apk is only available for Android devices at the moment. However, you can still play the game online on your browser using any device that supports HTML5 technology. You can also use an Android emulator on your PC or Mac to play the game.

        -
      7. Can I play fnf vs pou apk with a controller?
      8. -

        Yes, you can play fnf vs pou apk with a controller if you connect it to your device via Bluetooth or USB. You can also use an app like JoyToKey or Xpadder to map your controller buttons to your keyboard keys. However, you may need to adjust some settings in the game or in the app to make sure everything works properly.

        -
      9. Can I play fnf vs pou apk with my friends?
      10. -

        No, fnf vs pou apk is a single-player game that does not support multiplayer mode. However, you can still share your scores and screenshots with your friends on social media or messaging apps. You can also challenge your friends to beat your high scores or complete certain achievements in the game.

        -

      401be4b1e0
      -
      -
      \ No newline at end of file diff --git a/spaces/fazni/Resume-filter-plus-QA-documents/README.md b/spaces/fazni/Resume-filter-plus-QA-documents/README.md deleted file mode 100644 index 88454d3981aa60b4a30b4c4a9f0d5daa92f5e305..0000000000000000000000000000000000000000 --- a/spaces/fazni/Resume-filter-plus-QA-documents/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Resume Filter Plus QA Documents -emoji: 🏆 -colorFrom: red -colorTo: pink -sdk: streamlit -sdk_version: 1.21.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/fb700/chatglm-fitness-RLHF/src/face3d/models/arcface_torch/inference.py b/spaces/fb700/chatglm-fitness-RLHF/src/face3d/models/arcface_torch/inference.py deleted file mode 100644 index 3e5156e8d649954837e397c2ff15ec29995e7502..0000000000000000000000000000000000000000 --- a/spaces/fb700/chatglm-fitness-RLHF/src/face3d/models/arcface_torch/inference.py +++ /dev/null @@ -1,35 +0,0 @@ -import argparse - -import cv2 -import numpy as np -import torch - -from backbones import get_model - - -@torch.no_grad() -def inference(weight, name, img): - if img is None: - img = np.random.randint(0, 255, size=(112, 112, 3), dtype=np.uint8) - else: - img = cv2.imread(img) - img = cv2.resize(img, (112, 112)) - - img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) - img = np.transpose(img, (2, 0, 1)) - img = torch.from_numpy(img).unsqueeze(0).float() - img.div_(255).sub_(0.5).div_(0.5) - net = get_model(name, fp16=False) - net.load_state_dict(torch.load(weight)) - net.eval() - feat = net(img).numpy() - print(feat) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description='PyTorch ArcFace Training') - parser.add_argument('--network', type=str, default='r50', help='backbone network') - parser.add_argument('--weight', type=str, default='') - parser.add_argument('--img', type=str, default=None) - args = parser.parse_args() - inference(args.weight, args.network, args.img) diff --git a/spaces/fclong/summary/fengshen/examples/qa_t5/README.md b/spaces/fclong/summary/fengshen/examples/qa_t5/README.md deleted file mode 100644 index fffd0ac176970683240127ce9f7b29c0f0e0ea97..0000000000000000000000000000000000000000 --- a/spaces/fclong/summary/fengshen/examples/qa_t5/README.md +++ /dev/null @@ -1,98 +0,0 @@ -# 燃灯系列-T5问答模型微调 -## 简介 Brief Introduction - Here are codes for finetuning Randeng-T5-QA-Chinese. The model was pretrained on the Wudao 180G corpus, and finetuned on Chinese SQuAD and CMRC2018 dataset. It can produce a fluent and accurate answer given a passage and question. 
- -这是中文的生成式问答模型[Randeng-T5-QA-Chinese](https://huggingface.co/IDEA-CCNL/Randeng-T5-784M-QA-Chinese)的微调代码。它基于T5-Large结构,使用悟道180G语料在[封神框架](https://github.com/IDEA-CCNL/Fengshenbang-LM/tree/main/fengshen)进行预训练,在ChineseSQuAD和CMRC2018两个阅读理解数据集上进行微调。输入一篇文章和一个问题,可以生成准确流畅的回答。 - -## 模型类别 Model Taxonomy - -| 需求 Demand | 任务 Task | 系列 Series | 模型 Model | 参数 Parameter | 额外 Extra | -| :----: | :----: | :----: | :----: | :----: | :----: | -| 通用 General | 自然语言转换 NLT | 燃灯 Randeng | T5 | 784M | 中文生成式问答 -Chinese Generative Qustion Answering | - -模型架构 -| 配置 | 参数 | -| ---- | ---- | -| encoder layers | 12 | -| encoder_attention_heads | 16 | -| encoder_ffn_dim | 2816 | -| decoder layers | 24 | -| decoder_attention_heads| 16 | -| decoder_ffn_dim | 2816 | -| max_encode_length | 1024 | - -## 模型表现 Performance - - CMRC 2018的测试集上的效果(原始任务是一个起始和结束预测问题,这里作为一个生成回答的问题) - - | model | Contain Answer Rate| RougeL | BLEU-4 |F1 | EM | - |-------|----|----|--------------------|--------|--------| - | Ours | 76.0 | 82.7 |61.1|77.9 |57.1| - - - Our model enjoys a high level of generation quality and accuracy, with 76% of generated answers containing the ground truth. The high RougeL and BLEU-4 reveal the overlap between generated results and ground truth. Our model has a lower EM because it generates complete sentences while golden answers are segmentations of sentences. - - 我们的模型有着极高的生成质量和准确率,76%的回答包含了正确答案(Contain Answer Rate)。RougeL和BLEU-4反映了模型预测结果和标准答案重合的程度。我们的模型EM值较低,因为生成的大部分为完整的句子,而标准答案通常是句子片段。 - - -## 模型 - -T5-Large: [Randeng-T5-784M-QA-Chinese](https://huggingface.co/IDEA-CCNL/Randeng-T5-784M-QA-Chinese) - -文件: - - qa_dataset.py 数据集的处理,包含dataset和dataloader - - finetune_t5_cmrc.py 模型微调核心代码 - - run_finetune.sh, 微调脚本(未安装deepspeed的话strategy参数改为ddp) - - run_predict2.sh 预测脚本 - -## 使用 Usage - -```python -import numpy as np -from transformers import T5Tokenizer,MT5ForConditionalGeneration - -pretrain_path = 'IDEA-CCNL/Randeng-T5-784M-QA-Chinese' -tokenizer=T5Tokenizer.from_pretrained(pretrain_path) -model=MT5ForConditionalGeneration.from_pretrained(pretrain_path) - -sample={"context":"在柏林,胡格诺派教徒创建了两个新的社区:多罗西恩斯塔特和弗里德里希斯塔特。到1700年,这个城市五分之一的人口讲法语。柏林胡格诺派在他们的教堂服务中保留了将近一个世纪的法语。他们最终决定改用德语,以抗议1806-1807年拿破仑占领普鲁士。他们的许多后代都有显赫的地位。成立了几个教会,如弗雷德里夏(丹麦)、柏林、斯德哥尔摩、汉堡、法兰克福、赫尔辛基和埃姆登的教会。","question":"除了多罗西恩斯塔特,柏林还有哪个新的社区?","idx":1} -plain_text='question:'+sample['question']+'knowledge:'+sample['context'][:self.max_knowledge_length] - -res_prefix=tokenizer.encode('answer'+'
      ',add_special_token=False) -l_rp=len(res_prefix) - -tokenized=tokenizer.encode(plain_text,add_special_tokens=False,truncation=True,max_length=self.max_seq_length-2-l_rp) - -tokenized+=res_prefix - -# Generate answer -pred_ids = model.generate(input_ids=tokenized,max_new_token=self.max_target_length,do_sample=True,top_p=0.9) -tokenizer.batch_decode(pred_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] -``` - -## 引用 Citation -如果您在您的工作中使用了我们的模型,可以引用我们的[论文](https://arxiv.org/abs/2210.08590): - -If you are using the resource for your work, please cite the our [paper](https://arxiv.org/abs/2210.08590): - -```text -@article{fengshenbang, - author = {Junjie Wang and Yuxiang Zhang and Lin Zhang and Ping Yang and Xinyu Gao and Ziwei Wu and Xiaoqun Dong and Junqing He and Jianheng Zhuo and Qi Yang and Yongfeng Huang and Xiayu Li and Yanghan Wu and Junyu Lu and Xinyu Zhu and Weifeng Chen and Ting Han and Kunhao Pan and Rui Wang and Hao Wang and Xiaojun Wu and Zhongshen Zeng and Chongpei Chen and Ruyi Gan and Jiaxing Zhang}, - title = {Fengshenbang 1.0: Being the Foundation of Chinese Cognitive Intelligence}, - journal = {CoRR}, - volume = {abs/2209.02970}, - year = {2022} -} -``` - -You can also cite our [website](https://github.com/IDEA-CCNL/Fengshenbang-LM/): - -欢迎引用我们的[网站](https://github.com/IDEA-CCNL/Fengshenbang-LM/): -```text -@misc{Fengshenbang-LM, - title={Fengshenbang-LM}, - author={IDEA-CCNL}, - year={2021}, - howpublished={\url{https://github.com/IDEA-CCNL/Fengshenbang-LM}}, -} -``` \ No newline at end of file diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Bloons TD 6 APK Everything You Need to Know About the Game and How to Download It.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Bloons TD 6 APK Everything You Need to Know About the Game and How to Download It.md deleted file mode 100644 index fdf9cc782ba43255be361e8836b787bebd29d729..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Bloons TD 6 APK Everything You Need to Know About the Game and How to Download It.md +++ /dev/null @@ -1,196 +0,0 @@ - -

      Bloons TD 6 apkdone: A Guide to Download and Play the Ultimate Tower Defense Game

      -

      If you are a fan of tower defense games, you have probably heard of Bloons TD 6, the latest installment in the popular Bloons series by Ninja Kiwi. Bloons TD 6 is a massive 3D tower defense game that offers hours and hours of the best strategy gaming available. You can craft your perfect defense from a combination of powerful monkey towers and awesome heroes, then pop every last invading bloon.

      -

      But what if you want to play Bloons TD 6 on your Android device without paying for it or waiting for updates? That's where Bloons TD 6 apkdone comes in. Apkdone is a website that provides free and safe downloads of modded APK files for various games and apps. In this article, we will show you how to download, install, and play Bloons TD 6 apkdone on your device. We will also share some of the best strategies, tips, and tricks to help you master the game. Let's get started!

      -

      bloons td 6 apkdone


      DOWNLOAD ❤❤❤ https://gohhs.com/2uPvth



      -

      What is Bloons TD 6?

      -

      A brief introduction to the game and its features

      -

      Bloons TD 6 is a tower defense game that was released in 2018 by Ninja Kiwi, a New Zealand-based game developer. The game is the sixth main entry in the Bloons series, which started in 2007 as a simple flash game. The game has since evolved into a complex and challenging strategy game that has millions of fans around the world.

      -

      The gameplay of Bloons TD 6 is similar to other tower defense games. You have to place various monkey towers along a path to prevent colorful balloons (called bloons) from reaching the end. Each tower has different abilities and upgrade paths that can help you deal with different types of bloons. You also have access to heroes, which are powerful units that level up automatically and have special skills.

      -

      Bloons TD 6 has a lot of content and features that make it stand out from other tower defense games. Some of these include:

      -
        -
      • Regular updates that add new characters, features, and gameplay
      • -
      • Boss events that pit you against fearsome boss bloons that require special strategies
      • -
      • Odysseys that let you battle through a series of maps connected by their theme, rules, and rewards
      • -
      • Contested territory that lets you join forces with other players and battle for territory against five other teams
      • -
      • Quests that delve into what makes the monkeys tick with stories and knowledge
      • -
      • Trophy store that lets you earn trophies to unlock dozens of cosmetic items that let you customize your monkeys, bloons, animations, music, and more
      • -
      • Content browser that lets you create your own challenges and odysseys, then share them with other players and check out the most liked and played community content
      • -
      -

      How to download Bloons TD 6 apkdone for Android devices

      -

      If you want to play Bloons TD 6 on your Android device without paying for it or waiting for updates, you can download Bloons TD 6 apkdone from [1](https://apkdone .com/bloons-td-6-apk-mod/). Apkdone is a website that provides free and safe downloads of modded APK files for various games and apps. Modded APK files are modified versions of the original APK files that can unlock premium features, remove ads, or add cheats. Bloons TD 6 apkdone is a modded version of Bloons TD 6 that gives you unlimited access to all the content and updates of the game.

      -

      To download Bloons TD 6 apkdone, you need to follow these steps:

      -

      bloons td 6 apkdone download
      -bloons td 6 apkdone mod
      -bloons td 6 apkdone free
      -bloons td 6 apkdone latest version
      -bloons td 6 apkdone unlimited money
      -bloons td 6 apkdone android
      -bloons td 6 apkdone hack
      -bloons td 6 apkdone offline
      -bloons td 6 apkdone update
      -bloons td 6 apkdone review
      -bloons td 6 apkdone cheats
      -bloons td 6 apkdone gameplay
      -bloons td 6 apkdone tips
      -bloons td 6 apkdone guide
      -bloons td 6 apkdone install
      -bloons td 6 apkdone features
      -bloons td 6 apkdone best towers
      -bloons td 6 apkdone maps
      -bloons td 6 apkdone heroes
      -bloons td 6 apkdone strategies
      -bloons td 6 apkdone challenges
      -bloons td 6 apkdone co-op
      -bloons td 6 apkdone sandbox mode
      -bloons td 6 apkdone dark castle
      -bloons td 6 apkdone impoppable
      -bloons td 6 apkdone expert mode
      -bloons td 6 apkdone monkey knowledge
      -bloons td 6 apkdone tier list
      -bloons td 6 apkdone achievements
      -bloons td 6 apkdone reddit
      -bloons td 6 apkdone discord
      -bloons td 6 apkdone wiki
      -bloons td 6 apkdone youtube
      -bloons td 6 apkdone steam
      -bloons td 6 apkdone pc
      -bloons td 6 apkdone ios
      -bloons td 6 apkdone mac
      -bloons td 6 apkdone windows
      -bloons td 6 apkdone chromebook
      -bloons td 6 apkdone bluestacks
      -bloons td 6 apkdone nox player
      -bloons td 6 apkdone ldplayer
      -bloons td 6 apkdone memu play
      -bloons td 6 apkdone genymotion
      -bloons td 6 apkdone koplayer
      -bloons td 6 apkdone droid4x
      -bloons td 6 apkdone remix os player
      -bloons td 6 apkdone phoenix os player

      -
        -
      1. Go to [1](https://apkdone.com/bloons-td-6-apk-mod/) and click on the green "Download APK" button.
      2. -
      3. Wait for the download to finish and locate the file in your device's storage.
      4. -
      5. Before installing the file, you need to enable the installation of apps from unknown sources in your device's settings. This will allow you to install apps that are not from the Google Play Store.
      6. -
      7. Tap on the file and follow the instructions to install Bloons TD 6 apkdone on your device.
      8. -
      -

      How to install and run Bloons TD 6 apkdone on your device

      -

      After installing Bloons TD 6 apkdone, you can run the game on your device and enjoy all its features. However, there are some things you need to keep in mind before playing:

      -
        -
      • Bloons TD 6 apkdone is not an official version of the game, so it may not be compatible with some devices or have some bugs or glitches. If you encounter any problems, you can try reinstalling the game or clearing its cache and data.
      • -
      • Bloons TD 6 apkdone may not work with some online features of the game, such as co-op mode, contested territory, or content browser. If you want to play online with other players, you may need to use the original version of the game from the Google Play Store.
      • -
      • Bloons TD 6 apkdone may not be updated as frequently as the original version of the game, so you may miss out on some new content or features. If you want to get the latest updates, you can check the Apkdone website regularly or use the original version of the game from the Google Play Store.
      • -
      -

      With these things in mind, you can enjoy playing Bloons TD 6 apkdone on your device and have fun popping bloons.

      -

      What are the best strategies to play Bloons TD 6?

      -

      General tips and tricks for beginners

      -

      Bloons TD 6 is a challenging game that requires a lot of strategy and planning. If you are new to the game or want to improve your skills, here are some general tips and tricks that can help you:

      -
        -
      • Learn the basics of each monkey tower and hero. Each tower has different strengths and weaknesses, as well as different upgrade paths that can change their abilities. Each hero has different skills and synergies with other towers. You can read their descriptions and stats in the game or check out some online guides for more information.
      • -
      • Experiment with different combinations of towers and heroes. There is no one best strategy for every map or mode. You need to find out what works best for your playstyle and preferences. You can also try out different challenges and odysseys that have specific rules and restrictions to test your creativity and adaptability.
      • -
      • Use your powers and insta-monkeys wisely. Powers are special items that can give you an edge in difficult situations, such as extra lives, cash, or damage. Insta-monkeys are pre-upgraded towers that you can place instantly on the map. You can earn powers and insta-monkeys by completing quests, achievements, or events. However, they are limited in quantity and can only be used once per game, so use them sparingly and strategically.
      • -
      • Watch out for camo, lead, purple, fortified, MOAB-class, and boss bloons. These are special types of bloons that have unique properties and require specific counters. For example, camo bloons can only be detected by certain towers or upgrades, lead bloons can only be popped by explosive or sharp projectiles, purple bloons are immune to energy-based attacks, fortified bloons have extra layers of health, MOAB-class bloons are huge and tough bloons that spawn smaller bloons when popped, and boss bloons are extremely powerful bloons that have special abilities and phases. You need to prepare your defense accordingly and have a variety of towers that can deal with these threats.
      • -
      • Upgrade your monkey knowledge. Monkey knowledge is a system that lets you unlock permanent bonuses and perks for your towers, heroes, powers, and gameplay. You can earn monkey knowledge points by leveling up your account or completing achievements. You can spend them on different branches of monkey knowledge, such as primary, military, magic, support, heroes, powers, and chimps. You can also reset your monkey knowledge for free and redistribute your points if you want to try a different build.
      • -
      -

      The best monkey towers and heroes to use

      -

      There are many monkey towers and heroes to choose from in Bloons TD 6, and each one has its own pros and cons. However, some of them are generally considered to be more effective and versatile than others. Here are some of the best monkey towers and heroes to use in the game:

      - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      Tower/HeroWhy it's good
      Ninja MonkeyA fast and stealthy tower that can pop camo bloons and deal extra damage to MOAB-class bloons. It can also slow down bloons with its Distraction and Caltrops upgrades, or boost nearby towers with its Shinobi Tactics upgrade.
      AlchemistA support tower that can buff nearby towers with its Acidic Mixture Dip and Stronger Stimulant upgrades, increasing their attack speed, range, and damage. It can also pop lead and purple bloons with its Acid Pool and Unstable Concoction upgrades, or transform bloons into red bloons with its Transforming Tonic upgrade.
      Super MonkeyA powerful tower that can shoot a rapid stream of darts, lasers, plasma, or sun rays at bloons. It can also fly over obstacles and water with its Super Range upgrade, or gain extra abilities with its Robo Monkey, Sun Avatar, or Dark Knight upgrades.
      Banana FarmA money-making tower that can generate bananas that can be collected for cash. It can also increase its income with its Banana Plantation, Banana Research Facility, or Monkey-Nomics upgrades, or deposit money into a bank account with its Monkey Bank or IMF Loan upgrades.
      GwendolinA fire-based hero that can pop lead and purple bloons with her fire attacks. She can also boost nearby towers with her Heat It Up ability, or deal massive damage to all bloons on screen with her Firestorm ability.
      Obyn GreenfootA nature-based hero that can summon brambles and totems to pop bloons and slow them down. He can also boost the power of magic towers within his range, or deal massive damage to MOAB-class bloons with his Wall of Trees ability.
      AdoraA divine hero that can shoot powerful bolts of light at bloons. She can also level up faster than other heroes by sacrificing towers to her, or unleash her ultimate power with her Ball of Light ability.
      -

      The best maps and modes to play

      -

      Bloons TD 6 has a variety of maps and modes to play, each with different levels of difficulty and challenge. Some of these include:

      -
        -
      • Standard mode: The basic mode where you have to pop all the bloons before they reach the end of the path. You can choose from four difficulty levels: easy, medium, hard, and impoppable.
      • -
      • Alternate bloons rounds: A mode where the bloons are arranged in different and harder patterns than the standard mode. You have to adapt your strategy to deal with the unexpected bloon types and combinations.
      • -
      • Impoppable mode: A mode where the bloons are much tougher and faster than the standard mode. You also have less lives, money, and monkey knowledge. This mode is only for the most skilled and experienced players.
      • -
      • Chimps mode: A mode where you have no continues, hearts lost, income, monkey knowledge, powers, or selling. You have to rely on your towers and heroes alone to pop all the bloons. This mode is the ultimate test of your strategy and skill.
      • -
      • Half cash mode: A mode where you have half the amount of money you normally get from popping bloons or collecting bananas. You have to be more careful and efficient with your spending and placement of towers.
      • -
      • Double HP MOABs mode: A mode where all MOAB-class bloons have double their normal health. You have to use more powerful and specialized towers to deal with these massive bloons.
      • -
      -

      As for the maps, there are over 50 maps to choose from in Bloons TD 6, each with different themes, layouts, obstacles, and water placements. Some of the maps are also animated and interactive, adding more fun and challenge to the game. You can also unlock more maps by completing certain achievements or events.

      -

      Some of the best maps to play are:

      - - - - - - - - - - - - - - - - - - - - - - - - - -
Cubism: A simple and easy map that consists of four square-shaped paths that cross each other. It has plenty of space for placing towers and no obstacles or water. It is a good map for beginners or for testing out new strategies.
Moon Landing: A unique and challenging map that takes place on the moon. It has low gravity, which affects the speed and trajectory of some projectiles. It also has craters that can block your line of sight or provide cover for your towers.
Infernal: A difficult and complex map that takes place in a fiery hell. It has lava pools that can damage your towers or bloons, as well as moving platforms that can change the path of the bloons or your towers.
Spice Islands: A fun and colorful map that takes place on a tropical island. It has a lot of water for placing water-based towers, as well as palm trees that can provide extra income or pop bloons.
Muddy Puddles: A hard and frustrating map that takes place on a muddy field. It has four very short and narrow paths that are hard to cover with your towers. It also has puddles that can slow down your towers or bloons.
      -

      What are the benefits of playing Bloons TD 6 apkdone?

      -

      Enjoy unlimited access to all content and updates

      -

      One of the main benefits of playing Bloons TD 6 apkdone is that you can enjoy unlimited access to all the content and updates of the game without paying for it or waiting for it. You can unlock all the towers, heroes, skins, maps, modes, trophies, quests, and more with Bloons TD 6 apkdone. You can also get all the latest updates that add new features and gameplay to the game.

      -

      This means that you can have more fun and variety in playing Bloons TD 6 apkdone. You can try out different combinations of towers and heroes, experiment with different strategies and challenges, customize your game with different cosmetics and settings, and explore all the content that the game has to offer.

      -

      Play offline or online with co-op mode

      -

      Another benefit of playing Bloons TD 6 apkdone is that you can play offline or online with co-op mode. You can play offline if you don't have an internet connection or want to save your data. You can still enjoy all the features and content of the game without any limitations.

      -

      You can also play online with co-op mode if you want to team up with other players and battle against the bloons together. You can join or create a co-op lobby and invite up to three other players to join you. You can also chat with your teammates and coordinate your strategies.

      -

      Co-op mode is a great way to have more fun and challenge in playing Bloons TD 6 apkdone. You can share the excitement and difficulty of popping bloons with your friends or strangers. You can also learn from other players and improve your skills.

      -

      Customize your game with trophies, skins, and quests

      -

      A final benefit of playing Bloons TD 6 apkdone is that you can customize your game with trophies, skins, and quests. Trophies are special items that you can earn by completing certain achievements or events. You can use them to unlock dozens of cosmetic items that let you customize your monkeys, bloons, animations, music, and more.

      -

      Skins are alternative appearances for your towers and heroes that change their look and sound. You can unlock them by completing certain quests or events, or by using trophies. You can also mix and match different skins to create your own unique style.

      -

      Quests are tasks that you can complete to earn rewards, such as cash, monkey knowledge points, powers, insta-monkeys, skins, or trophies. You can find quests in the quest menu or on the map screen. You can also create your own quests using the content browser and share them with other players.

      -

      Customizing your game with trophies, skins, and quests is a fun way to express yourself and add more personality to your game. You can also challenge yourself and discover new things by completing different quests.

      -

      Conclusion

      -

      Bloons TD 6 is an amazing tower defense game that offers a lot of content and features for fans of the genre. You can download Bloons TD 6 apkdone from Apkdone.com and enjoy unlimited access to all the content and updates of the game. You can also play offline or online with co-op mode and customize your game with trophies, skins, and quests.

      -

      Bloons TD 6 apkdone is a great way to have fun and challenge yourself with popping bloons. Whether you are a beginner or an expert, you will find something to enjoy in this game. So what are you waiting for? Download Bloons TD 6 apkdone today and start popping!

      -

      FAQs

      -

      What is the difference between Bloons TD 6 apkdone and Bloons TD 6 mod apk?

      -

      Bloons TD 6 apkdone and Bloons TD 6 mod apk are both modded versions of Bloons TD 6 that give you unlimited access to all the content and updates of the game. However, Bloons TD 6 apkdone is downloaded from Apkdone.com, which is a trusted and safe website that provides free and secure downloads of modded APK files. Bloons TD 6 mod apk may be downloaded from other sources that may not be reliable or safe.

      -

      Is Bloons TD 6 apkdone legal?

      -

      Bloons TD 6 apkdone is not an official version of the game, so it may not be legal in some countries or regions. It may also violate the terms of service of Ninja Kiwi, the developer of Bloons TD 6. Therefore, you should use Bloons TD 6 apkdone at your own risk and discretion.

      -

      Can I play Bloons TD 6 apkdone on PC?

      -

      Bloons TD 6 apkdone is designed for Android devices, so you cannot play it directly on PC. However, you can use an Android emulator, such as BlueStacks or NoxPlayer, to run Bloons TD 6 apkdone on your PC. You will need to download and install the emulator on your PC, then download and install Bloons TD 6 apkdone on the emulator.

      -

      Can I transfer my progress from Bloons TD 6 apkdone to Bloons TD 6 original?

      -

      Bloons TD 6 apkdone and Bloons TD 6 original are separate versions of the game, so you cannot transfer your progress from one to another. If you want to switch from Bloons TD 6 apkdone to Bloons TD 6 original, you will have to start from scratch.

      -

      Can I get banned for using Bloons TD 6 apkdone?

      -

 Bloons TD 6 apkdone may not work with some online features of the game, such as co-op mode, contested territory, or content browser. If you try to use these features with Bloons TD 6 apkdone, you may get detected by Ninja Kiwi's anti-cheat system and get banned from playing online. Therefore, you should be careful and avoid using these features with Bloons TD 6 apkdone. 

      401be4b1e0
      -
      -
      \ No newline at end of file diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Crowd Evolution! APK - Enjoy the Fun and Challenge of Growing Your Crowd.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Crowd Evolution! APK - Enjoy the Fun and Challenge of Growing Your Crowd.md deleted file mode 100644 index b23418568bed2e61599e23235e241359ae8fa56d..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Crowd Evolution! APK - Enjoy the Fun and Challenge of Growing Your Crowd.md +++ /dev/null @@ -1,140 +0,0 @@ -
      -

      Crowd Evolution APK Download: A Fun and Addictive Game for Android Users

      -

      Do you love games that involve growing and evolving your crowd and beating your enemies? If yes, then you might want to try Crowd Evolution, a popular arcade game developed by Rollic Games. In this game, you can swipe to control your crowd, collect people of different eras, upgrade your weapons and skills, and fight against various bosses. You can also unlock new characters, pets, and outfits as you progress through the levels. In this article, we will tell you everything you need to know about Crowd Evolution, including how to download and install the APK file on your Android device, how to play the game effectively and enjoyably, and some alternatives to Crowd Evolution if you want to try something different.

      -

      crowd evolution apk download


 Download: https://gohhs.com/2uPnnT 



      -

      What is Crowd Evolution?

      -

 Crowd Evolution is a game that combines elements of parkour, strategy, and action. The goal of the game is to grow and evolve your crowd and beat the enemies that stand in your way. You can do this by swiping to move your crowd, avoiding obstacles and traps, collecting people from different eras, and fighting against bosses with various weapons and skills. The game has simple but addictive gameplay that will keep you entertained for hours. 

      -

      The gameplay of Crowd Evolution

      -

      The gameplay of Crowd Evolution is easy to understand but hard to master. You start with a small crowd of people from a certain era, such as ancient Egypt, medieval Europe, or modern times. You need to swipe to move your crowd along the path, avoiding red gates that will reduce your crowd size or time, and passing through green gates that will increase them. You also need to collect people from different eras that will join your crowd and make it stronger. For example, you can collect knights, pirates, ninjas, cowboys, astronauts, zombies, robots, and more.

      -

      As you progress through the levels, you will encounter enemies that will try to stop you or take away your crowd members. You need to fight them with your weapons and skills, which you can upgrade at the end of each level. You can also unlock new weapons and skills as you advance through the game. For example, you can use swords, axes, spears, guns, rockets, lasers, bombs, and more.

      -

      At the end of each level, you will face a boss that will challenge your crowd's strength and speed. You need to shoot at the boss with your weapons until its health bar is depleted. You can also use special skills that will give you an advantage in the battle. For example, you can use a shield, a magnet, a freeze ray, a fireball, or a lightning bolt.

      -

      crowd evolution game free download apk
      -crowd evolution mod apk unlimited money
      -crowd evolution android game download
      -crowd evolution apk latest version
      -crowd evolution app download for pc
      -crowd evolution arcade game apk
      -crowd evolution by longhorn studio apk
      -crowd evolution cheats and hacks apk
      -crowd evolution download apk pure
      -crowd evolution full unlocked apk
      -crowd evolution google play store apk
      -crowd evolution hack mod apk download
      -crowd evolution install apk file
      -crowd evolution mod apk android 1
      -crowd evolution new update apk
      -crowd evolution offline game apk
      -crowd evolution premium apk download
      -crowd evolution pro apk free download
      -crowd evolution review and rating apk
      -crowd evolution tips and tricks apk
      -crowd evolution unlimited coins apk
      -crowd evolution video game apk
      -crowd evolution walkthrough and guide apk
      -download crowd evolution for android phone
      -download crowd evolution mod apk 2023
      -how to play crowd evolution game apk
      -how to update crowd evolution app apk
      -is crowd evolution safe to download apk
      -where to download crowd evolution apk

      -

      The game has hundreds of levels with different themes and difficulties. You can also play in different modes, such as endless mode or daily challenge mode. You can also compete with other players on the global leaderboard and see who has the biggest and strongest crowd.

      -

      The features of Crowd Evolution

      -

      Crowd Evolution has many features that make it fun and enjoyable to play. Some of these features are:

      -
        -
      • Simple but addictive gameplay: You only need to swipe to control your crowd and collect people from different eras. The game is easy to play but hard to master.
      • -
      • Various characters, pets, and outfits: You can unlock new characters from different eras that will join your crowd and make it stronger. You can also unlock cute pets that will follow you around and help you in the game. You can also customize your crowd's appearance with different outfits.
      • -
 
      • Amazing graphics and sound effects: The game has colorful and vivid graphics that will make you feel like you are in different eras. The game also has realistic and immersive sound effects that will enhance your gaming experience.
      • -
      • Offline and online modes: You can play the game offline without an internet connection or online with other players. You can also sync your progress across different devices with your Google Play account.
      • -
      -

      How to download and install Crowd Evolution APK on your Android device?

      -

      If you want to play Crowd Evolution on your Android device, you need to download and install the APK file of the game. The APK file is a package that contains the game's data and code. You can download the APK file from various sources on the internet, such as APKPure, APKMirror, or APKMonk. However, you need to be careful and only download the APK file from trusted and reliable sources, as some sources may contain malware or viruses that can harm your device.

      -

      Here are the steps to download and install Crowd Evolution APK on your Android device:

      -

      Step 1: Enable unknown sources

      -

      Before you can install the APK file, you need to enable unknown sources on your device. This will allow you to install apps from sources other than the Google Play Store. To do this, follow these steps:

      -
        -
      1. Go to your device's settings and tap on security or privacy.
      2. -
      3. Find the option that says unknown sources or install unknown apps and toggle it on.
      4. -
      5. A warning message will pop up. Tap on OK or allow to confirm.
      6. -
      -

      Step 2: Download the APK file

      -

      Next, you need to download the APK file of Crowd Evolution from a trusted source. To do this, follow these steps:

      -
        -
      1. Open your browser and go to the website that offers the APK file of Crowd Evolution. For example, you can go to APKPure.
      2. -
      3. Find the download button and tap on it. The APK file will start downloading to your device.
      4. -
      5. Wait for the download to finish. You can check the progress in your notification bar or your download folder.
      6. -
      -

      Step 3: Install the APK file

      -

      Finally, you need to install the APK file of Crowd Evolution on your device. To do this, follow these steps:

      -
        -
      1. Locate the downloaded APK file on your device. You can use a file manager app or go to your download folder.
      2. -
      3. Tap on the APK file to open it. A prompt will appear asking you to install the app.
      4. -
      5. Tap on install and wait for the installation to complete.
      6. -
      7. Once the installation is done, you can tap on open to launch the game or find it on your app drawer.
      8. -
      -
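 If you prefer working from a computer instead of tapping through the steps above, you can also sideload the APK with adb (the Android Debug Bridge). This is only a rough sketch: it assumes USB debugging is enabled on your device, adb is already installed on your computer, and the file name below is replaced with the actual name of the APK you downloaded. - adb install crowd-evolution.apk - If the command finishes with "Success", the game will appear in your app drawer just like a normally installed app. 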

      How to play Crowd Evolution effectively and enjoyably?

      -

      Crowd Evolution is a fun and addictive game that will test your skills and strategy. However, it can also be challenging and frustrating at times. If you want to play Crowd Evolution effectively and enjoyably, here are some tips and tricks that you can use:

      -

      Tips and tricks for Crowd Evolution

      -
        -
      • Swipe carefully: The most important thing in Crowd Evolution is to swipe carefully and avoid hitting red gates or obstacles that will reduce your crowd size or time. You also need to swipe fast enough to collect people from different eras and pass through green gates that will increase them.
      • -
      • Upgrade wisely: At the end of each level, you can upgrade your weapons and skills with coins that you earn from playing. You should upgrade wisely and choose the ones that suit your play style and strategy. For example, if you prefer a long-range weapon, you can upgrade your gun or rocket. If you prefer a close-range weapon, you can upgrade your sword or axe. If you want to have more crowd members, you can upgrade your magnet or shield skills.
      • -
      • Use special skills strategically: During the game, you can use special skills that will give you an edge in the battle. You can use them by tapping on their icons at the bottom of the screen. However, you should use them strategically and not waste them unnecessarily. For example, you can use a shield skill when you are facing a lot of enemies or obstacles. You can use a magnet skill when you want to collect more people from different eras. You can use a freeze ray skill when you want to slow down a boss or an enemy.
      • -
 • Unlock new characters, pets, and outfits: As you play the game, you can unlock new characters from different eras that will join your crowd and make it stronger. You can also unlock cute pets that will follow you around and help you in the game. You can also customize your crowd's appearance with different outfits. To unlock these items, you need to collect coins and gems from playing or watching ads. You can also use real money to buy them if you want. 
      • -
      -

      Alternatives to Crowd Evolution

      -

      If you like Crowd Evolution, you might also like some other games that have similar gameplay and features. Here are some alternatives to Crowd Evolution that you can try:

 | Game | Description | | --- | --- | | Crowd City | A game where you need to grow your crowd by collecting people from the city and compete with other players to become the biggest crowd. | | Crowd Master 3D | A game where you need to grow your crowd by collecting people from different eras and fight against enemies and bosses with various weapons. | | Crowd Run 3D | A game where you need to grow your crowd by collecting people from different eras and run through obstacles and traps to reach the finish line. | | Crowd Battle 3D | A game where you need to grow your crowd by collecting people from different eras and battle against other players in different modes. | | Crowd Simulator 3D | A game where you need to grow your crowd by collecting people from different eras and simulate various scenarios and events. | 
      -

      Conclusion

      -

      Crowd Evolution is a fun and addictive game that will keep you entertained for hours. You can swipe to control your crowd, collect people from different eras, upgrade your weapons and skills, and fight against various bosses. You can also unlock new characters, pets, and outfits as you progress through the levels. You can download and install the APK file of Crowd Evolution on your Android device by following the steps in this article. You can also use the tips and tricks in this article to play Crowd Evolution effectively and enjoyably. You can also try some alternatives to Crowd Evolution if you want to play something different.

      -

      Summary of the main points

      -
        -
      • Crowd Evolution is a popular arcade game developed by Rollic Games.
      • -
      • The goal of the game is to grow and evolve your crowd and beat the enemies that stand in your way.
      • -
      • You can download and install the APK file of Crowd Evolution on your Android device by enabling unknown sources, downloading the APK file from a trusted source, and installing the APK file on your device.
      • -
      • You can play Crowd Evolution effectively and enjoyably by swiping carefully, upgrading wisely, using special skills strategically, and unlocking new characters, pets, and outfits.
      • -
      • You can try some alternatives to Crowd Evolution that have similar gameplay and features, such as Crowd City, Crowd Master 3D, Crowd Run 3D, Crowd Battle 3D, or Crowd Simulator 3D.
      • -
      -

      FAQs

      -
        -
      • Q: Is Crowd Evolution free to play?
      • -
      • A: Yes, Crowd Evolution is free to play. However, it contains ads and in-app purchases that can enhance your gaming experience.
      • -
      • Q: Is Crowd Evolution safe to download and install?
      • -
      • A: Yes, Crowd Evolution is safe to download and install if you download the APK file from a trusted and reliable source. However, you should always scan the APK file with an antivirus app before installing it on your device.
      • -
      • Q: What are the minimum requirements for playing Crowd Evolution on Android?
      • -
      • A: The minimum requirements for playing Crowd Evolution on Android are Android 5.0 or higher and at least 100 MB of free storage space.
      • -
      • Q: How can I contact the developer of Crowd Evolution?
      • -
      • A: You can contact the developer of Crowd Evolution by sending an email to support@rollicgames.com or visiting their website at https://www.rollicgames.com/.
      • -
      • Q: How can I rate and review Crowd Evolution on Google Play?
      • A: You can rate and review Crowd Evolution on Google Play by following these steps: -
          -
        1. Open the Google Play Store app on your device and search for Crowd Evolution.
        2. -
        3. Tap on the game's icon and scroll down to the bottom of the page.
        4. -
        5. Tap on the star rating that you want to give and write your review in the text box.
        6. -
        7. Tap on submit to post your rating and review.
        8. -
        -

      197e85843d
      -
      -
      \ No newline at end of file diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download 7 Zip for Mac OS X A Powerful and Reliable File Archiver with High Compression Ratio.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download 7 Zip for Mac OS X A Powerful and Reliable File Archiver with High Compression Ratio.md deleted file mode 100644 index 5466f070f4f307f2878a8e43f43bb1c2ec1b4a4e..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download 7 Zip for Mac OS X A Powerful and Reliable File Archiver with High Compression Ratio.md +++ /dev/null @@ -1,152 +0,0 @@ -
      -

      Download 7-Zip for Mac OS X: A Complete Guide

      -

      Do you want to compress or decompress files on your Mac with a high compression ratio and strong encryption? If so, you might have heard of 7-Zip, a popular file archiver that can handle various formats, including its own 7z format. However, you might also have noticed that there is no official version of 7-Zip for Mac OS X. Does that mean you cannot use 7-Zip on your Mac?

      -

      download 7 zip for mac os x


 Download Zip: https://gohhs.com/2uPohF 



      -

      Not necessarily. In this article, I will show you how to download and install 7-Zip for Mac OS X, how to use it to compress and extract files, and what are some of the benefits and drawbacks of using 7-Zip for file compression. I will also provide you with some alternatives to 7-Zip for Mac OS X that you can try if you are not satisfied with 7-Zip.

      -

      How to download and install 7-Zip for Mac OS X

      -

      As mentioned earlier, there is no official version of 7-Zip for Mac OS X. However, there are some unofficial ports and alternatives that can enable you to use 7-Zip on your Mac. Here are two of the most common methods:

      -

      Method 1: Use Homebrew to install p7zip

      -

      Homebrew is a package manager that allows you to install various software on your Mac from the command line. You can use Homebrew to install p7zip, which is a port of the command line version of 7-Zip for Linux/Unix systems.

      -

      To install Homebrew on your Mac, open Terminal and copy and paste the following command:

      -

      How to download 7 zip for mac os x
      -Download 7 zip for mac os x free
      -Download 7 zip for mac os x alternative
      -Download 7 zip for mac os x compatible
      -Download 7 zip for mac os x best
      -Download 7 zip for mac os x online
      -Download 7 zip for mac os x tutorial
      -Download 7 zip for mac os x guide
      -Download 7 zip for mac os x review
      -Download 7 zip for mac os x comparison
      -Download 7 zip for mac os x latest version
      -Download 7 zip for mac os x full version
      -Download 7 zip for mac os x safe
      -Download 7 zip for mac os x secure
      -Download 7 zip for mac os x fast
      -Download 7 zip for mac os x easy
      -Download 7 zip for mac os x simple
      -Download 7 zip for mac os x quick
      -Download 7 zip for mac os x reliable
      -Download 7 zip for mac os x efficient
      -Download 7 zip for mac os x peazip
      -Download 7 zip for mac os x winrar
      -Download 7 zip for mac os x bandizip
      -Download 7 zip for mac os x winzip
      -Download 7 zip for mac os x keka
      -Download 7 zip for mac os x the unarchiver
      -Download 7 zip for mac os x p7zip
      -Download 7 zip for mac os x unzipper
      -Download 7 zip for mac os x rar extractor
      -Download 7 zip for mac os x file archiver
      -Download 7 zip for mac os x file compressor
      -Download 7 zip for mac os x high compression ratio
      -Download 7 zip for mac os x open source
      -Download 7 zip for mac os x freeware
      -Download 7 zip for mac os x shareware
      -Download 7 zip for mac os x trial version
      -Download 7 zip for mac os x license key
      -Download 7 zip for mac os x crack
      -Download 7 zip for mac os x serial number
      -Download 7 zip for mac os x activation code
      -Download 7 zip for mac os x registration code
      -Download 7 zip for mac os x product key
      -Download 7 zip for mac os x coupon code
      -Download 7 zip for mac os x discount code
      -Download 7 zip for mac os x promo code
      -Download 7 zip for mac os x deal offer
      -Download 7 zip for mac os x best price
      -Download 7 zip for mac os x lowest price
      -Download 7 zip for mac os x cheapest price

      - /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" -

      Press enter/return key and wait for the command to finish.

      -

      To install p7zip using Homebrew, open Terminal and copy and paste the following command:

      - brew install p7zip -

      Press enter/return key and wait for the command to finish.
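 As a quick, optional check that p7zip was installed correctly, you can run the 7z command with no arguments. It should print the 7-Zip version banner and a summary of the available commands and switches (the exact version number will depend on the p7zip release that Homebrew installed): - 7z - 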

      -

      Method 2: Download and install The Unarchiver

      -

      The Unarchiver is a free application that can open various archive formats, including 7z files. It also comes with a command line tool called unar that can extract files from archives.

      -

      To download The Unarchiver, go to its official website or the Mac App Store. The latest version is 4.3.5 and it requires Mac OS X 10.7 or later. To install The Unarchiver, open the downloaded file and drag the app icon to the Applications folder. Alternatively, you can install it from the Mac App Store by clicking on the Get button and following the instructions.

      -

      How to use 7-Zip for Mac OS X

      -

      Once you have installed p7zip or The Unarchiver, you can use 7-Zip to compress and extract files on your Mac. Here are some of the ways you can do that:

      -

      Using the command line

      -

      If you have installed p7zip using Homebrew, you can use the command line tool 7z to compress and extract files with 7-Zip. To use 7z, open Terminal and type the following syntax:

 - 7z <command> [<switches>...] <archive_name> [<file_names>...] [@listfile] - 

 The <command> parameter specifies the operation you want to perform, such as a for adding files to an archive, x for extracting files from an archive, l for listing the contents of an archive, and so on. The <switches> parameter specifies the options you want to use, such as -t for specifying the archive type, -p for setting a password, -m for setting the compression method, and so on. The <archive_name> parameter specifies the name of the archive file you want to create or extract. The <file_names> parameter specifies the names of the files you want to add or extract. The @listfile parameter specifies a text file that contains a list of file names. 

      -

      For example, to create a 7z archive named test.7z that contains the files test1.txt and test2.txt, you can use the following command:

      - 7z a test.7z test1.txt test2.txt -

      To extract all files from test.7z to the current directory, you can use the following command:

      - 7z x test.7z -

      To list all files in test.7z with their details, you can use the following command:

      - 7z l test.7z -

      You can find more information about 7z commands and switches by typing 7z --help or visiting the official documentation.
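 For example, assuming you have a folder named photos that you want to pack with the strongest (and slowest) compression preset, you could combine the archive type and compression level switches like this (the folder and archive names are only placeholders): - 7z a -t7z -mx=9 photos.7z photos/ - Here a is the add command, -t7z forces the 7z format, and -mx=9 selects the maximum compression level. 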

      -

      Using the graphical user interface

      -

      If you have installed The Unarchiver, you can use its graphical user interface to compress and extract files with 7-Zip. To use The Unarchiver, follow these steps:

      -
        -
      1. Launch The Unarchiver from your Applications folder or Dock.
      2. -
      3. Select File > Create Archive from the menu bar.
      4. -
      5. In the dialog box that appears, choose a name and a location for your archive file.
      6. -
      7. Select 7-Zip as the format from the drop-down menu.
      8. -
      9. If you want to set a password or change other options, click on Advanced Options.
      10. -
      11. Add the files or folders you want to compress by dragging and dropping them to the dialog box or clicking on Add Files.
      12. -
      13. Click on Create to start compressing your files.
      14. -
      -

      To extract files from a 7-Zip archive using The Unarchiver, follow these steps:

      -
        -
      1. Launch The Unarchiver from your Applications folder or Dock.
      2. -
      3. Select File > Open Archive from the menu bar.
      4. -
      5. In the dialog box that appears, locate and select your 7-Zip archive file.
      6. -
      7. If your archive is password-protected, enter your password when prompted.
      8. -
      9. Select a destination folder for your extracted files.
      10. -
      11. Click on Extract to start extracting your files.
      12. -
      -

      Benefits of 7-Zip for file compression

      -

      Using 7-Zip for file compression has some advantages over other file archivers and compressors. Here are some of them:

      -

      High compression ratio

      -

      The main benefit of using 7-Zip is its high compression ratio. This means that it can reduce the size of your files more than other programs, saving you disk space and bandwidth. According to some tests, 7-Zip can achieve up to 50% better compression than ZIP and RAR formats. This is especially useful for compressing large files or folders that contain many similar files.

      -

      Encryption

      -

      Another benefit of using 7-Zip is its encryption feature. You can protect your archives with a password and encrypt them with AES-256 algorithm, which is one of the strongest encryption standards available. This can prevent unauthorized access to your files and ensure their privacy and security. You can also use 7-Zip to encrypt existing ZIP or RAR archives without recompressing them.
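 As a rough illustration of this feature (the archive, file, and password names below are placeholders), you can set a password with the -p switch and, for 7z archives, also hide the file names by encrypting the archive headers with -mhe=on: - 7z a -pYourPassword -mhe=on private.7z notes.txt - Anyone who tries to list or extract private.7z will then be asked for the password first. 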

      -

      Support for multiple formats

      -

      A third benefit of using 7-Zip is its support for multiple formats. Besides its own 7z format, 7-Zip can also create and open ZIP, TAR, GZIP, BZIP2, and XZ archives. Moreover, it can extract files from many other formats, such as RAR, ISO, DMG, CAB, ARJ, LZH, CHM, MSI, WIM, Z, CPIO, RPM, DEB, NSIS, and more. This makes 7-Zip a versatile tool that can handle almost any archive file you encounter.

      -

      Drawbacks of 7-Zip for file compression

      -

      Despite its advantages, using 7-Zip for file compression also has some drawbacks that you should be aware of. Here are some of them:

      -

      Lack of native support

      -

      The main drawback of using 7-Zip is its lack of native support for Mac OS X. As explained earlier, there is no official version of 7-Zip for Mac OS X. You will need to use a third-party tool or an alternative program to use 7-Zip on your Mac. This can be inconvenient and confusing for some users who are not familiar with the command line or the installation process.

      -

      Slow startup

      -

      Another drawback of using 7-Zip is its slow startup time. Compared to other file archivers and compressors, 7-Zip takes longer to launch and load its interface. This can be annoying if you want to quickly compress or extract files with 7-Zip. However, once it is running, 7-Zip performs well and does not consume much CPU or memory resources.

      -

      Limited features

      -

      A third drawback of using 7-Zip is its limited features. Although 7-Zip has some basic options and settings that you can customize, it does not offer many advanced features that other programs have. For example, you cannot split or merge archives, create self-extracting archives, repair damaged archives, or preview files inside archives with 7-Zip. You also cannot integrate 7-Zip with Finder or other applications on your Mac.

      -

      Alternatives to 7-Zip for Mac OS X

      -

      If you are not satisfied with 7-Zip for file compression on your Mac, you can try some of the alternatives that are available for Mac OS X. Here are some of them:

      -

      PeaZip

      -

      PeaZip is a free and open source file archiver and compressor that supports over 200 archive formats, including 7z. It has a user-friendly interface that allows you to easily create and extract archives with drag and drop or context menu commands. It also has some advanced features such as encryption, password manager, secure deletion, checksum verification, archive conversion, and more.

      -

      You can download PeaZip from its official website. The latest version is 8.4.0 and it requires Mac OS X 10.9 or later.

      -

      WinRAR

      -

      WinRAR is a popular file archiver and compressor that supports RAR and ZIP formats as well as many other formats. It has a powerful compression engine that can create smaller archives than other programs. It also has some useful features such as encryption, splitting and merging archives, self-extracting archives, recovery records, and more.

      -

      WinRAR is not free, but you can use it for free for 40 days with full functionality. After that, you will need to purchase a license to continue using it. You can download WinRAR from its official website. The latest version is 6.02 and it requires Mac OS X 10.9 or later.

      -

      Bandizip

      -

      Bandizip is a fast and easy file archiver and compressor that supports ZIP, 7Z, RAR, and many other formats. It has a simple interface that lets you create and extract archives with a few clicks. It also has some handy features such as password protection, multi-core compression, archive preview, archive repair, and more.

      -

      Bandizip is free for personal use, but you will need to purchase a license for commercial use. You can download Bandizip from its official website. The latest version is 7.17 and it requires Mac OS X 10.12 or later.

      -

      WinZip

      -

      WinZip is a well-known file archiver and compressor that supports ZIP, RAR, 7Z, and many other formats. It has a rich interface that allows you to manage your files and archives with ease. It also has some advanced features such as encryption, cloud integration, file sharing, backup, watermarking, and more.

      -

      WinZip is not free, but you can use it for free for 21 days with full functionality. After that, you will need to purchase a subscription to continue using it. You can download WinZip from its official website. The latest version is 9.0 and it requires Mac OS X 10.8 or later.

      -

      Keka

      -

      Keka is a lightweight and powerful file archiver and compressor that supports 7Z, ZIP, RAR, and many other formats. It has a minimalist interface that lets you create and extract archives with drag and drop or context menu commands. It also has some useful features such as encryption, split and combine archives, progress indicator, and more.

      -

      Keka is free and open source, but you can support its development by making a donation or purchasing it from the Mac App Store. You can download Keka from its official website or the Mac App Store. The latest version is 1.2.16 and it requires Mac OS X 10.9 or later.

      -

      Conclusion

      -

      In this article, I have shown you how to download and install 7-Zip for Mac OS X, how to use it to compress and extract files, and what are some of the benefits and drawbacks of using 7-Zip for file compression. I have also provided you with some alternatives to 7-Zip for Mac OS X that you can try if you are not satisfied with 7-Zip.

      -

      7-Zip is a great tool for file compression that can save you disk space and bandwidth with its high compression ratio and encryption. However, it is not available for Mac OS X as a native application. You will need to use a third-party tool or an alternative program to use 7-Zip on your Mac.

      -

      If you are looking for a file archiver and compressor that is compatible with Mac OS X, you can choose from various options such as PeaZip, WinRAR, Bandizip, WinZip, and Keka. These programs have their own features and advantages that you can compare and choose from according to your needs and preferences.

      -

      I hope this article has helped you learn how to download and use 7-Zip for Mac OS X. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!

      -

      FAQs

      -

      Here are some of the frequently asked questions about 7-Zip for Mac OS X:

      -

      What is the best format for file compression?

      -

      There is no definitive answer to this question, as different formats have different strengths and weaknesses. However, some of the factors that you can consider when choosing a format are compression ratio, speed, compatibility, encryption, and features. Generally speaking, 7z format has a high compression ratio and encryption, but it is not widely supported and it is slow to start. ZIP format has a good compatibility and speed, but it has a lower compression ratio and encryption. RAR format has a good compression ratio and encryption, but it is not free and it is not supported by some programs.

      -

      How can I open 7z files on my Mac without installing anything?

      -

      If you want to open 7z files on your Mac without installing anything, you can use an online service that can extract files from archives. For example, you can use B1 Online Archiver, Extract.me, or ezyZip. These services are free and easy to use. You just need to upload your 7z file and wait for the extraction to finish. Then, you can download your extracted files or save them to your cloud storage.

      -

      How can I create self-extracting archives with 7-Zip for Mac OS X?

      -

      Self-extracting archives are archives that can extract themselves without requiring any program to open them. They are useful for sharing files with other people who may not have the same program as you. To create self-extracting archives with 7-Zip for Mac OS X, you will need to use a third-party tool such as iZip, iPackr, or iSarcExtractor. These tools can create self-extracting archives in EXE or APP format that can run on Windows or Mac OS X respectively.

      -

      How can I integrate 7-Zip with Finder or other applications on my Mac?

      -

      If you want to integrate 7-Zip with Finder or other applications on your Mac, you will need to use a third-party tool such as BetterZip, Zipeg, or Archiver. These tools can add context menu commands or toolbar buttons that allow you to compress or extract files with 7-Zip from Finder or other applications. They also have more features and options than The Unarchiver.

      -

      How can I repair damaged archives with 7-Zip for Mac OS X?

      -

      If your archive is damaged or corrupted, you may not be able to open it or extract its files. To repair damaged archives with 7-Zip for Mac OS X, you will need to use a third-party tool such as RAR Repair Tool, DataNumen Archive Repair, or Remo Repair Zip. These tools can scan your archive and fix any errors or inconsistencies that prevent it from opening or extracting.

      401be4b1e0
      -
      -
      \ No newline at end of file diff --git a/spaces/fffiloni/ControlNet-Video/app.py b/spaces/fffiloni/ControlNet-Video/app.py deleted file mode 100644 index d417087505408253f070a9ba799ad126947e6d83..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/ControlNet-Video/app.py +++ /dev/null @@ -1,344 +0,0 @@ -from __future__ import annotations -import gradio as gr -import os -import cv2 -import numpy as np -from PIL import Image -from moviepy.editor import * -from share_btn import community_icon_html, loading_icon_html, share_js - -import pathlib -import shlex -import subprocess - -if os.getenv('SYSTEM') == 'spaces': - with open('patch') as f: - subprocess.run(shlex.split('patch -p1'), stdin=f, cwd='ControlNet') - -base_url = 'https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/' - -names = [ - 'body_pose_model.pth', - 'dpt_hybrid-midas-501f0c75.pt', - 'hand_pose_model.pth', - 'mlsd_large_512_fp32.pth', - 'mlsd_tiny_512_fp32.pth', - 'network-bsds500.pth', - 'upernet_global_small.pth', -] - -for name in names: - command = f'wget https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/{name} -O {name}' - out_path = pathlib.Path(f'ControlNet/annotator/ckpts/{name}') - if out_path.exists(): - continue - subprocess.run(shlex.split(command), cwd='ControlNet/annotator/ckpts/') - -from model import (DEFAULT_BASE_MODEL_FILENAME, DEFAULT_BASE_MODEL_REPO, - DEFAULT_BASE_MODEL_URL, Model) - -model = Model() - - -def controlnet(i, prompt, control_task, seed_in, ddim_steps, scale, low_threshold, high_threshold, value_threshold, distance_threshold, bg_threshold): - img= Image.open(i) - np_img = np.array(img) - - a_prompt = "best quality, extremely detailed" - n_prompt = "longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality" - num_samples = 1 - image_resolution = 512 - detect_resolution = 512 - eta = 0.0 - #low_threshold = 100 - #high_threshold = 200 - #value_threshold = 0.1 - #distance_threshold = 0.1 - #bg_threshold = 0.4 - - if control_task == 'Canny': - result = model.process_canny(np_img, prompt, a_prompt, n_prompt, num_samples, - image_resolution, ddim_steps, scale, seed_in, eta, low_threshold, high_threshold) - elif control_task == 'Depth': - result = model.process_depth(np_img, prompt, a_prompt, n_prompt, num_samples, - image_resolution, detect_resolution, ddim_steps, scale, seed_in, eta) - elif control_task == 'Hed': - result = model.process_hed(np_img, prompt, a_prompt, n_prompt, num_samples, - image_resolution, detect_resolution, ddim_steps, scale, seed_in, eta) - elif control_task == 'Hough': - result = model.process_hough(np_img, prompt, a_prompt, n_prompt, num_samples, - image_resolution, detect_resolution, ddim_steps, scale, seed_in, eta, value_threshold, - distance_threshold) - elif control_task == 'Normal': - result = model.process_normal(np_img, prompt, a_prompt, n_prompt, num_samples, - image_resolution, detect_resolution, ddim_steps, scale, seed_in, eta, bg_threshold) - elif control_task == 'Pose': - result = model.process_pose(np_img, prompt, a_prompt, n_prompt, num_samples, - image_resolution, detect_resolution, ddim_steps, scale, seed_in, eta) - elif control_task == 'Scribble': - result = model.process_scribble(np_img, prompt, a_prompt, n_prompt, num_samples, - image_resolution, ddim_steps, scale, seed_in, eta) - elif control_task == 'Seg': - result = model.process_seg(np_img, prompt, a_prompt, n_prompt, num_samples, - image_resolution, detect_resolution, 
ddim_steps, scale, seed_in, eta) - - #print(result[0]) - processor_im = Image.fromarray(result[0]) - processor_im.save("process_" + control_task + "_" + str(i) + ".jpeg") - im = Image.fromarray(result[1]) - im.save("your_file" + str(i) + ".jpeg") - return "your_file" + str(i) + ".jpeg", "process_" + control_task + "_" + str(i) + ".jpeg" - -def change_task_options(task): - if task == "Canny" : - return canny_opt.update(visible=True), hough_opt.update(visible=False), normal_opt.update(visible=False) - elif task == "Hough" : - return canny_opt.update(visible=False),hough_opt.update(visible=True), normal_opt.update(visible=False) - elif task == "Normal" : - return canny_opt.update(visible=False),hough_opt.update(visible=False), normal_opt.update(visible=True) - else : - return canny_opt.update(visible=False),hough_opt.update(visible=False), normal_opt.update(visible=False) - -def get_frames(video_in): - frames = [] - #resize the video - clip = VideoFileClip(video_in) - - #check fps - if clip.fps > 30: - print("vide rate is over 30, resetting to 30") - clip_resized = clip.resize(height=512) - clip_resized.write_videofile("video_resized.mp4", fps=30) - else: - print("video rate is OK") - clip_resized = clip.resize(height=512) - clip_resized.write_videofile("video_resized.mp4", fps=clip.fps) - - print("video resized to 512 height") - - # Opens the Video file with CV2 - cap= cv2.VideoCapture("video_resized.mp4") - - fps = cap.get(cv2.CAP_PROP_FPS) - print("video fps: " + str(fps)) - i=0 - while(cap.isOpened()): - ret, frame = cap.read() - if ret == False: - break - cv2.imwrite('kang'+str(i)+'.jpg',frame) - frames.append('kang'+str(i)+'.jpg') - i+=1 - - cap.release() - cv2.destroyAllWindows() - print("broke the video into frames") - - return frames, fps - - -def convert(gif): - if gif != None: - clip = VideoFileClip(gif.name) - clip.write_videofile("my_gif_video.mp4") - return "my_gif_video.mp4" - else: - pass - - -def create_video(frames, fps, type): - print("building video result") - clip = ImageSequenceClip(frames, fps=fps) - clip.write_videofile(type + "_result.mp4", fps=fps) - - return type + "_result.mp4" - - -def infer(prompt,video_in, control_task, seed_in, trim_value, ddim_steps, scale, low_threshold, high_threshold, value_threshold, distance_threshold, bg_threshold, gif_import): - print(f""" - ——————————————— - {prompt} - ———————————————""") - - # 1. break video into frames and get FPS - break_vid = get_frames(video_in) - frames_list= break_vid[0] - fps = break_vid[1] - n_frame = int(trim_value*fps) - - if n_frame >= len(frames_list): - print("video is shorter than the cut value") - n_frame = len(frames_list) - - # 2. 
prepare frames result arrays - processor_result_frames = [] - result_frames = [] - print("set stop frames to: " + str(n_frame)) - - for i in frames_list[0:int(n_frame)]: - controlnet_img = controlnet(i, prompt,control_task, seed_in, ddim_steps, scale, low_threshold, high_threshold, value_threshold, distance_threshold, bg_threshold) - #images = controlnet_img[0] - #rgb_im = images[0].convert("RGB") - - # exporting the image - #rgb_im.save(f"result_img-{i}.jpg") - processor_result_frames.append(controlnet_img[1]) - result_frames.append(controlnet_img[0]) - print("frame " + i + "/" + str(n_frame) + ": done;") - - processor_vid = create_video(processor_result_frames, fps, "processor") - final_vid = create_video(result_frames, fps, "final") - - files = [processor_vid, final_vid] - if gif_import != None: - final_gif = VideoFileClip(final_vid) - final_gif.write_gif("final_result.gif") - final_gif = "final_result.gif" - - files.append(final_gif) - print("finished !") - - return final_vid, gr.Accordion.update(visible=True), gr.Video.update(value=processor_vid, visible=True), gr.File.update(value=files, visible=True), gr.Group.update(visible=True) - - -def clean(): - return gr.Accordion.update(visible=False),gr.Video.update(value=None, visible=False), gr.Video.update(value=None), gr.File.update(value=None, visible=False), gr.Group.update(visible=False) - -title = """ -
      -
      -

      - ControlNet Video -

      -
      -

      - Apply ControlNet to a video -

      -
      -""" - -article = """ - - -
      -

      You may also like:

      -
      - - - - - - - -
      - -
      - -""" - -with gr.Blocks(css='style.css') as demo: - with gr.Column(elem_id="col-container"): - gr.HTML(title) - gr.HTML(""" - Duplicate Space - """, elem_id="duplicate-container") - with gr.Row(): - with gr.Column(): - video_inp = gr.Video(label="Video source", source="upload", type="filepath", elem_id="input-vid") - video_out = gr.Video(label="ControlNet video result", elem_id="video-output") - - with gr.Group(elem_id="share-btn-container", visible=False) as share_group: - community_icon = gr.HTML(community_icon_html) - loading_icon = gr.HTML(loading_icon_html) - share_button = gr.Button("Share to community", elem_id="share-btn") - - with gr.Accordion("Detailed results", visible=False) as detailed_result: - prep_video_out = gr.Video(label="Preprocessor video result", visible=False, elem_id="prep-video-output") - files = gr.File(label="Files can be downloaded ;)", visible=False) - - with gr.Column(): - #status = gr.Textbox() - - prompt = gr.Textbox(label="Prompt", placeholder="enter prompt", show_label=True, elem_id="prompt-in") - - with gr.Row(): - control_task = gr.Dropdown(label="Control Task", choices=["Canny", "Depth", "Hed", "Hough", "Normal", "Pose", "Scribble", "Seg"], value="Pose", multiselect=False, elem_id="controltask-in") - seed_inp = gr.Slider(label="Seed", minimum=0, maximum=2147483647, step=1, value=123456, elem_id="seed-in") - - with gr.Row(): - trim_in = gr.Slider(label="Cut video at (s)", minimun=1, maximum=5, step=1, value=1) - - with gr.Accordion("Advanced Options", open=False): - with gr.Tab("Diffusion Settings"): - with gr.Row(visible=False) as canny_opt: - low_threshold = gr.Slider(label='Canny low threshold', minimum=1, maximum=255, value=100, step=1) - high_threshold = gr.Slider(label='Canny high threshold', minimum=1, maximum=255, value=200, step=1) - - with gr.Row(visible=False) as hough_opt: - value_threshold = gr.Slider(label='Hough value threshold (MLSD)', minimum=0.01, maximum=2.0, value=0.1, step=0.01) - distance_threshold = gr.Slider(label='Hough distance threshold (MLSD)', minimum=0.01, maximum=20.0, value=0.1, step=0.01) - - with gr.Row(visible=False) as normal_opt: - bg_threshold = gr.Slider(label='Normal background threshold', minimum=0.0, maximum=1.0, value=0.4, step=0.01) - - ddim_steps = gr.Slider(label='Steps', minimum=1, maximum=100, value=20, step=1) - scale = gr.Slider(label='Guidance Scale', minimum=0.1, maximum=30.0, value=9.0, step=0.1) - - with gr.Tab("GIF import"): - gif_import = gr.File(label="import a GIF instead", file_types=['.gif']) - gif_import.change(convert, gif_import, video_inp, queue=False) - - with gr.Tab("Custom Model"): - current_base_model = gr.Text(label='Current base model', - value=DEFAULT_BASE_MODEL_URL) - with gr.Row(): - with gr.Column(): - base_model_repo = gr.Text(label='Base model repo', - max_lines=1, - placeholder=DEFAULT_BASE_MODEL_REPO, - interactive=True) - base_model_filename = gr.Text( - label='Base model file', - max_lines=1, - placeholder=DEFAULT_BASE_MODEL_FILENAME, - interactive=True) - change_base_model_button = gr.Button('Change base model') - - gr.HTML( - '''

      You can use other base models by specifying the repository name and filename.
      - The base model must be compatible with Stable Diffusion v1.5.

      ''') - - change_base_model_button.click(fn=model.set_base_model, - inputs=[ - base_model_repo, - base_model_filename, - ], - outputs=current_base_model, queue=False) - - submit_btn = gr.Button("Generate ControlNet video") - - inputs = [prompt,video_inp,control_task, seed_inp, trim_in, ddim_steps, scale, low_threshold, high_threshold, value_threshold, distance_threshold, bg_threshold, gif_import] - outputs = [video_out, detailed_result, prep_video_out, files, share_group] - #outputs = [status] - - - gr.HTML(article) - control_task.change(change_task_options, inputs=[control_task], outputs=[canny_opt, hough_opt, normal_opt], queue=False) - submit_btn.click(clean, inputs=[], outputs=[detailed_result, prep_video_out, video_out, files, share_group], queue=False) - submit_btn.click(infer, inputs, outputs) - share_button.click(None, [], [], _js=share_js) - - - -demo.queue(max_size=12).launch() \ No newline at end of file diff --git a/spaces/fffiloni/controlnet-animation-doodle/node_modules/side-channel/test/index.js b/spaces/fffiloni/controlnet-animation-doodle/node_modules/side-channel/test/index.js deleted file mode 100644 index 3b92ef7eb3a5fe84b896356d25b84bac54ceef91..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/controlnet-animation-doodle/node_modules/side-channel/test/index.js +++ /dev/null @@ -1,78 +0,0 @@ -'use strict'; - -var test = require('tape'); - -var getSideChannel = require('../'); - -test('export', function (t) { - t.equal(typeof getSideChannel, 'function', 'is a function'); - t.equal(getSideChannel.length, 0, 'takes no arguments'); - - var channel = getSideChannel(); - t.ok(channel, 'is truthy'); - t.equal(typeof channel, 'object', 'is an object'); - - t.end(); -}); - -test('assert', function (t) { - var channel = getSideChannel(); - t['throws']( - function () { channel.assert({}); }, - TypeError, - 'nonexistent value throws' - ); - - var o = {}; - channel.set(o, 'data'); - t.doesNotThrow(function () { channel.assert(o); }, 'existent value noops'); - - t.end(); -}); - -test('has', function (t) { - var channel = getSideChannel(); - var o = []; - - t.equal(channel.has(o), false, 'nonexistent value yields false'); - - channel.set(o, 'foo'); - t.equal(channel.has(o), true, 'existent value yields true'); - - t.end(); -}); - -test('get', function (t) { - var channel = getSideChannel(); - var o = {}; - t.equal(channel.get(o), undefined, 'nonexistent value yields undefined'); - - var data = {}; - channel.set(o, data); - t.equal(channel.get(o), data, '"get" yields data set by "set"'); - - t.end(); -}); - -test('set', function (t) { - var channel = getSideChannel(); - var o = function () {}; - t.equal(channel.get(o), undefined, 'value not set'); - - channel.set(o, 42); - t.equal(channel.get(o), 42, 'value was set'); - - channel.set(o, Infinity); - t.equal(channel.get(o), Infinity, 'value was set again'); - - var o2 = {}; - channel.set(o2, 17); - t.equal(channel.get(o), Infinity, 'o is not modified'); - t.equal(channel.get(o2), 17, 'o2 is set'); - - channel.set(o, 14); - t.equal(channel.get(o), 14, 'o is modified'); - t.equal(channel.get(o2), 17, 'o2 is not modified'); - - t.end(); -}); diff --git a/spaces/firica/assistant/app.py b/spaces/firica/assistant/app.py deleted file mode 100644 index 4d782cf3d55a558bdd96dcf05c904aa39b29f674..0000000000000000000000000000000000000000 --- a/spaces/firica/assistant/app.py +++ /dev/null @@ -1,96 +0,0 @@ -import os -import openai -import streamlit as st -import time - -from transformers import SpeechT5Processor, 
SpeechT5ForTextToSpeech, SpeechT5HifiGan -from datasets import load_dataset -import torch -import soundfile as sf -from datasets import load_dataset -# import base64 - -def ask_assistant(prompt): - MODEL = "gpt-3.5-turbo" - openai.api_key = os.getenv("OPENAI_API_KEY") - response = openai.ChatCompletion.create( - model=MODEL, - messages=[ - # {"role": "system", "content": "You are a friendly and helpful teaching assistant. You explain concepts in great depth using simple terms, and you give examples to help people learn. At the end of each explanation, you ask a question to check for understanding"}, - {"role": "system", "content": "Summarize the response in maximum 600 characters"}, - {"role": "user", "content": prompt}, - ], - temperature=0, - ) - - return response["choices"][0]["message"]["content"] - -def speak_up(text): - inputs = processor(text=text, return_tensors="pt") - - # load xvector containing speaker's voice characteristics from a dataset - embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation") - speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0) - - speech = model.generate_speech(inputs["input_ids"], speaker_embeddings, vocoder=vocoder) - - audio_file_name = "speech.wav" - sf.write(audio_file_name, speech.numpy(), samplerate=16000) - return audio_file_name - -# def play(file_path: str): -# with open(file_path, "rb") as f: -# data = f.read() -# b64 = base64.b64encode(data).decode() -# md = f""" -# -# """ -# st.markdown( -# md, -# unsafe_allow_html=True, -# ) - -processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts") -model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts") -vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan") - -# st.title("GPT Chat") - -# Initialize chat history -if "messages" not in st.session_state: - st.session_state.messages = [] - -# Display chat messages from history on app rerun -for message in st.session_state.messages: - with st.chat_message(message["role"]): - st.markdown(message["content"]) - -# Accept user input -if prompt := st.chat_input("What's up?"): - # Add user message to chat history - st.session_state.messages.append({"role": "user", "content": prompt}) - # Display user message in chat message container - with st.chat_message("user"): - st.markdown(prompt) - - # Display assistant response in chat message container - with st.chat_message("assistant"): - message_placeholder = st.empty() - full_response = "" - assistant_response = ask_assistant(prompt) - - audio_file = speak_up(assistant_response) - st.audio(audio_file) - # play(audio_file) - os.remove(audio_file) - # Simulate stream of response with milliseconds delay - for chunk in assistant_response.split(): - full_response += chunk + " " - time.sleep(0.05) - # Add a blinking cursor to simulate typing - message_placeholder.markdown(full_response + "▌") - message_placeholder.markdown(full_response) - # Add assistant response to chat history - st.session_state.messages.append({"role": "assistant", "content": full_response}) \ No newline at end of file diff --git a/spaces/fkhuggingme/gpt-academic/.github/ISSUE_TEMPLATE/feature_request.md b/spaces/fkhuggingme/gpt-academic/.github/ISSUE_TEMPLATE/feature_request.md deleted file mode 100644 index e46a4c01e804aa4b649bd40af6c13d5981c873d4..0000000000000000000000000000000000000000 --- a/spaces/fkhuggingme/gpt-academic/.github/ISSUE_TEMPLATE/feature_request.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -name: Feature request -about: Suggest an 
idea for this project -title: '' -labels: '' -assignees: '' - ---- - - diff --git a/spaces/flowers-team/SocialAISchool/utils/format.py b/spaces/flowers-team/SocialAISchool/utils/format.py deleted file mode 100644 index dcf9d0741e618b484d7e18ebfccf7180c633cda4..0000000000000000000000000000000000000000 --- a/spaces/flowers-team/SocialAISchool/utils/format.py +++ /dev/null @@ -1,144 +0,0 @@ -import os -import json -import numpy -import re -import torch -import torch_ac -import gym - -import utils - - -def get_obss_preprocessor(obs_space, text=None, dialogue_current=None, dialogue_history=None, custom_image_preprocessor=None, custom_image_space_preprocessor=None): - # Check if obs_space is an image space - if isinstance(obs_space, gym.spaces.Box): - obs_space = {"image": obs_space.shape} - - def preprocess_obss(obss, device=None): - assert custom_image_preprocessor is None - return torch_ac.DictList({ - "image": preprocess_images(obss, device=device) - }) - - # Check if it is a MiniGrid observation space - elif isinstance(obs_space, gym.spaces.Dict) and list(obs_space.spaces.keys()) == ["image"]: - - assert (custom_image_preprocessor is None) == (custom_image_space_preprocessor is None) - - image_obs_space = obs_space.spaces["image"].shape - - if custom_image_preprocessor: - image_obs_space = custom_image_space_preprocessor(image_obs_space) - - obs_space = {"image": image_obs_space, "text": 100} - - # must be specified in this case - if text is None: - raise ValueError("text argument must be specified.") - if dialogue_current is None: - raise ValueError("dialogue current argument must be specified.") - if dialogue_history is None: - raise ValueError("dialogue history argument must be specified.") - - vocab = Vocabulary(obs_space["text"]) - def preprocess_obss(obss, device=None): - if custom_image_preprocessor is None: - D = { - "image": preprocess_images([obs["image"] for obs in obss], device=device) - } - else: - D = { - "image": custom_image_preprocessor([obs["image"] for obs in obss], device=device) - } - - if dialogue_current: - D["utterance"] = preprocess_texts([obs["utterance"] for obs in obss], vocab, device=device) - - if dialogue_history: - D["utterance_history"] = preprocess_texts([obs["utterance_history"] for obs in obss], vocab, device=device) - - if text: - D["text"] = preprocess_texts([obs["mission"] for obs in obss], vocab, device=device) - - - return torch_ac.DictList(D) - - preprocess_obss.vocab = vocab - - else: - raise ValueError("Unknown observation space: " + str(obs_space)) - - return obs_space, preprocess_obss - -def ride_ref_image_space_preprocessor(image_space): - return image_space - -def ride_ref_image_preprocessor(images, device=None): - # Bug of Pytorch: very slow if not first converted to numpy array - - images = numpy.array(images) - - # grid dimensions - size = images.shape[1] - assert size == images.shape[2] - - # assert that 1, 2 are absolute cooridnates - # assert images[:,:,:,1].max() <= size - # assert images[:,:,:,2].max() <= size - assert images[:,:,:,1].min() >= 0 - assert images[:,:,:,2].min() >= 0 - # - # # 0, 1, 2 -> door state - # assert all([e in set([0, 1, 2]) for e in numpy.unique(images[:, :, :, 4].reshape(-1))]) - # - # only keep the (obj id, colors, state) -> multiply others by 0 - # print(images[:, :, :, 1].max()) - - images[:, :, :, 1] *= 0 - images[:, :, :, 2] *= 0 - - assert images.shape[-1] == 5 - - return torch.tensor(images, device=device, dtype=torch.float) - -def preprocess_images(images, device=None): - # Bug of Pytorch: very slow if 
not first converted to numpy array - images = numpy.array(images) - return torch.tensor(images, device=device, dtype=torch.float) - - -def preprocess_texts(texts, vocab, device=None): - var_indexed_texts = [] - max_text_len = 0 - - for text in texts: - tokens = re.findall("([a-z]+)", text.lower()) - var_indexed_text = numpy.array([vocab[token] for token in tokens]) - var_indexed_texts.append(var_indexed_text) - max_text_len = max(len(var_indexed_text), max_text_len) - - indexed_texts = numpy.zeros((len(texts), max_text_len)) - - for i, indexed_text in enumerate(var_indexed_texts): - indexed_texts[i, :len(indexed_text)] = indexed_text - - return torch.tensor(indexed_texts, device=device, dtype=torch.long) - - -class Vocabulary: - """A mapping from tokens to ids with a capacity of `max_size` words. - It can be saved in a `vocab.json` file.""" - - def __init__(self, max_size): - self.max_size = max_size - self.vocab = {} - - def load_vocab(self, vocab): - self.vocab = vocab - - def __getitem__(self, token): - if not token in self.vocab.keys(): - if len(self.vocab) >= self.max_size: - raise ValueError("Maximum vocabulary capacity reached") - self.vocab[token] = len(self.vocab) + 1 - return self.vocab[token] diff --git a/spaces/freddyaboulton/3.1.4.9-all-demos/demos/blocks_flipper/run.py b/spaces/freddyaboulton/3.1.4.9-all-demos/demos/blocks_flipper/run.py deleted file mode 100644 index 88859c6113e8887708bfc2762b87845166530363..0000000000000000000000000000000000000000 --- a/spaces/freddyaboulton/3.1.4.9-all-demos/demos/blocks_flipper/run.py +++ /dev/null @@ -1,27 +0,0 @@ -import numpy as np -import gradio as gr - -def flip_text(x): - return x[::-1] - -def flip_image(x): - return np.fliplr(x) - -with gr.Blocks() as demo: - gr.Markdown("Flip text or image files using this demo.") - with gr.Tabs(): - with gr.TabItem("Flip Text"): - text_input = gr.Textbox() - text_output = gr.Textbox() - text_button = gr.Button("Flip") - with gr.TabItem("Flip Image"): - with gr.Row(): - image_input = gr.Image() - image_output = gr.Image() - image_button = gr.Button("Flip") - - text_button.click(flip_text, inputs=text_input, outputs=text_output) - image_button.click(flip_image, inputs=image_input, outputs=image_output) - -if __name__ == "__main__": - demo.launch() \ No newline at end of file diff --git a/spaces/freshield/ChatGPT-gradio/README.md b/spaces/freshield/ChatGPT-gradio/README.md deleted file mode 100644 index 76755b06ed66472097982323260bc13e468dac8c..0000000000000000000000000000000000000000 --- a/spaces/freshield/ChatGPT-gradio/README.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: ChatGPT-gradio -emoji: 😚 -colorFrom: yellow -colorTo: red -sdk: gradio -sdk_version: 3.19.1 -python_version: 3.9.16 -app_file: app.py -fullWidth: true -pinned: false -license: mit ---- -# ChatGPT-gradio -# 简介 - -> 这是一个可以简单的把ChatGPT的API应用的前端网页,通过gradio进行构建。同时给出了简单的无需数据库的版本和加入数据库的两个不同的版本。 - - -基于ChatGPT的[API](https://github.com/openai/openai-python) 接口进行调用。 - -app的版本是已经直接部署到[huggingface space](https://huggingface.co/spaces/freshield/ChatGPT-gradio)的版本,没有任何的状态存储所以不需要数据库的支持。 - -而server版本是使用gradio结合mongodb的实现方式,加入了对于gradio的access token的识别并获取,对于想要使用gradio构建自己的应用的朋友有一定的参考价值。需要注意的是这里需要通过offline部分的代码提前加入用户。 - -有任何问题欢迎来骚扰,vx: freshield \ No newline at end of file diff --git a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/fileio/handlers/__init__.py b/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/fileio/handlers/__init__.py deleted file mode 100644 index 
aa24d91972837b8756b225f4879bac20436eb72a..0000000000000000000000000000000000000000 --- a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/fileio/handlers/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .base import BaseFileHandler -from .json_handler import JsonHandler -from .pickle_handler import PickleHandler -from .yaml_handler import YamlHandler - -__all__ = ['BaseFileHandler', 'JsonHandler', 'PickleHandler', 'YamlHandler'] diff --git a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/ops/deform_roi_pool.py b/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/ops/deform_roi_pool.py deleted file mode 100644 index cc245ba91fee252226ba22e76bb94a35db9a629b..0000000000000000000000000000000000000000 --- a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/ops/deform_roi_pool.py +++ /dev/null @@ -1,204 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from torch import nn -from torch.autograd import Function -from torch.autograd.function import once_differentiable -from torch.nn.modules.utils import _pair - -from ..utils import ext_loader - -ext_module = ext_loader.load_ext( - '_ext', ['deform_roi_pool_forward', 'deform_roi_pool_backward']) - - -class DeformRoIPoolFunction(Function): - - @staticmethod - def symbolic(g, input, rois, offset, output_size, spatial_scale, - sampling_ratio, gamma): - return g.op( - 'mmcv::MMCVDeformRoIPool', - input, - rois, - offset, - pooled_height_i=output_size[0], - pooled_width_i=output_size[1], - spatial_scale_f=spatial_scale, - sampling_ratio_f=sampling_ratio, - gamma_f=gamma) - - @staticmethod - def forward(ctx, - input, - rois, - offset, - output_size, - spatial_scale=1.0, - sampling_ratio=0, - gamma=0.1): - if offset is None: - offset = input.new_zeros(0) - ctx.output_size = _pair(output_size) - ctx.spatial_scale = float(spatial_scale) - ctx.sampling_ratio = int(sampling_ratio) - ctx.gamma = float(gamma) - - assert rois.size(1) == 5, 'RoI must be (idx, x1, y1, x2, y2)!' 
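# Editorial annotation (not part of the original mmcv source): each row of `rois` packs the
# batch index of the source sample followed by the box corners in input coordinates, matching
# the assert above. A hypothetical call with two boxes on sample 0 would pass
#   rois = input.new_tensor([[0., 4., 4., 28., 28.],
#                            [0., 0., 0., 16., 16.]])
# and the pooled output computed below would then have shape (2, C, pooled_h, pooled_w).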
- - output_shape = (rois.size(0), input.size(1), ctx.output_size[0], - ctx.output_size[1]) - output = input.new_zeros(output_shape) - - ext_module.deform_roi_pool_forward( - input, - rois, - offset, - output, - pooled_height=ctx.output_size[0], - pooled_width=ctx.output_size[1], - spatial_scale=ctx.spatial_scale, - sampling_ratio=ctx.sampling_ratio, - gamma=ctx.gamma) - - ctx.save_for_backward(input, rois, offset) - return output - - @staticmethod - @once_differentiable - def backward(ctx, grad_output): - input, rois, offset = ctx.saved_tensors - grad_input = grad_output.new_zeros(input.shape) - grad_offset = grad_output.new_zeros(offset.shape) - - ext_module.deform_roi_pool_backward( - grad_output, - input, - rois, - offset, - grad_input, - grad_offset, - pooled_height=ctx.output_size[0], - pooled_width=ctx.output_size[1], - spatial_scale=ctx.spatial_scale, - sampling_ratio=ctx.sampling_ratio, - gamma=ctx.gamma) - if grad_offset.numel() == 0: - grad_offset = None - return grad_input, None, grad_offset, None, None, None, None - - -deform_roi_pool = DeformRoIPoolFunction.apply - - -class DeformRoIPool(nn.Module): - - def __init__(self, - output_size, - spatial_scale=1.0, - sampling_ratio=0, - gamma=0.1): - super(DeformRoIPool, self).__init__() - self.output_size = _pair(output_size) - self.spatial_scale = float(spatial_scale) - self.sampling_ratio = int(sampling_ratio) - self.gamma = float(gamma) - - def forward(self, input, rois, offset=None): - return deform_roi_pool(input, rois, offset, self.output_size, - self.spatial_scale, self.sampling_ratio, - self.gamma) - - -class DeformRoIPoolPack(DeformRoIPool): - - def __init__(self, - output_size, - output_channels, - deform_fc_channels=1024, - spatial_scale=1.0, - sampling_ratio=0, - gamma=0.1): - super(DeformRoIPoolPack, self).__init__(output_size, spatial_scale, - sampling_ratio, gamma) - - self.output_channels = output_channels - self.deform_fc_channels = deform_fc_channels - - self.offset_fc = nn.Sequential( - nn.Linear( - self.output_size[0] * self.output_size[1] * - self.output_channels, self.deform_fc_channels), - nn.ReLU(inplace=True), - nn.Linear(self.deform_fc_channels, self.deform_fc_channels), - nn.ReLU(inplace=True), - nn.Linear(self.deform_fc_channels, - self.output_size[0] * self.output_size[1] * 2)) - self.offset_fc[-1].weight.data.zero_() - self.offset_fc[-1].bias.data.zero_() - - def forward(self, input, rois): - assert input.size(1) == self.output_channels - x = deform_roi_pool(input, rois, None, self.output_size, - self.spatial_scale, self.sampling_ratio, - self.gamma) - rois_num = rois.size(0) - offset = self.offset_fc(x.view(rois_num, -1)) - offset = offset.view(rois_num, 2, self.output_size[0], - self.output_size[1]) - return deform_roi_pool(input, rois, offset, self.output_size, - self.spatial_scale, self.sampling_ratio, - self.gamma) - - -class ModulatedDeformRoIPoolPack(DeformRoIPool): - - def __init__(self, - output_size, - output_channels, - deform_fc_channels=1024, - spatial_scale=1.0, - sampling_ratio=0, - gamma=0.1): - super(ModulatedDeformRoIPoolPack, - self).__init__(output_size, spatial_scale, sampling_ratio, gamma) - - self.output_channels = output_channels - self.deform_fc_channels = deform_fc_channels - - self.offset_fc = nn.Sequential( - nn.Linear( - self.output_size[0] * self.output_size[1] * - self.output_channels, self.deform_fc_channels), - nn.ReLU(inplace=True), - nn.Linear(self.deform_fc_channels, self.deform_fc_channels), - nn.ReLU(inplace=True), - nn.Linear(self.deform_fc_channels, - 
self.output_size[0] * self.output_size[1] * 2)) - self.offset_fc[-1].weight.data.zero_() - self.offset_fc[-1].bias.data.zero_() - - self.mask_fc = nn.Sequential( - nn.Linear( - self.output_size[0] * self.output_size[1] * - self.output_channels, self.deform_fc_channels), - nn.ReLU(inplace=True), - nn.Linear(self.deform_fc_channels, - self.output_size[0] * self.output_size[1] * 1), - nn.Sigmoid()) - self.mask_fc[2].weight.data.zero_() - self.mask_fc[2].bias.data.zero_() - - def forward(self, input, rois): - assert input.size(1) == self.output_channels - x = deform_roi_pool(input, rois, None, self.output_size, - self.spatial_scale, self.sampling_ratio, - self.gamma) - rois_num = rois.size(0) - offset = self.offset_fc(x.view(rois_num, -1)) - offset = offset.view(rois_num, 2, self.output_size[0], - self.output_size[1]) - mask = self.mask_fc(x.view(rois_num, -1)) - mask = mask.view(rois_num, 1, self.output_size[0], self.output_size[1]) - d = deform_roi_pool(input, rois, offset, self.output_size, - self.spatial_scale, self.sampling_ratio, - self.gamma) - return d * mask diff --git a/spaces/gnakan/airtable-QA/utils.py b/spaces/gnakan/airtable-QA/utils.py deleted file mode 100644 index 1d44ce8d17670a47efdf00b763f63e2863e251df..0000000000000000000000000000000000000000 --- a/spaces/gnakan/airtable-QA/utils.py +++ /dev/null @@ -1,140 +0,0 @@ -""" -Utility functions -""" - -import re -from io import BytesIO -from PIL import Image -import tempfile -import streamlit as st -import requests -import pandas as pd - -from langchain.agents import create_csv_agent -from langchain.llms import OpenAI - -from pyairtable import Table - -airtable_logo_url = "https://seeklogo.com/images/Q/question-cube-logo-2041FEA436-seeklogo.com.png" - -def extract_ids_from_base_url(base_url): - """ - Extract base and table ID or name from the base URL using regular expressions - """ - pattern = r'https://airtable.com/([\w\d]+)/(.*?)(?:/|$)' - match = re.match(pattern, base_url) - - if match: - base_id = match.group(1) - table_id = match.group(2) - - return dict(base_id=base_id, table_id=table_id) - else: - raise ValueError("Invalid base URL") - -def airtable_to_csv(): - """ - Convert Airtable contents into csv - """ - access_token = st.session_state["AIRTABLE_PAT"] - - # Extract the base and table ID from the base URL - ids_from_url = extract_ids_from_base_url(st.session_state["AIRTABLE_URL"]) - base_id, table_id = ids_from_url['base_id'], ids_from_url['table_id'] - - # Initialize Airtable Python SDK - table = Table(access_token, base_id, table_id) - - # Get all records from the table - all_records = table.all() - - # Extract the data from the JSON response and create a pandas DataFrame - rows = [] - for record in all_records: - row = record['fields'] - row['id'] = record['id'] - rows.append(row) - df = pd.DataFrame(rows) - - with tempfile.NamedTemporaryFile(delete=False) as tmp_file: - df.to_csv(tmp_file.name, index=False) - - print(tmp_file.name) - return tmp_file.name - -def clear_submit(): - """ - Clears the 'submit' value in the session state. - """ - st.session_state["submit"] = False - -def run_agent(file_name, query): - """ - Runs the agent on the given file with the specified query. - """ - openai_key = st.session_state["OPENAI_API_KEY"] - agent = create_csv_agent(OpenAI(temperature=0, openai_api_key=openai_key), file_name, verbose=True) - return agent.run(query).__str__() - -def validate_api_key(api_key_input): - """ - Validates the provided API key. 
- """ - api_key_regex = r"^sk-" - api_key_valid = re.match(api_key_regex, api_key_input) is not None - return api_key_valid - -def validate_pat(airtable_pat_input): - """ - Validates the provided Airtable personal access token (PAT). - """ - airtable_pat_regex = r"^pat" - airtable_pat_valid = re.match(airtable_pat_regex, airtable_pat_input) is not None - return airtable_pat_valid - -def validate_base_url(airtable_base_url_input): - """ - Validates the provided Airtable base URL. - """ - airtable_base_url_regex = r"^https:\/\/airtable.com\/app[^\/]+\/tbl[^\/]" - airtable_base_url_valid = re.match(airtable_base_url_regex, airtable_base_url_input) is not None - return airtable_base_url_valid - -def set_logo_and_page_config(): - """ - Sets the Airtable logo image and page config. - """ - response = requests.get(airtable_logo_url) - im = Image.open(BytesIO(response.content)) - st.set_page_config(page_title="Airtable-QA", page_icon=im, layout="wide") - st.image(airtable_logo_url, width=50) - st.header("Airtable-QA") - -def populate_markdown(): - """ - Populates markdown for sidebar. - """ - st.markdown( - "## How to use\n" - "1. Enter your [OpenAI API key](https://platform.openai.com/account/api-keys) below\n" - "2. Enter your [Airtable Personal Access Token](https://airtable.com/developers/web/guides/personal-access-tokens#creating-a-token) & Base URL 🔑\n" - "3. Ask any question that can be answered from Airtable Base\n") - api_key_input = st.text_input( - "OpenAI API Key", - type="password", - placeholder="sk-...", - help="You can get your API key from https://platform.openai.com/account/api-keys", - value=st.session_state.get("OPENAI_API_KEY", "")) - airtable_pat_input = st.text_input( - "Airtable Personal Access Token", - type="password", - placeholder="pat...", - help="You can get your Airtable PAT from https://airtable.com/developers/web/guides/personal-access-tokens#creating-a-token", - value=st.session_state.get("AIRTABLE_PAT", "")) - airtable_base_url_input = st.text_input( - "Airtable Base URL", - type="password", - placeholder="https://airtable.com/app.../tbl...", - help="You can get your Airtable Base URL by simply copy pasting the URL", - value=st.session_state.get("AIRTABLE_URL", "")) - return api_key_input, airtable_pat_input, airtable_base_url_input \ No newline at end of file diff --git a/spaces/godot-demo/godot-2d-threads/index.worker.js b/spaces/godot-demo/godot-2d-threads/index.worker.js deleted file mode 100644 index 1b721d12814d2fc24e95760ed7be0e33bbdbada4..0000000000000000000000000000000000000000 --- a/spaces/godot-demo/godot-2d-threads/index.worker.js +++ /dev/null @@ -1 +0,0 @@ -"use strict";var Module={};function threadPrintErr(){var text=Array.prototype.slice.call(arguments).join(" ");console.error(text)}function threadAlert(){var text=Array.prototype.slice.call(arguments).join(" ");postMessage({cmd:"alert",text:text,threadId:Module["_pthread_self"]()})}var err=threadPrintErr;self.alert=threadAlert;Module["instantiateWasm"]=function(info,receiveInstance){var instance=new WebAssembly.Instance(Module["wasmModule"],info);receiveInstance(instance);Module["wasmModule"]=null;return instance.exports};function moduleLoaded(){}self.onmessage=function(e){try{if(e.data.cmd==="load"){Module["wasmModule"]=e.data.wasmModule;Module["wasmMemory"]=e.data.wasmMemory;Module["buffer"]=Module["wasmMemory"].buffer;Module["ENVIRONMENT_IS_PTHREAD"]=true;if(typeof e.data.urlOrBlob==="string"){importScripts(e.data.urlOrBlob)}else{var 
objectUrl=URL.createObjectURL(e.data.urlOrBlob);importScripts(objectUrl);URL.revokeObjectURL(objectUrl)}Godot(Module).then(function(instance){Module=instance;moduleLoaded()})}else if(e.data.cmd==="objectTransfer"){Module["PThread"].receiveObjectTransfer(e.data)}else if(e.data.cmd==="run"){Module["__performance_now_clock_drift"]=performance.now()-e.data.time;Module["__emscripten_thread_init"](e.data.threadInfoStruct,0,0);var max=e.data.stackBase;var top=e.data.stackBase+e.data.stackSize;Module["establishStackSpace"](top,max);Module["PThread"].receiveObjectTransfer(e.data);Module["PThread"].threadInit();try{var result=Module["invokeEntryPoint"](e.data.start_routine,e.data.arg);if(Module["keepRuntimeAlive"]()){Module["PThread"].setExitStatus(result)}else{Module["PThread"].threadExit(result)}}catch(ex){if(ex==="Canceled!"){Module["PThread"].threadCancel()}else if(ex!="unwind"){if(ex instanceof Module["ExitStatus"]){if(Module["keepRuntimeAlive"]()){}else{Module["PThread"].threadExit(ex.status)}}else{Module["PThread"].threadExit(-2);throw ex}}}}else if(e.data.cmd==="cancel"){if(Module["_pthread_self"]()){Module["PThread"].threadCancel()}}else if(e.data.target==="setimmediate"){}else if(e.data.cmd==="processThreadQueue"){if(Module["_pthread_self"]()){Module["_emscripten_current_thread_process_queued_calls"]()}}else{err("worker.js received unknown command "+e.data.cmd);err(e.data)}}catch(ex){err("worker.js onmessage() captured an uncaught exception: "+ex);if(ex&&ex.stack)err(ex.stack);throw ex}}; diff --git a/spaces/gotiQspiryo/whisper-ui/Dockerfile b/spaces/gotiQspiryo/whisper-ui/Dockerfile deleted file mode 100644 index 187535e9726f6feb6d62b88b98c68912ccac3789..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/Dockerfile +++ /dev/null @@ -1,32 +0,0 @@ -# Currently tested & workong for Python 3.11 -FROM python:3.11-slim - -# Run updates and install ffmpeg -RUN apt-get update && \ - apt-get upgrade -y && \ - apt-get install -y ffmpeg - -# Copy the current directory contents into the container at /app -COPY app /app - -# Copy and install the requirements -COPY ./requirements.txt /requirements.txt - -# Pip install the dependencies -RUN pip install --upgrade pip -# For CPU only, you can use pip install git+https://github.com/MiscellaneousStuff/whisper.git -# in place of openai-whisper. -# Also, --extra-index-url https://download.pytorch.org/whl/cpu might be needed if you are using a CPU only machine -RUN pip install --no-cache-dir -r /requirements.txt - -# Set the working directory to /app -WORKDIR /app - -# Expose port 8501 -EXPOSE 8501 - -# Mount the data volume -VOLUME /data - -# Run the app -CMD streamlit run /app/01_🏠_Home.py diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Diary of a Wimpy Kid Ebook Free Download Read the Hilarious Adventures of Greg Heffley.md b/spaces/gotiQspiryo/whisper-ui/examples/Diary of a Wimpy Kid Ebook Free Download Read the Hilarious Adventures of Greg Heffley.md deleted file mode 100644 index 997539c4af757190f43979c95e31e7f94d8c5730..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/Diary of a Wimpy Kid Ebook Free Download Read the Hilarious Adventures of Greg Heffley.md +++ /dev/null @@ -1,25 +0,0 @@ -
      -

      WE ARE LÖÖÖÖÖDED DIPER! Check out our new Loded Diper merch, now available in tees, hoodies, tanks, and more. ??? Check out the link in our stories, or in the Trevco highlight reel!#wimpykid #diaryofawimpykid #lodeddiper #rodrickheffley #rodrickrules #merch #apparel

      -

      diary of a wimpy kid ebook free download


      Download > https://urlgoal.com/2uyM0I



      -

      Rodrick looks like he's in desperate need of a weekend-long nap on the living room couch...??Artwork by @deadwick_ ??#fanartfriday #fridayfanart #rodrickheffley #lodeddiper #wimpykid #diaryofawimpykid #friday

      -

      Fun fact, if you look closely at the cheese holes on each book page, you might see some familiar faces pop up! (Beware of the perils that await you on The Deep End's page...)#wimpykid #diaryofawimpykid #gregheffley #rowleyjefferson #diaryofanawesomefriendlykid #thelonghaul #diperoverlode #rowleyjeffersonsawesomespookystories

      -

      Some Tuesday Trivia for those who have read Diper Överlöde. What's the name of the drummer from Rodrick's favorite band, Metallichihuahua? ?#wimpykid #diaryofawimpykid #diperoverlode #rodrickheffley #metallichihuahua #trivia #TriviaTuesday

      -

      -

      In case anyone was wondering what "Diper Överlöde" translates to in different languages....???#wimpykid #diaryofawimpykid #diperoverlode #internationaleditions #booktranslations #finland #spain #germany #japan

      -

      As part of our website revamp, we have added lots of new and fun facts about the Diary of a Wimpy Kid series! Check out the "About" section at wimpykid.com to see how some rough sketches and jokes turned into an international phenomenon. ?#wimpykid #diaryofawimpykid #jeffkinney #books #gregheffley #diperoverlode

      -

      New year, new #FanArtFriday! Check out this rockin' @lego concert featuring Löded Diper ???Thank you lady_shed_hunter for sharing your son's awesome work! ???#wimpykid #diaryofawimpykid #lodeddiper #rodrickheffley #diperoverlode #fanart #lego #fridayfanart

      -

      Let's get this show on the road ??Gather your crew and rock out to Diary of a #WimpyKid: Rodrick Rules now streaming on Disneyplus #diaryofawimpykid #rodrickrules #rodrickheffley #lodeddiper #gregheffley #disney #disneyplus #nowstreaming #outnow #readytorock

      -

      ? Text Message: You're invited to...?Watch Diary of a #WimpyKid: Rodrick Rules now on Disneyplus #diaryofawimpykid #rodrickrules #rodrickheffley #partytime #disney #disneyplus #nowstreaming #groupchat

      -

      From book to screen, Diary of a #WimpyKid: Rodrick Rules truly ROCKS! For the first time ever, here's Jeff Kinney, author of the bestselling series, reading a passage from the Rodrick Rules book as the iconic party scene plays out in the all-new movie, now streaming only on @disneyplus!#diaryofawimpykid #rodrickrules #rodrickheffley #lodeddiper #gregheffley #jeffkinney #booktoscreen #disney #disneyplus #nowstreaming

      -

      The Little Drummer Boy has been a holiday classic for years. We're putting our bid in for the Löded Drummer Boy to become the new seasonal sensation. Are you in? ????#wimpykid #diaryofawimpykid #rodrick #rodrickhefley #lodeddiper #lodeddrummerboy #holiday #classic #seasonal #sensation

      -

      Perks of having a big brother ?Diary of a #WimpyKid: Rodrick Rules is now streaming on Disneyplus #diaryofawimpykid #rodrickrules #rodrickheffley #gregheffley #heffleybrothers #disney #disneyplus #nowstreaming #brothers

      -

      Winter break is fast-approaching! Here are a few suggestions to keep your household entertained!???#wimpykid #diaryofawimpykid #games #puzzles #winterbreak #cheesetouch #cardgame #boardgame #giftideas

      -

      Fan Art Friday: Wimp Yourself Edition ?Thank you to everyone who is having fun with Wimp Yourself on our website. Keep the content coming! #wimpykid #diaryofawimpykid #wimpyourself #charactercreation #fanart #fanartfriday @plww1_ @weirdpersoninweirdworld @gamerrising @itzzz_first_mate @elisostrovski @colladojccd @vibeswithannie_

      -

      #tbt to our event in Columbus, OH this year. We miss seeing all of our awesome wimpy fans on the road!#wimpykid #diaryofawimpykid #throwback #throwbackthursday #lodeddiper #diperoverlodetour #columbusoh

      -

      Rules to live by ??See what they all are in Diary of a #WimpyKid: Rodrick Rules now streaming on Disneyplus #diaryofawimpykid #rodrickrules #rodrickheffley #lodeddiper #disney #disneyplus #nowstreaming #outnow

      -

      We recently revamped our website, with so many new and exciting features including WIMP YOURSELF, where you can design your own wimpy kid characters! Be sure to tag us at @diaryofawimpykid so that we can see what awesome wimpy characters you create! #wimpykid #diaryofawimpykid #lodeddiper #website #newandimproved #wimpyourself #charactercreation

      -

      This party is just getting started ?Head on over to @DisneyPlus to stream Diary of a #WimpyKid: Rodrick Rules now!#diaryofawimpykid #rodrickrules #rodrickheffley #gregheffley #rowleyjefferson #lodeddiper #outnow #lodeddiper #disney #disneyplus

      -

      ??Can You Smell Us Now?? Sing along and see Löded Diper rock out in Diary of a #WimpyKid: Rodrick Rules now streaming on @DisneyPlus! ???#diaryofawimpykid #rodrickrules #rodrickheffley #lodeddiper #canyousmellusnow #disney #disneyplus

      -

      Don't miss out on a rockin' good time...?Experience Diary of a #WimpyKid: Rodrick Rules now on @disneyplus #diaryofawimpykid #rodrickrules #rodrickheffley #gregheffley #lodeddiper #nowstreaming #outnow #disney #disneyplus

      aaccfb2cb3
      -
      -
      \ No newline at end of file diff --git a/spaces/gotiQspiryo/whisper-ui/examples/ImTOO Blu-ray Creator V2.0.4 Build 0707 Serial.epubl __FULL__.md b/spaces/gotiQspiryo/whisper-ui/examples/ImTOO Blu-ray Creator V2.0.4 Build 0707 Serial.epubl __FULL__.md deleted file mode 100644 index caad7ad1b23cc57f0111e558c51b1ba26173916f..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/ImTOO Blu-ray Creator V2.0.4 Build 0707 Serial.epubl __FULL__.md +++ /dev/null @@ -1,6 +0,0 @@ -

      ImTOO Blu-ray Creator V2.0.4 Build 0707 Serial.epubl


      DOWNLOAD ✑ ✑ ✑ https://urlgoal.com/2uyN4Y



      -
      - aaccfb2cb3
      -
      -
      -

      diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Inazuma Eleven Go Strikers 2013 Download Iso.md b/spaces/gotiQspiryo/whisper-ui/examples/Inazuma Eleven Go Strikers 2013 Download Iso.md deleted file mode 100644 index 60718d13d87df8f0ec8404ab342407f1f26721a9..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/Inazuma Eleven Go Strikers 2013 Download Iso.md +++ /dev/null @@ -1,9 +0,0 @@ - -

battle mode: the player controls their character in Strikers style. The goal of this mode is to build a winning team out of other player-created characters, and only characters created before a game begins may take part. Characters are drawn from across the Inazuma Eleven franchise - Inazuma Eleven 1, Inazuma Eleven 2, and the Inazuma Eleven GO Strikers mobile game - and post-game characters can also be used. A team starts with three characters; one player takes full control of the team, but a member can only be removed from it once per turn. Each turn, a player can change the team, add a new character, or recruit one of a number of unique characters, at a cost of CP (halo currency) or gold.

      -

      inazuma eleven go strikers 2013 download iso


      Download Zip ::: https://urlgoal.com/2uyMYK



      -

prelude mode: the player can play a character other than their chosen one, in Strikers style, against an opponent. During a match, the player is free to use all of that character's unique moves and special abilities. Characters can be improved using CP (halo currency) or gold.

      -

theatre mode: the player can watch a match between the 12 characters in an arena. The match is made up of rounds, and the number of rounds is determined when the game is started for the first time.

      -

additionally, the introduction of dōjō and dōjō-yaki, and the G-rated language used in some in-game dialogue and newspaper articles, were intentional design choices to meet local expectations while preserving the charm of the original.

      -

      899543212b
      -
      -
      \ No newline at end of file diff --git a/spaces/gradio/HuBERT/tests/test_valid_subset_checks.py b/spaces/gradio/HuBERT/tests/test_valid_subset_checks.py deleted file mode 100644 index 3e9191bda66fccfebba34920f88bf7b1efea5f7e..0000000000000000000000000000000000000000 --- a/spaces/gradio/HuBERT/tests/test_valid_subset_checks.py +++ /dev/null @@ -1,138 +0,0 @@ -import os -import shutil -import tempfile -import unittest - -from fairseq import options -from fairseq.dataclass.utils import convert_namespace_to_omegaconf -from fairseq.data.data_utils import raise_if_valid_subsets_unintentionally_ignored -from .utils import create_dummy_data, preprocess_lm_data, train_language_model - - -def make_lm_config( - data_dir=None, - extra_flags=None, - task="language_modeling", - arch="transformer_lm_gpt2_tiny", -): - task_args = [task] - if data_dir is not None: - task_args += [data_dir] - train_parser = options.get_training_parser() - train_args = options.parse_args_and_arch( - train_parser, - [ - "--task", - *task_args, - "--arch", - arch, - "--optimizer", - "adam", - "--lr", - "0.0001", - "--max-tokens", - "500", - "--tokens-per-sample", - "500", - "--save-dir", - data_dir, - "--max-epoch", - "1", - ] - + (extra_flags or []), - ) - cfg = convert_namespace_to_omegaconf(train_args) - return cfg - - -def write_empty_file(path): - with open(path, "w"): - pass - assert os.path.exists(path) - - -class TestValidSubsetsErrors(unittest.TestCase): - """Test various filesystem, clarg combinations and ensure that error raising happens as expected""" - - def _test_case(self, paths, extra_flags): - with tempfile.TemporaryDirectory() as data_dir: - [ - write_empty_file(os.path.join(data_dir, f"{p}.bin")) - for p in paths + ["train"] - ] - cfg = make_lm_config(data_dir, extra_flags=extra_flags) - raise_if_valid_subsets_unintentionally_ignored(cfg) - - def test_default_raises(self): - with self.assertRaises(ValueError): - self._test_case(["valid", "valid1"], []) - with self.assertRaises(ValueError): - self._test_case( - ["valid", "valid1", "valid2"], ["--valid-subset", "valid,valid1"] - ) - - def partially_specified_valid_subsets(self): - with self.assertRaises(ValueError): - self._test_case( - ["valid", "valid1", "valid2"], ["--valid-subset", "valid,valid1"] - ) - # Fix with ignore unused - self._test_case( - ["valid", "valid1", "valid2"], - ["--valid-subset", "valid,valid1", "--ignore-unused-valid-subsets"], - ) - - def test_legal_configs(self): - self._test_case(["valid"], []) - self._test_case(["valid", "valid1"], ["--ignore-unused-valid-subsets"]) - self._test_case(["valid", "valid1"], ["--combine-val"]) - self._test_case(["valid", "valid1"], ["--valid-subset", "valid,valid1"]) - self._test_case(["valid", "valid1"], ["--valid-subset", "valid1"]) - self._test_case( - ["valid", "valid1"], ["--combine-val", "--ignore-unused-valid-subsets"] - ) - self._test_case( - ["valid1"], ["--valid-subset", "valid1"] - ) # valid.bin doesn't need to be ignored. 
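# Editorial sketch (not part of the original fairseq test file): the cases above illustrate the
# contract of raise_if_valid_subsets_unintentionally_ignored. Roughly, with valid.bin and
# valid1.bin both present in data_dir:
#   --valid-subset valid                                 -> ValueError (valid1 silently ignored)
#   --valid-subset valid --ignore-unused-valid-subsets   -> accepted
#   --combine-val                                        -> accepted (all valid* splits merged)
#   --disable-validation                                 -> accepted (nothing is validated at all)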
- - def test_disable_validation(self): - self._test_case([], ["--disable-validation"]) - self._test_case(["valid", "valid1"], ["--disable-validation"]) - - def test_dummy_task(self): - cfg = make_lm_config(task="dummy_lm") - raise_if_valid_subsets_unintentionally_ignored(cfg) - - def test_masked_dummy_task(self): - cfg = make_lm_config(task="dummy_masked_lm") - raise_if_valid_subsets_unintentionally_ignored(cfg) - - -class TestCombineValidSubsets(unittest.TestCase): - def _train(self, extra_flags): - with self.assertLogs() as logs: - with tempfile.TemporaryDirectory("test_transformer_lm") as data_dir: - create_dummy_data(data_dir, num_examples=20) - preprocess_lm_data(data_dir) - - shutil.copyfile(f"{data_dir}/valid.bin", f"{data_dir}/valid1.bin") - shutil.copyfile(f"{data_dir}/valid.idx", f"{data_dir}/valid1.idx") - train_language_model( - data_dir, - "transformer_lm", - ["--max-update", "0", "--log-format", "json"] + extra_flags, - run_validation=False, - ) - return [x.message for x in logs.records] - - def test_combined(self): - flags = ["--combine-valid-subsets"] - logs = self._train(flags) - assert any(["valid1" in x for x in logs]) # loaded 100 examples from valid1 - assert not any(["valid1_ppl" in x for x in logs]) # metrics are combined - - def test_subsets(self): - flags = ["--valid-subset", "valid,valid1"] - logs = self._train(flags) - assert any(["valid_ppl" in x for x in logs]) # loaded 100 examples from valid1 - assert any(["valid1_ppl" in x for x in logs]) # metrics are combined diff --git a/spaces/gradio/video_identity_main/README.md b/spaces/gradio/video_identity_main/README.md deleted file mode 100644 index bc921e4383e8305acd30e430bd41aaffa4b11aad..0000000000000000000000000000000000000000 --- a/spaces/gradio/video_identity_main/README.md +++ /dev/null @@ -1,12 +0,0 @@ - ---- -title: video_identity_main -emoji: 🔥 -colorFrom: indigo -colorTo: indigo -sdk: gradio -sdk_version: 4.1.2 -app_file: run.py -pinned: false -hf_oauth: true ---- diff --git a/spaces/gyugnsu/DragGan-Inversion/PTI/utils/ImagesDataset.py b/spaces/gyugnsu/DragGan-Inversion/PTI/utils/ImagesDataset.py deleted file mode 100644 index 4d36e8665270f4f6dee5a2d58a36c564e1543771..0000000000000000000000000000000000000000 --- a/spaces/gyugnsu/DragGan-Inversion/PTI/utils/ImagesDataset.py +++ /dev/null @@ -1,43 +0,0 @@ -import os - -from torch.utils.data import Dataset -from PIL import Image - -from PTI.utils.data_utils import make_dataset -from torchvision import transforms - - -class Image2Dataset(Dataset): - def __init__(self, image) -> None: - super().__init__() - self.image = image - self.transform = transforms.Compose( - [ - transforms.ToTensor(), - transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]), - ] - ) - - def __len__(self): - return 1 - - def __getitem__(self, index): - return "customIMG", self.transform(self.image) - - -class ImagesDataset(Dataset): - def __init__(self, source_root, source_transform=None): - self.source_paths = sorted(make_dataset(source_root)) - self.source_transform = source_transform - - def __len__(self): - return len(self.source_paths) - - def __getitem__(self, index): - fname, from_path = self.source_paths[index] - from_im = Image.open(from_path).convert("RGB").resize([1024, 1024]) - - if self.source_transform: - from_im = self.source_transform(from_im) - - return fname, from_im diff --git a/spaces/haakohu/deep_privacy2_face/dp2/detection/models/__init__.py b/spaces/haakohu/deep_privacy2_face/dp2/detection/models/__init__.py deleted file mode 100644 index 
e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/hamacojr/SAM-CAT-Seg/open_clip/src/training/params.py b/spaces/hamacojr/SAM-CAT-Seg/open_clip/src/training/params.py deleted file mode 100644 index 44db413a5440ed1d6b151851fb95507177b4f3d2..0000000000000000000000000000000000000000 --- a/spaces/hamacojr/SAM-CAT-Seg/open_clip/src/training/params.py +++ /dev/null @@ -1,403 +0,0 @@ -import argparse -import ast - - -def get_default_params(model_name): - # Params from paper (https://arxiv.org/pdf/2103.00020.pdf) - model_name = model_name.lower() - if "vit" in model_name: - return {"lr": 5.0e-4, "beta1": 0.9, "beta2": 0.98, "eps": 1.0e-6} - else: - return {"lr": 5.0e-4, "beta1": 0.9, "beta2": 0.999, "eps": 1.0e-8} - - -class ParseKwargs(argparse.Action): - def __call__(self, parser, namespace, values, option_string=None): - kw = {} - for value in values: - key, value = value.split('=') - try: - kw[key] = ast.literal_eval(value) - except ValueError: - kw[key] = str(value) # fallback to string (avoid need to escape on command line) - setattr(namespace, self.dest, kw) - - -def parse_args(args): - parser = argparse.ArgumentParser() - parser.add_argument( - "--train-data", - type=str, - default=None, - help="Path to file(s) with training data", - ) - parser.add_argument( - "--val-data", - type=str, - default=None, - help="Path to file(s) with validation data", - ) - parser.add_argument( - "--train-num-samples", - type=int, - default=None, - help="Number of samples in dataset. Required for webdataset if not available in info file.", - ) - parser.add_argument( - "--val-num-samples", - type=int, - default=None, - help="Number of samples in dataset. Useful for webdataset if not available in info file.", - ) - parser.add_argument( - "--dataset-type", - choices=["webdataset", "csv", "synthetic", "auto"], - default="auto", - help="Which type of dataset to process." - ) - parser.add_argument( - "--dataset-resampled", - default=False, - action="store_true", - help="Whether to use sampling with replacement for webdataset shard selection." - ) - parser.add_argument( - "--csv-separator", - type=str, - default="\t", - help="For csv-like datasets, which separator to use." - ) - parser.add_argument( - "--csv-img-key", - type=str, - default="filepath", - help="For csv-like datasets, the name of the key for the image paths." - ) - parser.add_argument( - "--csv-caption-key", - type=str, - default="title", - help="For csv-like datasets, the name of the key for the captions." - ) - parser.add_argument( - "--imagenet-val", - type=str, - default=None, - help="Path to imagenet val set for conducting zero shot evaluation.", - ) - parser.add_argument( - "--imagenet-v2", - type=str, - default=None, - help="Path to imagenet v2 for conducting zero shot evaluation.", - ) - parser.add_argument( - "--logs", - type=str, - default="./logs/", - help="Where to store tensorboard logs. Use None to avoid storing logs.", - ) - parser.add_argument( - "--log-local", - action="store_true", - default=False, - help="log files on local master, otherwise global master only.", - ) - parser.add_argument( - "--name", - type=str, - default=None, - help="Optional identifier for the experiment when storing logs. Otherwise use current time.", - ) - parser.add_argument( - "--workers", type=int, default=1, help="Number of dataloader workers per GPU." - ) - parser.add_argument( - "--batch-size", type=int, default=64, help="Batch size per GPU." 
- ) - parser.add_argument( - "--epochs", type=int, default=32, help="Number of epochs to train for." - ) - parser.add_argument( - "--epochs-cooldown", type=int, default=None, - help="When scheduler w/ cooldown used, perform cooldown from total_epochs - cooldown_epochs onwards." - ) - parser.add_argument("--lr", type=float, default=None, help="Learning rate.") - parser.add_argument("--beta1", type=float, default=None, help="Adam beta 1.") - parser.add_argument("--beta2", type=float, default=None, help="Adam beta 2.") - parser.add_argument("--eps", type=float, default=None, help="Adam epsilon.") - parser.add_argument("--wd", type=float, default=0.2, help="Weight decay.") - parser.add_argument( - "--warmup", type=int, default=10000, help="Number of steps to warmup for." - ) - parser.add_argument( - "--use-bn-sync", - default=False, - action="store_true", - help="Whether to use batch norm sync.") - parser.add_argument( - "--skip-scheduler", - action="store_true", - default=False, - help="Use this flag to skip the learning rate decay.", - ) - parser.add_argument( - "--lr-scheduler", - type=str, - default='cosine', - help="LR scheduler. One of: 'cosine', 'const' (constant), 'const-cooldown' (constant w/ cooldown). Default: cosine", - ) - parser.add_argument( - "--lr-cooldown-end", type=float, default=0.0, - help="End learning rate for cooldown schedule. Default: 0" - ) - parser.add_argument( - "--lr-cooldown-power", type=float, default=1.0, - help="Power for polynomial cooldown schedule. Default: 1.0 (linear decay)" - ) - parser.add_argument( - "--save-frequency", type=int, default=1, help="How often to save checkpoints." - ) - parser.add_argument( - "--save-most-recent", - action="store_true", - default=False, - help="Always save the most recent model trained to epoch_latest.pt.", - ) - parser.add_argument( - "--zeroshot-frequency", type=int, default=2, help="How often to run zero shot." - ) - parser.add_argument( - "--val-frequency", type=int, default=1, help="How often to run evaluation with val data." - ) - parser.add_argument( - "--resume", - default=None, - type=str, - help="path to latest checkpoint (default: none)", - ) - parser.add_argument( - "--precision", - choices=["amp", "amp_bf16", "amp_bfloat16", "bf16", "fp16", "fp32"], - default="amp", - help="Floating point precision." 
- ) - parser.add_argument( - "--model", - type=str, - default="RN50", - help="Name of the vision backbone to use.", - ) - parser.add_argument( - "--pretrained", - default='', - type=str, - help="Use a pretrained CLIP model weights with the specified tag or file path.", - ) - parser.add_argument( - "--pretrained-image", - default=False, - action='store_true', - help="Load imagenet pretrained weights for image tower backbone if available.", - ) - parser.add_argument( - "--lock-image", - default=False, - action='store_true', - help="Lock full image tower by disabling gradients.", - ) - parser.add_argument( - "--lock-image-unlocked-groups", - type=int, - default=0, - help="Leave last n image tower layer groups unlocked.", - ) - parser.add_argument( - "--lock-image-freeze-bn-stats", - default=False, - action='store_true', - help="Freeze BatchNorm running stats in image tower for any locked layers.", - ) - parser.add_argument( - '--image-mean', type=float, nargs='+', default=None, metavar='MEAN', - help='Override default image mean value of dataset') - parser.add_argument( - '--image-std', type=float, nargs='+', default=None, metavar='STD', - help='Override default image std deviation of of dataset') - parser.add_argument('--aug-cfg', nargs='*', default={}, action=ParseKwargs) - parser.add_argument( - "--grad-checkpointing", - default=False, - action='store_true', - help="Enable gradient checkpointing.", - ) - parser.add_argument( - "--local-loss", - default=False, - action="store_true", - help="calculate loss w/ local features @ global (instead of realizing full global @ global matrix)" - ) - parser.add_argument( - "--gather-with-grad", - default=False, - action="store_true", - help="enable full distributed gradient for feature gather" - ) - parser.add_argument( - '--force-image-size', type=int, nargs='+', default=None, - help='Override default image size' - ) - parser.add_argument( - "--force-quick-gelu", - default=False, - action='store_true', - help="Force use of QuickGELU activation for non-OpenAI transformer models.", - ) - parser.add_argument( - "--force-patch-dropout", - default=None, - type=float, - help="Override the patch dropout during training, for fine tuning with no dropout near the end as in the paper", - ) - parser.add_argument( - "--force-custom-text", - default=False, - action='store_true', - help="Force use of CustomTextCLIP model (separate text-tower).", - ) - parser.add_argument( - "--torchscript", - default=False, - action='store_true', - help="torch.jit.script the model, also uses jit version of OpenAI models if pretrained=='openai'", - ) - parser.add_argument( - "--trace", - default=False, - action='store_true', - help="torch.jit.trace the model for inference / eval only", - ) - parser.add_argument( - "--accum-freq", type=int, default=1, help="Update the model every --acum-freq steps." 
- ) - # arguments for distributed training - parser.add_argument( - "--dist-url", - default="env://", - type=str, - help="url used to set up distributed training", - ) - parser.add_argument( - "--dist-backend", default="nccl", type=str, help="distributed backend" - ) - parser.add_argument( - "--report-to", - default='', - type=str, - help="Options are ['wandb', 'tensorboard', 'wandb,tensorboard']" - ) - parser.add_argument( - "--wandb-notes", - default='', - type=str, - help="Notes if logging with wandb" - ) - parser.add_argument( - "--wandb-project-name", - type=str, - default='open-clip', - help="Name of the project if logging with wandb.", - ) - parser.add_argument( - "--debug", - default=False, - action="store_true", - help="If true, more information is logged." - ) - parser.add_argument( - "--copy-codebase", - default=False, - action="store_true", - help="If true, we copy the entire base on the log directory, and execute from there." - ) - parser.add_argument( - "--horovod", - default=False, - action="store_true", - help="Use horovod for distributed training." - ) - parser.add_argument( - "--ddp-static-graph", - default=False, - action='store_true', - help="Enable static graph optimization for DDP in PyTorch >= 1.11.", - ) - parser.add_argument( - "--no-set-device-rank", - default=False, - action="store_true", - help="Don't set device index from local rank (when CUDA_VISIBLE_DEVICES restricted to one per proc)." - ) - parser.add_argument( - "--seed", type=int, default=0, help="Default random seed." - ) - parser.add_argument( - "--grad-clip-norm", type=float, default=None, help="Gradient clip." - ) - parser.add_argument( - "--lock-text", - default=False, - action='store_true', - help="Lock full text tower by disabling gradients.", - ) - parser.add_argument( - "--lock-text-unlocked-layers", - type=int, - default=0, - help="Leave last n image tower layer groups unlocked.", - ) - parser.add_argument( - "--lock-text-freeze-layer-norm", - default=False, - action='store_true', - help="Freeze BatchNorm running stats in image tower for any locked layers.", - ) - parser.add_argument( - "--log-every-n-steps", - type=int, - default=100, - help="Log every n steps to tensorboard/console/wandb.", - ) - parser.add_argument( - "--remote-sync", - type=str, - default=None, - help="Optinoally sync with a remote path specified by this arg", - ) - parser.add_argument( - "--remote-sync-frequency", - type=int, - default=300, - help="How frequently to sync to a remote directly if --remote-sync is not None.", - ) - parser.add_argument( - "--remote-sync-protocol", - choices=["s3", "fsspec"], - default="s3", - help="How to do the remote sync backup if --remote-sync is not None.", - ) - parser.add_argument( - "--delete-previous-checkpoint", - default=False, - action="store_true", - help="If true, delete previous checkpoint after storing a new one." - ) - args = parser.parse_args(args) - - # If some params are not passed, we use the default values based on model name. 
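# Editorial annotation (not part of the original open_clip source): get_default_params() keys off
# the model name, so e.g. parsing ["--model", "ViT-B-32", "--train-data", "x.csv"] with no
# explicit --lr leaves args.lr as None at this point, and the loop below fills in the ViT
# defaults (lr=5.0e-4, beta2=0.98, eps=1.0e-6); a ResNet model such as RN50 would instead
# receive beta2=0.999 and eps=1.0e-8.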
- default_params = get_default_params(args.model) - for name, val in default_params.items(): - if getattr(args, name) is None: - setattr(args, name, val) - - return args diff --git "a/spaces/hands012/gpt-academic/crazy_functions/\350\247\243\346\236\220\351\241\271\347\233\256\346\272\220\344\273\243\347\240\201.py" "b/spaces/hands012/gpt-academic/crazy_functions/\350\247\243\346\236\220\351\241\271\347\233\256\346\272\220\344\273\243\347\240\201.py" deleted file mode 100644 index 30ae4440c2cbfa6da3e38692d3707d6557e65c26..0000000000000000000000000000000000000000 --- "a/spaces/hands012/gpt-academic/crazy_functions/\350\247\243\346\236\220\351\241\271\347\233\256\346\272\220\344\273\243\347\240\201.py" +++ /dev/null @@ -1,352 +0,0 @@ -from toolbox import update_ui -from toolbox import CatchException, report_execption, write_results_to_file -from .crazy_utils import input_clipping - -def 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt): - import os, copy - from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency - from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive - msg = '正常' - summary_batch_isolation = True - inputs_array = [] - inputs_show_user_array = [] - history_array = [] - sys_prompt_array = [] - report_part_1 = [] - - assert len(file_manifest) <= 512, "源文件太多(超过512个), 请缩减输入文件的数量。或者,您也可以选择删除此行警告,并修改代码拆分file_manifest列表,从而实现分批次处理。" - ############################## <第一步,逐个文件分析,多线程> ################################## - for index, fp in enumerate(file_manifest): - # 读取文件 - with open(fp, 'r', encoding='utf-8', errors='replace') as f: - file_content = f.read() - prefix = "接下来请你逐文件分析下面的工程" if index==0 else "" - i_say = prefix + f'请对下面的程序文件做一个概述文件名是{os.path.relpath(fp, project_folder)},文件代码是 ```{file_content}```' - i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的程序文件做一个概述: {os.path.abspath(fp)}' - # 装载请求内容 - inputs_array.append(i_say) - inputs_show_user_array.append(i_say_show_user) - history_array.append([]) - sys_prompt_array.append("你是一个程序架构分析师,正在分析一个源代码项目。你的回答必须简单明了。") - - # 文件读取完成,对每一个源代码文件,生成一个请求线程,发送到chatgpt进行分析 - gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency( - inputs_array = inputs_array, - inputs_show_user_array = inputs_show_user_array, - history_array = history_array, - sys_prompt_array = sys_prompt_array, - llm_kwargs = llm_kwargs, - chatbot = chatbot, - show_user_at_complete = True - ) - - # 全部文件解析完成,结果写入文件,准备对工程源代码进行汇总分析 - report_part_1 = copy.deepcopy(gpt_response_collection) - history_to_return = report_part_1 - res = write_results_to_file(report_part_1) - chatbot.append(("完成?", "逐个文件分析已完成。" + res + "\n\n正在开始汇总。")) - yield from update_ui(chatbot=chatbot, history=history_to_return) # 刷新界面 - - ############################## <第二步,综合,单线程,分组+迭代处理> ################################## - batchsize = 16 # 10个文件为一组 - report_part_2 = [] - previous_iteration_files = [] - last_iteration_result = "" - while True: - if len(file_manifest) == 0: break - this_iteration_file_manifest = file_manifest[:batchsize] - this_iteration_gpt_response_collection = gpt_response_collection[:batchsize*2] - file_rel_path = [os.path.relpath(fp, project_folder) for index, fp in enumerate(this_iteration_file_manifest)] - # 把“请对下面的程序文件做一个概述” 替换成 精简的 "文件名:{all_file[index]}" - for index, content in enumerate(this_iteration_gpt_response_collection): - if index%2==0: this_iteration_gpt_response_collection[index] = 
f"{file_rel_path[index//2]}" # 只保留文件名节省token - this_iteration_files = [os.path.relpath(fp, project_folder) for index, fp in enumerate(this_iteration_file_manifest)] - previous_iteration_files.extend(this_iteration_files) - previous_iteration_files_string = ', '.join(previous_iteration_files) - current_iteration_focus = ', '.join(this_iteration_files) - if summary_batch_isolation: focus = current_iteration_focus - else: focus = previous_iteration_files_string - i_say = f'用一张Markdown表格简要描述以下文件的功能:{focus}。根据以上分析,用一句话概括程序的整体功能。' - if last_iteration_result != "": - sys_prompt_additional = "已知某些代码的局部作用是:" + last_iteration_result + "\n请继续分析其他源代码,从而更全面地理解项目的整体功能。" - else: - sys_prompt_additional = "" - inputs_show_user = f'根据以上分析,对程序的整体功能和构架重新做出概括,由于输入长度限制,可能需要分组处理,本组文件为 {current_iteration_focus} + 已经汇总的文件组。' - this_iteration_history = copy.deepcopy(this_iteration_gpt_response_collection) - this_iteration_history.append(last_iteration_result) - # 裁剪input - inputs, this_iteration_history_feed = input_clipping(inputs=i_say, history=this_iteration_history, max_token_limit=2560) - result = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=inputs, inputs_show_user=inputs_show_user, llm_kwargs=llm_kwargs, chatbot=chatbot, - history=this_iteration_history_feed, # 迭代之前的分析 - sys_prompt="你是一个程序架构分析师,正在分析一个项目的源代码。" + sys_prompt_additional) - - summary = "请用一句话概括这些文件的整体功能" - summary_result = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=summary, - inputs_show_user=summary, - llm_kwargs=llm_kwargs, - chatbot=chatbot, - history=[i_say, result], # 迭代之前的分析 - sys_prompt="你是一个程序架构分析师,正在分析一个项目的源代码。" + sys_prompt_additional) - - report_part_2.extend([i_say, result]) - last_iteration_result = summary_result - file_manifest = file_manifest[batchsize:] - gpt_response_collection = gpt_response_collection[batchsize*2:] - - ############################## ################################## - history_to_return.extend(report_part_2) - res = write_results_to_file(history_to_return) - chatbot.append(("完成了吗?", res)) - yield from update_ui(chatbot=chatbot, history=history_to_return) # 刷新界面 - - -@CatchException -def 解析项目本身(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - history = [] # 清空历史,以免输入溢出 - import glob - file_manifest = [f for f in glob.glob('./*.py') if ('test_project' not in f) and ('gpt_log' not in f)] + \ - [f for f in glob.glob('./crazy_functions/*.py') if ('test_project' not in f) and ('gpt_log' not in f)]+ \ - [f for f in glob.glob('./request_llm/*.py') if ('test_project' not in f) and ('gpt_log' not in f)] - project_folder = './' - if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何python文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) - -@CatchException -def 解析一个Python项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - history = [] # 清空历史,以免输入溢出 - import glob, os - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.py', recursive=True)] - if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何python文件: {txt}") - yield from 
update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) - - -@CatchException -def 解析一个C项目的头文件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - history = [] # 清空历史,以免输入溢出 - import glob, os - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.h', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.hpp', recursive=True)] #+ \ - # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)] - if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.h头文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) - -@CatchException -def 解析一个C项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - history = [] # 清空历史,以免输入溢出 - import glob, os - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.h', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.hpp', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)] - if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.h头文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) - - -@CatchException -def 解析一个Java项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - history = [] # 清空历史,以免输入溢出 - import glob, os - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.java', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.jar', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.xml', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.sh', recursive=True)] - if len(file_manifest) == 0: - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何java文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) - - -@CatchException -def 解析一个前端项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - history = [] # 清空历史,以免输入溢出 - import glob, os - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - file_manifest = [f 
for f in glob.glob(f'{project_folder}/**/*.ts', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.tsx', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.json', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.js', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.vue', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.less', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.sass', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.wxml', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.wxss', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.css', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.jsx', recursive=True)] - if len(file_manifest) == 0: - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何前端相关文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) - - -@CatchException -def 解析一个Golang项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - history = [] # 清空历史,以免输入溢出 - import glob, os - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.go', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/go.mod', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/go.sum', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/go.work', recursive=True)] - if len(file_manifest) == 0: - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何golang文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) - -@CatchException -def 解析一个Rust项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - history = [] # 清空历史,以免输入溢出 - import glob, os - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.rs', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.toml', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.lock', recursive=True)] - if len(file_manifest) == 0: - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何golang文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) - -@CatchException -def 解析一个Lua项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - history = [] # 清空历史,以免输入溢出 - import glob, os - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.lua', 
recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.xml', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.json', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.toml', recursive=True)] - if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何lua文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) - - -@CatchException -def 解析一个CSharp项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - history = [] # 清空历史,以免输入溢出 - import glob, os - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.cs', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.csproj', recursive=True)] - if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何CSharp文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) - - -@CatchException -def 解析任意code项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - txt_pattern = plugin_kwargs.get("advanced_arg") - txt_pattern = txt_pattern.replace(",", ",") - # 将要匹配的模式(例如: *.c, *.cpp, *.py, config.toml) - pattern_include = [_.lstrip(" ,").rstrip(" ,") for _ in txt_pattern.split(",") if _ != "" and not _.strip().startswith("^")] - if not pattern_include: pattern_include = ["*"] # 不输入即全部匹配 - # 将要忽略匹配的文件后缀(例如: ^*.c, ^*.cpp, ^*.py) - pattern_except_suffix = [_.lstrip(" ^*.,").rstrip(" ,") for _ in txt_pattern.split(" ") if _ != "" and _.strip().startswith("^*.")] - pattern_except_suffix += ['zip', 'rar', '7z', 'tar', 'gz'] # 避免解析压缩文件 - # 将要忽略匹配的文件名(例如: ^README.md) - pattern_except_name = [_.lstrip(" ^*,").rstrip(" ,").replace(".", "\.") for _ in txt_pattern.split(" ") if _ != "" and _.strip().startswith("^") and not _.strip().startswith("^*.")] - # 生成正则表达式 - pattern_except = '/[^/]+\.(' + "|".join(pattern_except_suffix) + ')$' - pattern_except += '|/(' + "|".join(pattern_except_name) + ')$' if pattern_except_name != [] else '' - - history.clear() - import glob, os, re - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - # 若上传压缩文件, 先寻找到解压的文件夹路径, 从而避免解析压缩文件 - maybe_dir = [f for f in glob.glob(f'{project_folder}/*') if os.path.isdir(f)] - if len(maybe_dir)>0 and maybe_dir[0].endswith('.extract'): - extract_folder_path = maybe_dir[0] - else: - extract_folder_path = project_folder - # 按输入的匹配模式寻找上传的非压缩文件和已解压的文件 - file_manifest = [f for pattern in pattern_include for f in glob.glob(f'{extract_folder_path}/**/{pattern}', recursive=True) if "" != extract_folder_path and \ - os.path.isfile(f) and (not re.search(pattern_except, f) or pattern.endswith('.' 
+ re.search(pattern_except, f).group().split('.')[-1]))] - if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) \ No newline at end of file diff --git a/spaces/haofeixu/unimatch/utils/flow_viz.py b/spaces/haofeixu/unimatch/utils/flow_viz.py deleted file mode 100644 index dbe3f139d8fc54478fc1880eb6aa5a286660540a..0000000000000000000000000000000000000000 --- a/spaces/haofeixu/unimatch/utils/flow_viz.py +++ /dev/null @@ -1,290 +0,0 @@ -# MIT License -# -# Copyright (c) 2018 Tom Runia -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to conditions. -# -# Author: Tom Runia -# Date Created: 2018-08-03 - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import numpy as np -from PIL import Image - - -def make_colorwheel(): - ''' - Generates a color wheel for optical flow visualization as presented in: - Baker et al. "A Database and Evaluation Methodology for Optical Flow" (ICCV, 2007) - URL: http://vision.middlebury.edu/flow/flowEval-iccv07.pdf - According to the C++ source code of Daniel Scharstein - According to the Matlab source code of Deqing Sun - ''' - - RY = 15 - YG = 6 - GC = 4 - CB = 11 - BM = 13 - MR = 6 - - ncols = RY + YG + GC + CB + BM + MR - colorwheel = np.zeros((ncols, 3)) - col = 0 - - # RY - colorwheel[0:RY, 0] = 255 - colorwheel[0:RY, 1] = np.floor(255 * np.arange(0, RY) / RY) - col = col + RY - # YG - colorwheel[col:col + YG, 0] = 255 - np.floor(255 * np.arange(0, YG) / YG) - colorwheel[col:col + YG, 1] = 255 - col = col + YG - # GC - colorwheel[col:col + GC, 1] = 255 - colorwheel[col:col + GC, 2] = np.floor(255 * np.arange(0, GC) / GC) - col = col + GC - # CB - colorwheel[col:col + CB, 1] = 255 - np.floor(255 * np.arange(CB) / CB) - colorwheel[col:col + CB, 2] = 255 - col = col + CB - # BM - colorwheel[col:col + BM, 2] = 255 - colorwheel[col:col + BM, 0] = np.floor(255 * np.arange(0, BM) / BM) - col = col + BM - # MR - colorwheel[col:col + MR, 2] = 255 - np.floor(255 * np.arange(MR) / MR) - colorwheel[col:col + MR, 0] = 255 - return colorwheel - - -def flow_compute_color(u, v, convert_to_bgr=False): - ''' - Applies the flow color wheel to (possibly clipped) flow components u and v. 
- According to the C++ source code of Daniel Scharstein - According to the Matlab source code of Deqing Sun - :param u: np.ndarray, input horizontal flow - :param v: np.ndarray, input vertical flow - :param convert_to_bgr: bool, whether to change ordering and output BGR instead of RGB - :return: - ''' - - flow_image = np.zeros((u.shape[0], u.shape[1], 3), np.uint8) - - colorwheel = make_colorwheel() # shape [55x3] - ncols = colorwheel.shape[0] - - rad = np.sqrt(np.square(u) + np.square(v)) - a = np.arctan2(-v, -u) / np.pi - - fk = (a + 1) / 2 * (ncols - 1) + 1 - k0 = np.floor(fk).astype(np.int32) - k1 = k0 + 1 - k1[k1 == ncols] = 1 - f = fk - k0 - - for i in range(colorwheel.shape[1]): - tmp = colorwheel[:, i] - col0 = tmp[k0] / 255.0 - col1 = tmp[k1] / 255.0 - col = (1 - f) * col0 + f * col1 - - idx = (rad <= 1) - col[idx] = 1 - rad[idx] * (1 - col[idx]) - col[~idx] = col[~idx] * 0.75 # out of range? - - # Note the 2-i => BGR instead of RGB - ch_idx = 2 - i if convert_to_bgr else i - flow_image[:, :, ch_idx] = np.floor(255 * col) - - return flow_image - - -def flow_to_color(flow_uv, clip_flow=None, convert_to_bgr=False): - ''' - Expects a two dimensional flow image of shape [H,W,2] - According to the C++ source code of Daniel Scharstein - According to the Matlab source code of Deqing Sun - :param flow_uv: np.ndarray of shape [H,W,2] - :param clip_flow: float, maximum clipping value for flow - :return: - ''' - - assert flow_uv.ndim == 3, 'input flow must have three dimensions' - assert flow_uv.shape[2] == 2, 'input flow must have shape [H,W,2]' - - if clip_flow is not None: - flow_uv = np.clip(flow_uv, 0, clip_flow) - - u = flow_uv[:, :, 0] - v = flow_uv[:, :, 1] - - rad = np.sqrt(np.square(u) + np.square(v)) - rad_max = np.max(rad) - - epsilon = 1e-5 - u = u / (rad_max + epsilon) - v = v / (rad_max + epsilon) - - return flow_compute_color(u, v, convert_to_bgr) - - -UNKNOWN_FLOW_THRESH = 1e7 -SMALLFLOW = 0.0 -LARGEFLOW = 1e8 - - -def make_color_wheel(): - """ - Generate color wheel according Middlebury color code - :return: Color wheel - """ - RY = 15 - YG = 6 - GC = 4 - CB = 11 - BM = 13 - MR = 6 - - ncols = RY + YG + GC + CB + BM + MR - - colorwheel = np.zeros([ncols, 3]) - - col = 0 - - # RY - colorwheel[0:RY, 0] = 255 - colorwheel[0:RY, 1] = np.transpose(np.floor(255 * np.arange(0, RY) / RY)) - col += RY - - # YG - colorwheel[col:col + YG, 0] = 255 - np.transpose(np.floor(255 * np.arange(0, YG) / YG)) - colorwheel[col:col + YG, 1] = 255 - col += YG - - # GC - colorwheel[col:col + GC, 1] = 255 - colorwheel[col:col + GC, 2] = np.transpose(np.floor(255 * np.arange(0, GC) / GC)) - col += GC - - # CB - colorwheel[col:col + CB, 1] = 255 - np.transpose(np.floor(255 * np.arange(0, CB) / CB)) - colorwheel[col:col + CB, 2] = 255 - col += CB - - # BM - colorwheel[col:col + BM, 2] = 255 - colorwheel[col:col + BM, 0] = np.transpose(np.floor(255 * np.arange(0, BM) / BM)) - col += + BM - - # MR - colorwheel[col:col + MR, 2] = 255 - np.transpose(np.floor(255 * np.arange(0, MR) / MR)) - colorwheel[col:col + MR, 0] = 255 - - return colorwheel - - -def compute_color(u, v): - """ - compute optical flow color map - :param u: optical flow horizontal map - :param v: optical flow vertical map - :return: optical flow in color code - """ - [h, w] = u.shape - img = np.zeros([h, w, 3]) - nanIdx = np.isnan(u) | np.isnan(v) - u[nanIdx] = 0 - v[nanIdx] = 0 - - colorwheel = make_color_wheel() - ncols = np.size(colorwheel, 0) - - rad = np.sqrt(u ** 2 + v ** 2) - - a = np.arctan2(-v, -u) / np.pi - - fk = (a + 1) / 2 * 
(ncols - 1) + 1 - - k0 = np.floor(fk).astype(int) - - k1 = k0 + 1 - k1[k1 == ncols + 1] = 1 - f = fk - k0 - - for i in range(0, np.size(colorwheel, 1)): - tmp = colorwheel[:, i] - col0 = tmp[k0 - 1] / 255 - col1 = tmp[k1 - 1] / 255 - col = (1 - f) * col0 + f * col1 - - idx = rad <= 1 - col[idx] = 1 - rad[idx] * (1 - col[idx]) - notidx = np.logical_not(idx) - - col[notidx] *= 0.75 - img[:, :, i] = np.uint8(np.floor(255 * col * (1 - nanIdx))) - - return img - - -# from https://github.com/gengshan-y/VCN -def flow_to_image(flow): - """ - Convert flow into middlebury color code image - :param flow: optical flow map - :return: optical flow image in middlebury color - """ - u = flow[:, :, 0] - v = flow[:, :, 1] - - maxu = -999. - maxv = -999. - minu = 999. - minv = 999. - - idxUnknow = (abs(u) > UNKNOWN_FLOW_THRESH) | (abs(v) > UNKNOWN_FLOW_THRESH) - u[idxUnknow] = 0 - v[idxUnknow] = 0 - - maxu = max(maxu, np.max(u)) - minu = min(minu, np.min(u)) - - maxv = max(maxv, np.max(v)) - minv = min(minv, np.min(v)) - - rad = np.sqrt(u ** 2 + v ** 2) - maxrad = max(-1, np.max(rad)) - - u = u / (maxrad + np.finfo(float).eps) - v = v / (maxrad + np.finfo(float).eps) - - img = compute_color(u, v) - - idx = np.repeat(idxUnknow[:, :, np.newaxis], 3, axis=2) - img[idx] = 0 - - return np.uint8(img) - - -def save_vis_flow_tofile(flow, output_path): - vis_flow = flow_to_image(flow) - Image.fromarray(vis_flow).save(output_path) - - -def flow_tensor_to_image(flow): - """Used for tensorboard visualization""" - flow = flow.permute(1, 2, 0) # [H, W, 2] - flow = flow.detach().cpu().numpy() - flow = flow_to_image(flow) # [H, W, 3] - flow = np.transpose(flow, (2, 0, 1)) # [3, H, W] - - return flow diff --git a/spaces/hdhzk/bingo/src/components/ui/separator.tsx b/spaces/hdhzk/bingo/src/components/ui/separator.tsx deleted file mode 100644 index 6c55e0b2ca8e2436658a06748aadbff7cd700db0..0000000000000000000000000000000000000000 --- a/spaces/hdhzk/bingo/src/components/ui/separator.tsx +++ /dev/null @@ -1,31 +0,0 @@ -'use client' - -import * as React from 'react' -import * as SeparatorPrimitive from '@radix-ui/react-separator' - -import { cn } from '@/lib/utils' - -const Separator = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->( - ( - { className, orientation = 'horizontal', decorative = true, ...props }, - ref - ) => ( - - ) -) -Separator.displayName = SeparatorPrimitive.Root.displayName - -export { Separator } diff --git a/spaces/hhalim/datavis-plotly/README.md b/spaces/hhalim/datavis-plotly/README.md deleted file mode 100644 index 6abdfd329ab8a7bf3fe174b512c0677a42041c86..0000000000000000000000000000000000000000 --- a/spaces/hhalim/datavis-plotly/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Plotly Graphing Libraries Streamlit -emoji: 💻 -colorFrom: yellow -colorTo: purple -sdk: streamlit -sdk_version: 1.17.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/dataset_conversion/Task056_VerSe2019.py b/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/dataset_conversion/Task056_VerSe2019.py deleted file mode 100644 index 4962ec9ae634b319821199d08b665e83e44b2367..0000000000000000000000000000000000000000 --- a/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/dataset_conversion/Task056_VerSe2019.py +++ /dev/null @@ -1,274 +0,0 @@ -# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, 
Germany -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from collections import OrderedDict -import SimpleITK as sitk -from multiprocessing.pool import Pool -from nnunet.configuration import default_num_threads -from nnunet.paths import nnUNet_raw_data -from batchgenerators.utilities.file_and_folder_operations import * -import shutil -from medpy import metric -import numpy as np -from nnunet.utilities.image_reorientation import reorient_all_images_in_folder_to_ras - - -def check_if_all_in_good_orientation(imagesTr_folder: str, labelsTr_folder: str, output_folder: str) -> None: - maybe_mkdir_p(output_folder) - filenames = subfiles(labelsTr_folder, suffix='.nii.gz', join=False) - import matplotlib.pyplot as plt - for n in filenames: - img = sitk.GetArrayFromImage(sitk.ReadImage(join(imagesTr_folder, n[:-7] + '_0000.nii.gz'))) - lab = sitk.GetArrayFromImage(sitk.ReadImage(join(labelsTr_folder, n))) - assert np.all([i == j for i, j in zip(img.shape, lab.shape)]) - z_slice = img.shape[0] // 2 - img_slice = img[z_slice] - lab_slice = lab[z_slice] - lab_slice[lab_slice != 0] = 1 - img_slice = img_slice - img_slice.min() - img_slice = img_slice / img_slice.max() - stacked = np.vstack((img_slice, lab_slice)) - print(stacked.shape) - plt.imsave(join(output_folder, n[:-7] + '.png'), stacked, cmap='gray') - - -def evaluate_verse_case(sitk_file_ref:str, sitk_file_test:str): - """ - Only vertebra that are present in the reference will be evaluated - :param sitk_file_ref: - :param sitk_file_test: - :return: - """ - gt_npy = sitk.GetArrayFromImage(sitk.ReadImage(sitk_file_ref)) - pred_npy = sitk.GetArrayFromImage(sitk.ReadImage(sitk_file_test)) - dice_scores = [] - for label in range(1, 26): - mask_gt = gt_npy == label - if np.sum(mask_gt) > 0: - mask_pred = pred_npy == label - dc = metric.dc(mask_pred, mask_gt) - else: - dc = np.nan - dice_scores.append(dc) - return dice_scores - - -def evaluate_verse_folder(folder_pred, folder_gt, out_json="/home/fabian/verse.json"): - p = Pool(default_num_threads) - files_gt_bare = subfiles(folder_gt, join=False) - assert all([isfile(join(folder_pred, i)) for i in files_gt_bare]), "some files are missing in the predicted folder" - files_pred = [join(folder_pred, i) for i in files_gt_bare] - files_gt = [join(folder_gt, i) for i in files_gt_bare] - - results = p.starmap_async(evaluate_verse_case, zip(files_gt, files_pred)) - - results = results.get() - - dct = {i: j for i, j in zip(files_gt_bare, results)} - - results_stacked = np.vstack(results) - results_mean = np.nanmean(results_stacked, 0) - overall_mean = np.nanmean(results_mean) - - save_json((dct, list(results_mean), overall_mean), out_json) - p.close() - p.join() - - -def print_unique_labels_and_their_volumes(image: str, print_only_if_vol_smaller_than: float = None): - img = sitk.ReadImage(image) - voxel_volume = np.prod(img.GetSpacing()) - img_npy = sitk.GetArrayFromImage(img) - uniques = [i for i in np.unique(img_npy) if i != 0] - volumes = {i: np.sum(img_npy == i) * voxel_volume for i in uniques} - print('') 
- print(image.split('/')[-1]) - print('uniques:', uniques) - for k in volumes.keys(): - v = volumes[k] - if print_only_if_vol_smaller_than is not None and v > print_only_if_vol_smaller_than: - pass - else: - print('k:', k, '\tvol:', volumes[k]) - - -def remove_label(label_file: str, remove_this: int, replace_with: int = 0): - img = sitk.ReadImage(label_file) - img_npy = sitk.GetArrayFromImage(img) - img_npy[img_npy == remove_this] = replace_with - img2 = sitk.GetImageFromArray(img_npy) - img2.CopyInformation(img) - sitk.WriteImage(img2, label_file) - - -if __name__ == "__main__": - ### First we create a nnunet dataset from verse. After this the images will be all willy nilly in their - # orientation because that's how VerSe comes - base = '/media/fabian/DeepLearningData/VerSe2019' - base = "/home/fabian/data/VerSe2019" - - # correct orientation - train_files_base = subfiles(join(base, "train"), join=False, suffix="_seg.nii.gz") - train_segs = [i[:-len("_seg.nii.gz")] + "_seg.nii.gz" for i in train_files_base] - train_data = [i[:-len("_seg.nii.gz")] + ".nii.gz" for i in train_files_base] - test_files_base = [i[:-len(".nii.gz")] for i in subfiles(join(base, "test"), join=False, suffix=".nii.gz")] - test_data = [i + ".nii.gz" for i in test_files_base] - - task_id = 56 - task_name = "VerSe" - - foldername = "Task%03.0d_%s" % (task_id, task_name) - - out_base = join(nnUNet_raw_data, foldername) - imagestr = join(out_base, "imagesTr") - imagests = join(out_base, "imagesTs") - labelstr = join(out_base, "labelsTr") - maybe_mkdir_p(imagestr) - maybe_mkdir_p(imagests) - maybe_mkdir_p(labelstr) - - train_patient_names = [i[:-len("_seg.nii.gz")] for i in subfiles(join(base, "train"), join=False, suffix="_seg.nii.gz")] - for p in train_patient_names: - curr = join(base, "train") - label_file = join(curr, p + "_seg.nii.gz") - image_file = join(curr, p + ".nii.gz") - shutil.copy(image_file, join(imagestr, p + "_0000.nii.gz")) - shutil.copy(label_file, join(labelstr, p + ".nii.gz")) - - test_patient_names = [i[:-7] for i in subfiles(join(base, "test"), join=False, suffix=".nii.gz")] - for p in test_patient_names: - curr = join(base, "test") - image_file = join(curr, p + ".nii.gz") - shutil.copy(image_file, join(imagests, p + "_0000.nii.gz")) - - - json_dict = OrderedDict() - json_dict['name'] = "VerSe2019" - json_dict['description'] = "VerSe2019" - json_dict['tensorImageSize'] = "4D" - json_dict['reference'] = "see challenge website" - json_dict['licence'] = "see challenge website" - json_dict['release'] = "0.0" - json_dict['modality'] = { - "0": "CT", - } - json_dict['labels'] = {i: str(i) for i in range(26)} - - json_dict['numTraining'] = len(train_patient_names) - json_dict['numTest'] = len(test_patient_names) - json_dict['training'] = [{'image': "./imagesTr/%s.nii.gz" % i.split("/")[-1], "label": "./labelsTr/%s.nii.gz" % i.split("/")[-1]} for i in - train_patient_names] - json_dict['test'] = ["./imagesTs/%s.nii.gz" % i.split("/")[-1] for i in test_patient_names] - - save_json(json_dict, os.path.join(out_base, "dataset.json")) - - # now we reorient all those images to ras. This saves a pkl with the original affine. 
We need this information to - # bring our predictions into the same geometry for submission - reorient_all_images_in_folder_to_ras(imagestr) - reorient_all_images_in_folder_to_ras(imagests) - reorient_all_images_in_folder_to_ras(labelstr) - - # sanity check - check_if_all_in_good_orientation(imagestr, labelstr, join(out_base, 'sanitycheck')) - # looks good to me - proceed - - # check the volumes of the vertebrae - _ = [print_unique_labels_and_their_volumes(i, 1000) for i in subfiles(labelstr, suffix='.nii.gz')] - - # some cases appear fishy. For example, verse063.nii.gz has labels [1, 20, 21, 22, 23, 24] and 1 only has a volume - # of 63mm^3 - - #let's correct those - - # 19 is connected to the image border and should not be segmented. Only one slice of 19 is segmented in the - # reference. Looks wrong - remove_label(join(labelstr, 'verse031.nii.gz'), 19, 0) - - # spurious annotation of 18 (vol: 8.00) - remove_label(join(labelstr, 'verse060.nii.gz'), 18, 0) - - # spurious annotation of 16 (vol: 3.00) - remove_label(join(labelstr, 'verse061.nii.gz'), 16, 0) - - # spurious annotation of 1 (vol: 63.00) although the rest of the vertebra is [20, 21, 22, 23, 24] - remove_label(join(labelstr, 'verse063.nii.gz'), 1, 0) - - # spurious annotation of 3 (vol: 9.53) although the rest of the vertebra is - # [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24] - remove_label(join(labelstr, 'verse074.nii.gz'), 3, 0) - - # spurious annotation of 3 (vol: 15.00) - remove_label(join(labelstr, 'verse097.nii.gz'), 3, 0) - - # spurious annotation of 3 (vol: 10) although the rest of the vertebra is - # [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24] - remove_label(join(labelstr, 'verse151.nii.gz'), 3, 0) - - # spurious annotation of 25 (vol: 4) although the rest of the vertebra is - # [1, 2, 3, 4, 5, 6, 7, 8, 9] - remove_label(join(labelstr, 'verse201.nii.gz'), 25, 0) - - # spurious annotation of 23 (vol: 8) although the rest of the vertebra is - # [1, 2, 3, 4, 5, 6, 7, 8] - remove_label(join(labelstr, 'verse207.nii.gz'), 23, 0) - - # spurious annotation of 23 (vol: 12) although the rest of the vertebra is - # [1, 2, 3, 4, 5, 6, 7, 8, 9] - remove_label(join(labelstr, 'verse208.nii.gz'), 23, 0) - - # spurious annotation of 23 (vol: 2) although the rest of the vertebra is - # [1, 2, 3, 4, 5, 6, 7, 8, 9] - remove_label(join(labelstr, 'verse212.nii.gz'), 23, 0) - - # spurious annotation of 20 (vol: 4) although the rest of the vertebra is - # [1, 2, 3, 4, 5, 6, 7, 8, 9] - remove_label(join(labelstr, 'verse214.nii.gz'), 20, 0) - - # spurious annotation of 23 (vol: 15) although the rest of the vertebra is - # [1, 2, 3, 4, 5, 6, 7, 8] - remove_label(join(labelstr, 'verse223.nii.gz'), 23, 0) - - # spurious annotation of 23 (vol: 1) and 25 (vol: 7) although the rest of the vertebra is - # [1, 2, 3, 4, 5, 6, 7, 8, 9] - remove_label(join(labelstr, 'verse226.nii.gz'), 23, 0) - remove_label(join(labelstr, 'verse226.nii.gz'), 25, 0) - - # spurious annotation of 25 (vol: 27) although the rest of the vertebra is - # [1, 2, 3, 4, 5, 6, 7, 8] - remove_label(join(labelstr, 'verse227.nii.gz'), 25, 0) - - # spurious annotation of 20 (vol: 24) although the rest of the vertebra is - # [1, 2, 3, 4, 5, 6, 7, 8] - remove_label(join(labelstr, 'verse232.nii.gz'), 20, 0) - - - # Now we are ready to run nnU-Net - - - """# run this part of the code once training is done - folder_gt = "/media/fabian/My Book/MedicalDecathlon/nnUNet_raw_splitted/Task056_VerSe/labelsTr" - - folder_pred = 
"/home/fabian/drives/datasets/results/nnUNet/3d_fullres/Task056_VerSe/nnUNetTrainerV2__nnUNetPlansv2.1/cv_niftis_raw" - out_json = "/home/fabian/Task056_VerSe_3d_fullres_summary.json" - evaluate_verse_folder(folder_pred, folder_gt, out_json) - - folder_pred = "/home/fabian/drives/datasets/results/nnUNet/3d_lowres/Task056_VerSe/nnUNetTrainerV2__nnUNetPlansv2.1/cv_niftis_raw" - out_json = "/home/fabian/Task056_VerSe_3d_lowres_summary.json" - evaluate_verse_folder(folder_pred, folder_gt, out_json) - - folder_pred = "/home/fabian/drives/datasets/results/nnUNet/3d_cascade_fullres/Task056_VerSe/nnUNetTrainerV2CascadeFullRes__nnUNetPlansv2.1/cv_niftis_raw" - out_json = "/home/fabian/Task056_VerSe_3d_cascade_fullres_summary.json" - evaluate_verse_folder(folder_pred, folder_gt, out_json)""" - diff --git a/spaces/huaiji3y/bingo-Public/src/components/voice.tsx b/spaces/huaiji3y/bingo-Public/src/components/voice.tsx deleted file mode 100644 index ab886394487445e4b0675770b76096bba0e61b0e..0000000000000000000000000000000000000000 --- a/spaces/huaiji3y/bingo-Public/src/components/voice.tsx +++ /dev/null @@ -1,52 +0,0 @@ -import React, { useEffect } from 'react' -import { useSetAtom } from 'jotai' -import { useBing } from '@/lib/hooks/use-bing' -import Image from 'next/image' -import VoiceIcon from '@/assets/images/voice.svg' -import VoiceButton from './ui/voice' -import { SR } from '@/lib/bots/bing/sr' -import { voiceListenAtom } from '@/state' - -const sr = new SR(['发送', '清空', '退出']) - -const Voice = ({ setInput, input, sendMessage, isSpeaking }: Pick, 'setInput' | 'sendMessage' | 'input' | 'isSpeaking'>) => { - const setListen = useSetAtom(voiceListenAtom) - useEffect(() => { - if (sr.listening) return - sr.transcript = !isSpeaking - }, [isSpeaking]) - - useEffect(() => { - sr.onchange = (msg: string, command?: string) => { - switch (command) { - case '退出': - sr.stop() - break; - case '发送': - sendMessage(input) - case '清空': - setInput('') - break; - default: - setInput(input + msg) - } - } - }, [input, setInput, sendMessage]) - - const switchSR = (enable: boolean = false) => { - setListen(enable) - if (enable) { - sr.start() - } else { - sr.stop() - } - } - - return sr.listening ? ( - switchSR(false)} /> - ) : ( - start voice switchSR(true)} /> - ) -}; - -export default Voice; diff --git a/spaces/huang4414/White-box-Cartoonization/app.py b/spaces/huang4414/White-box-Cartoonization/app.py deleted file mode 100644 index c55ced56bd87a85f59d1c8ef84b7eca87422720f..0000000000000000000000000000000000000000 --- a/spaces/huang4414/White-box-Cartoonization/app.py +++ /dev/null @@ -1,108 +0,0 @@ -#!/usr/bin/env python - -from __future__ import annotations -import argparse -import functools -import os -import pathlib -import sys -from typing import Callable -import uuid - -import gradio as gr -import huggingface_hub -import numpy as np -import PIL.Image - -from io import BytesIO -from wbc.cartoonize import Cartoonize - -ORIGINAL_REPO_URL = 'https://github.com/SystemErrorWang/White-box-Cartoonization' -TITLE = 'SystemErrorWang/White-box-Cartoonization' -DESCRIPTION = f"""This is a demo for {ORIGINAL_REPO_URL}. 
- -""" -ARTICLE = """ - -""" - -SAFEHASH = [x for x in "0123456789-abcdefghijklmnopqrstuvwxyz_ABCDEFGHIJKLMNOPQRSTUVWXYZ"] -def compress_UUID(): - ''' - 根据http://www.ietf.org/rfc/rfc1738.txt,由uuid编码扩bai大字符域生成du串 - 包括:[0-9a-zA-Z\-_]共64个 - 长度:(32-2)/3*2=20 - 备注:可在地球上人zhi人都用,使用100年不重复(2^120) - :return:String - ''' - row = str(uuid.uuid4()).replace('-', '') - safe_code = '' - for i in range(10): - enbin = "%012d" % int(bin(int(row[i * 3] + row[i * 3 + 1] + row[i * 3 + 2], 16))[2:], 10) - safe_code += (SAFEHASH[int(enbin[0:6], 2)] + SAFEHASH[int(enbin[6:12], 2)]) - safe_code = safe_code.replace('-', '') - return safe_code - - -def parse_args() -> argparse.Namespace: - parser = argparse.ArgumentParser() - parser.add_argument('--device', type=str, default='cpu') - parser.add_argument('--theme', type=str) - parser.add_argument('--live', action='store_true') - parser.add_argument('--share', action='store_true') - parser.add_argument('--port', type=int) - parser.add_argument('--disable-queue', - dest='enable_queue', - action='store_false') - parser.add_argument('--allow-flagging', type=str, default='never') - parser.add_argument('--allow-screenshot', action='store_true') - return parser.parse_args() - -def run( - image, - cartoonize : Cartoonize -) -> tuple[PIL.Image.Image]: - - out_path = compress_UUID()+'.png' - cartoonize.run_sigle(image.name, out_path) - - return PIL.Image.open(out_path) - - -def main(): - gr.close_all() - - args = parse_args() - - cartoonize = Cartoonize(os.path.join(os.path.dirname(os.path.abspath(__file__)),'wbc/saved_models/')) - - func = functools.partial(run, cartoonize=cartoonize) - func = functools.update_wrapper(func, run) - - gr.Interface( - func, - [ - gr.inputs.Image(type='file', label='Input Image'), - ], - [ - gr.outputs.Image( - type='pil', - label='Result'), - ], - # examples=examples, - theme=args.theme, - title=TITLE, - description=DESCRIPTION, - article=ARTICLE, - allow_screenshot=args.allow_screenshot, - allow_flagging=args.allow_flagging, - live=args.live, - ).launch( - enable_queue=args.enable_queue, - server_port=args.port, - share=args.share, - ) - - -if __name__ == '__main__': - main() diff --git a/spaces/huggingface-projects/Leaderboard-Restart/app.py b/spaces/huggingface-projects/Leaderboard-Restart/app.py deleted file mode 100644 index 102f48a2fe8520d3dc67d6d25e8bf2361c4d15b3..0000000000000000000000000000000000000000 --- a/spaces/huggingface-projects/Leaderboard-Restart/app.py +++ /dev/null @@ -1,25 +0,0 @@ -import os -import gradio as gr - -from huggingface_hub import HfApi, hf_hub_download, snapshot_download -from apscheduler.schedulers.background import BackgroundScheduler - -HF_TOKEN = os.environ.get("HF_TOKEN") - -api = HfApi(token=HF_TOKEN) - -def restart(): - api.restart_space(repo_id="huggingface-projects/Deep-Reinforcement-Learning-Leaderboard") - print("Restart") - -scheduler = BackgroundScheduler() -# Refresh every hour -scheduler.add_job(restart, 'interval', seconds=3600) -scheduler.start() - -def greet(name): - return "Hello " + name + "!!" 
- -iface = gr.Interface(fn=greet, inputs="text", outputs="text") -iface.launch() - diff --git a/spaces/huggingface-projects/stable-diffusion-multiplayer/stablediffusion-infinity/schema.sql b/spaces/huggingface-projects/stable-diffusion-multiplayer/stablediffusion-infinity/schema.sql deleted file mode 100644 index 667b3dd060d8b1b045d0ca38a5f4f298ba08aa04..0000000000000000000000000000000000000000 --- a/spaces/huggingface-projects/stable-diffusion-multiplayer/stablediffusion-infinity/schema.sql +++ /dev/null @@ -1,47 +0,0 @@ -PRAGMA foreign_keys=OFF; -BEGIN TRANSACTION; -CREATE TABLE rooms (id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, room_id TEXT NOT NULL, users_count INTEGER NOT NULL DEFAULT 0); -INSERT INTO rooms VALUES(1,'room-0',0); -INSERT INTO rooms VALUES(2,'room-1',0); -INSERT INTO rooms VALUES(3,'room-2',0); -INSERT INTO rooms VALUES(4,'room-3',0); -INSERT INTO rooms VALUES(5,'room-4',0); -INSERT INTO rooms VALUES(6,'room-5',0); -INSERT INTO rooms VALUES(7,'room-6',0); -INSERT INTO rooms VALUES(8,'room-7',0); -INSERT INTO rooms VALUES(9,'room-8',0); -INSERT INTO rooms VALUES(10,'room-9',0); -INSERT INTO rooms VALUES(11,'room-10',0); -INSERT INTO rooms VALUES(12,'room-11',0); -INSERT INTO rooms VALUES(13,'room-12',0); -INSERT INTO rooms VALUES(14,'room-13',0); -INSERT INTO rooms VALUES(15,'room-14',0); -INSERT INTO rooms VALUES(16,'room-15',0); -INSERT INTO rooms VALUES(17,'room-16',0); -INSERT INTO rooms VALUES(18,'room-17',0); -INSERT INTO rooms VALUES(19,'room-18',0); -INSERT INTO rooms VALUES(20,'room-19',0); -INSERT INTO rooms VALUES(21,'room-20',0); -INSERT INTO rooms VALUES(22,'room-21',0); -INSERT INTO rooms VALUES(23,'room-22',0); -INSERT INTO rooms VALUES(24,'room-23',0); -INSERT INTO rooms VALUES(25,'room-24',0); -INSERT INTO rooms VALUES(26,'room-25',0); -INSERT INTO rooms VALUES(27,'room-26',0); -INSERT INTO rooms VALUES(28,'room-27',0); -INSERT INTO rooms VALUES(29,'room-28',0); -INSERT INTO rooms VALUES(30,'room-29',0); -INSERT INTO rooms VALUES(31,'room-30',0); -INSERT INTO rooms VALUES(32,'room-31',0); -INSERT INTO rooms VALUES(33,'room-32',0); -INSERT INTO rooms VALUES(34,'room-33',0); -INSERT INTO rooms VALUES(35,'room-34',0); -INSERT INTO rooms VALUES(36,'room-35',0); -INSERT INTO rooms VALUES(37,'room-36',0); -INSERT INTO rooms VALUES(38,'room-37',0); -INSERT INTO rooms VALUES(39,'room-38',0); -INSERT INTO rooms VALUES(40,'room-39',0); -INSERT INTO rooms VALUES(41,'room-40',0); -DELETE FROM sqlite_sequence; -INSERT INTO sqlite_sequence VALUES('rooms',41); -COMMIT; diff --git a/spaces/hysts/multiresolution-textual-inversion/style.css b/spaces/hysts/multiresolution-textual-inversion/style.css deleted file mode 100644 index 8e4d705815014cffc50ff1d4c5720797c6206cab..0000000000000000000000000000000000000000 --- a/spaces/hysts/multiresolution-textual-inversion/style.css +++ /dev/null @@ -1,7 +0,0 @@ -h1 { - text-align: center; -} -img#visitor-badge { - display: block; - margin: auto; -} diff --git a/spaces/hyxue/HiFiFace-inference-demo/arcface_torch/train_v2.py b/spaces/hyxue/HiFiFace-inference-demo/arcface_torch/train_v2.py deleted file mode 100644 index ba3c15e6a1615f28daaab1ad225f7b61b27bdffc..0000000000000000000000000000000000000000 --- a/spaces/hyxue/HiFiFace-inference-demo/arcface_torch/train_v2.py +++ /dev/null @@ -1,248 +0,0 @@ -import argparse -import logging -import os -from datetime import datetime - -import numpy as np -import torch -from backbones import get_model -from dataset import get_dataloader -from losses import CombinedMarginLoss -from 
lr_scheduler import PolyScheduler -from partial_fc_v2 import PartialFC_V2 -from torch import distributed -from torch.utils.data import DataLoader -from torch.utils.tensorboard import SummaryWriter -from utils.utils_callbacks import CallBackLogging -from utils.utils_callbacks import CallBackVerification -from utils.utils_config import get_config -from utils.utils_distributed_sampler import setup_seed -from utils.utils_logging import AverageMeter -from utils.utils_logging import init_logging - -assert ( - torch.__version__ >= "1.12.0" -), "In order to enjoy the features of the new torch, \ -we have upgraded the torch to 1.12.0. torch before than 1.12.0 may not work in the future." - -try: - rank = int(os.environ["RANK"]) - local_rank = int(os.environ["LOCAL_RANK"]) - world_size = int(os.environ["WORLD_SIZE"]) - distributed.init_process_group("nccl") -except KeyError: - rank = 0 - local_rank = 0 - world_size = 1 - distributed.init_process_group( - backend="nccl", - init_method="tcp://127.0.0.1:12584", - rank=rank, - world_size=world_size, - ) - - -def main(args): - - # get config - cfg = get_config(args.config) - # global control random seed - setup_seed(seed=cfg.seed, cuda_deterministic=False) - - torch.cuda.set_device(local_rank) - - os.makedirs(cfg.output, exist_ok=True) - init_logging(rank, cfg.output) - - summary_writer = SummaryWriter(log_dir=os.path.join(cfg.output, "tensorboard")) if rank == 0 else None - - wandb_logger = None - if cfg.using_wandb: - import wandb - - # Sign in to wandb - try: - wandb.login(key=cfg.wandb_key) - except Exception as e: - print("WandB Key must be provided in config file (base.py).") - print(f"Config Error: {e}") - # Initialize wandb - run_name = datetime.now().strftime("%y%m%d_%H%M") + f"_GPU{rank}" - run_name = run_name if cfg.suffix_run_name is None else run_name + f"_{cfg.suffix_run_name}" - try: - wandb_logger = ( - wandb.init( - entity=cfg.wandb_entity, - project=cfg.wandb_project, - sync_tensorboard=True, - resume=cfg.wandb_resume, - name=run_name, - notes=cfg.notes, - ) - if rank == 0 or cfg.wandb_log_all - else None - ) - if wandb_logger: - wandb_logger.config.update(cfg) - except Exception as e: - print("WandB Data (Entity and Project name) must be provided in config file (base.py).") - print(f"Config Error: {e}") - - train_loader = get_dataloader(cfg.rec, local_rank, cfg.batch_size, cfg.dali, cfg.seed, cfg.num_workers) - - backbone = get_model(cfg.network, dropout=0.0, fp16=cfg.fp16, num_features=cfg.embedding_size).cuda() - - backbone = torch.nn.parallel.DistributedDataParallel( - module=backbone, broadcast_buffers=False, device_ids=[local_rank], bucket_cap_mb=16, find_unused_parameters=True - ) - - backbone.train() - # FIXME using gradient checkpoint if there are some unused parameters will cause error - backbone._set_static_graph() - - margin_loss = CombinedMarginLoss( - 64, cfg.margin_list[0], cfg.margin_list[1], cfg.margin_list[2], cfg.interclass_filtering_threshold - ) - - if cfg.optimizer == "sgd": - module_partial_fc = PartialFC_V2(margin_loss, cfg.embedding_size, cfg.num_classes, cfg.sample_rate, cfg.fp16) - module_partial_fc.train().cuda() - # TODO the params of partial fc must be last in the params list - opt = torch.optim.SGD( - params=[{"params": backbone.parameters()}, {"params": module_partial_fc.parameters()}], - lr=cfg.lr, - momentum=0.9, - weight_decay=cfg.weight_decay, - ) - - elif cfg.optimizer == "adamw": - module_partial_fc = PartialFC_V2(margin_loss, cfg.embedding_size, cfg.num_classes, cfg.sample_rate, cfg.fp16) - 
module_partial_fc.train().cuda() - opt = torch.optim.AdamW( - params=[{"params": backbone.parameters()}, {"params": module_partial_fc.parameters()}], - lr=cfg.lr, - weight_decay=cfg.weight_decay, - ) - else: - raise - - cfg.total_batch_size = cfg.batch_size * world_size - cfg.warmup_step = cfg.num_image // cfg.total_batch_size * cfg.warmup_epoch - cfg.total_step = cfg.num_image // cfg.total_batch_size * cfg.num_epoch - - lr_scheduler = PolyScheduler( - optimizer=opt, base_lr=cfg.lr, max_steps=cfg.total_step, warmup_steps=cfg.warmup_step, last_epoch=-1 - ) - - start_epoch = 0 - global_step = 0 - if cfg.resume: - dict_checkpoint = torch.load(os.path.join(cfg.output, f"checkpoint_gpu_{rank}.pt")) - start_epoch = dict_checkpoint["epoch"] - global_step = dict_checkpoint["global_step"] - backbone.module.load_state_dict(dict_checkpoint["state_dict_backbone"]) - module_partial_fc.load_state_dict(dict_checkpoint["state_dict_softmax_fc"]) - opt.load_state_dict(dict_checkpoint["state_optimizer"]) - lr_scheduler.load_state_dict(dict_checkpoint["state_lr_scheduler"]) - del dict_checkpoint - - for key, value in cfg.items(): - num_space = 25 - len(key) - logging.info(": " + key + " " * num_space + str(value)) - - callback_verification = CallBackVerification( - val_targets=cfg.val_targets, rec_prefix=cfg.rec, summary_writer=summary_writer, wandb_logger=wandb_logger - ) - callback_logging = CallBackLogging( - frequent=cfg.frequent, - total_step=cfg.total_step, - batch_size=cfg.batch_size, - start_step=global_step, - writer=summary_writer, - ) - - loss_am = AverageMeter() - amp = torch.cuda.amp.grad_scaler.GradScaler(growth_interval=100) - - for epoch in range(start_epoch, cfg.num_epoch): - - if isinstance(train_loader, DataLoader): - train_loader.sampler.set_epoch(epoch) - for _, (img, local_labels) in enumerate(train_loader): - global_step += 1 - local_embeddings = backbone(img) - loss: torch.Tensor = module_partial_fc(local_embeddings, local_labels) - - if cfg.fp16: - amp.scale(loss).backward() - if global_step % cfg.gradient_acc == 0: - amp.unscale_(opt) - torch.nn.utils.clip_grad_norm_(backbone.parameters(), 5) - amp.step(opt) - amp.update() - opt.zero_grad() - else: - loss.backward() - if global_step % cfg.gradient_acc == 0: - torch.nn.utils.clip_grad_norm_(backbone.parameters(), 5) - opt.step() - opt.zero_grad() - lr_scheduler.step() - - with torch.no_grad(): - if wandb_logger: - wandb_logger.log( - { - "Loss/Step Loss": loss.item(), - "Loss/Train Loss": loss_am.avg, - "Process/Step": global_step, - "Process/Epoch": epoch, - } - ) - - loss_am.update(loss.item(), 1) - callback_logging(global_step, loss_am, epoch, cfg.fp16, lr_scheduler.get_last_lr()[0], amp) - - if global_step % cfg.verbose == 0 and global_step > 0: - callback_verification(global_step, backbone) - - if cfg.save_all_states: - checkpoint = { - "epoch": epoch + 1, - "global_step": global_step, - "state_dict_backbone": backbone.module.state_dict(), - "state_dict_softmax_fc": module_partial_fc.state_dict(), - "state_optimizer": opt.state_dict(), - "state_lr_scheduler": lr_scheduler.state_dict(), - } - torch.save(checkpoint, os.path.join(cfg.output, f"checkpoint_gpu_{rank}.pt")) - - if rank == 0: - path_module = os.path.join(cfg.output, "model.pt") - torch.save(backbone.module.state_dict(), path_module) - - if wandb_logger and cfg.save_artifacts: - artifact_name = f"{run_name}_E{epoch}" - model = wandb.Artifact(artifact_name, type="model") - model.add_file(path_module) - wandb_logger.log_artifact(model) - - if cfg.dali: - 
train_loader.reset() - - if rank == 0: - path_module = os.path.join(cfg.output, "model.pt") - torch.save(backbone.module.state_dict(), path_module) - - if wandb_logger and cfg.save_artifacts: - artifact_name = f"{run_name}_Final" - model = wandb.Artifact(artifact_name, type="model") - model.add_file(path_module) - wandb_logger.log_artifact(model) - - -if __name__ == "__main__": - torch.backends.cudnn.benchmark = True - parser = argparse.ArgumentParser(description="Distributed Arcface Training in Pytorch") - parser.add_argument("config", type=str, help="py config file") - main(parser.parse_args()) diff --git a/spaces/illrapper/ill/README.md b/spaces/illrapper/ill/README.md deleted file mode 100644 index 061809b6e960fc3c515a134292d0b5973a0e7db0..0000000000000000000000000000000000000000 --- a/spaces/illrapper/ill/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: AutoTrain Advanced -emoji: 🚀 -colorFrom: blue -colorTo: green -sdk: docker -pinned: false -license: cc-by-nd-4.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/inamXcontru/PoeticTTS/!!TOP!! Download Danea Easyfatt 2006 Crack.md b/spaces/inamXcontru/PoeticTTS/!!TOP!! Download Danea Easyfatt 2006 Crack.md deleted file mode 100644 index 1d3e9159d538c707d0487c4e4808a4439f9af7a4..0000000000000000000000000000000000000000 --- a/spaces/inamXcontru/PoeticTTS/!!TOP!! Download Danea Easyfatt 2006 Crack.md +++ /dev/null @@ -1,10 +0,0 @@ -
      -

      Danea easyfatt Web Email completa l'ecosistema di gestione della fattura aggrada, sostituisce doppia registrazione di documenti all'interno della azienda, di trasferimenti e sms di corrispondenza. Danea contatti, fattura mobile, Danea contatti, Danea Contatti, Danea Contatti, Danea Contatti... Danea Easyfatt offline, Danea Easyfatt Web Email ci mette a disposizione.

      -

      download danea easyfatt 2006 crack


      Download File ->->->-> https://gohhs.com/2uz31T



      -

      Sophisticatezza di un sistema di gestione contabile per epoca, od azienda, Danea Easyfatt Tutto insieme! veramente come un futuro e tutti il sistema gestionale aziendale è un passepartout trasparente.

      -

      Danea Easyfatt Fatturazione Elettronica ha istituzione di uffici virtuali e convivent di richiedenti o comuni (localit di client, localitii di commesso, localitii per commesso, affari, account personale, ecc.) che possono essere in espatri e nella relazione con clienti, i localitii verranno "impostati" in automatico, senza alcun primo andamento.

      -

      Danea Easyfatt Contatti integra uno strumento grafico di modulazione avanzato per il testo aggiunto, che permettere comincia da livello di programmazione e modifica nei campi definiti (si possono aggiungere campi a barre, campi nelle sezioni, campi in blocchi, campi in case. Ho sbagliato e sono sicuro di averlo sposato la creazione per questa idea.) di empia.Gli isp normativi, spesso criticati dall'ambiente emiliano, potrebbero introdurre standard di livello europeo e del mondo per il conto fatturato e la ripartizione dei conti basata sulla razionalità delle visibilità in tempo reale.

      -

      -

      Danea Easyfatt Consigli Fatturazione Elettronica può trasferire in automatico le fatture con la creazione della fattura per cliente, per produttore, per fornitore, per valore, per regione, per pagamento, per riordinamento, per scorta, per vendita, ecc.

      899543212b
      -
      -
      \ No newline at end of file diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Autoclosets Lt 5 0 Keygen Torrent.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Autoclosets Lt 5 0 Keygen Torrent.md deleted file mode 100644 index 42aed37757d7799b689a4b260aa8a96db40b2c53..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Autoclosets Lt 5 0 Keygen Torrent.md +++ /dev/null @@ -1,21 +0,0 @@ - -

      How to Download and Install Autoclosets LT 5.0 with Keygen

      -

      Autoclosets LT 5.0 is a software that allows you to design and organize your closets and wardrobes in a simple and intuitive way. You can create custom layouts, choose from different materials and colors, and add accessories and lighting effects. Autoclosets LT 5.0 also lets you generate realistic 3D images and videos of your projects, as well as detailed reports and budgets.

      -

      autoclosets lt 5 0 keygen torrent


      Download Filehttps://urlin.us/2uEvrR



      -

      If you want to try out this software for free, you can download it from the internet using a torrent client. However, you will also need a keygen to activate it and unlock all its features. A keygen is a program that generates valid serial numbers or activation codes for a software. In this article, we will show you how to download and install Autoclosets LT 5.0 with keygen using a torrent.

      -

      Step 1: Download the torrent file

      -

      The first step is to find and download the torrent file that contains the Autoclosets LT 5.0 software and the keygen. You can use any torrent search engine or website to look for it, such as The Pirate Bay, Kickass Torrents, or RARBG. Just type "autoclosets lt 5 0 keygen" in the search box and browse through the results. Make sure you choose a torrent that has a high number of seeders and leechers, as this indicates that it is popular and reliable.

      -

      Once you find the torrent you want, click on the download link or magnet link to open it with your torrent client. If you don't have a torrent client installed on your computer, you can download one from the internet, such as uTorrent, BitTorrent, or qBittorrent. Follow the instructions on your torrent client to start downloading the torrent file.

      -

      Step 2: Extract the files

      -

      After the download is complete, you will have a compressed file (usually a ZIP or RAR file) that contains the Autoclosets LT 5.0 software and the keygen. You will need to extract the files from this compressed file using a program like WinRAR or 7-Zip. To do this, right-click on the compressed file and select "Extract here" or "Extract to" from the menu. Choose a destination folder where you want to save the extracted files.

      -

      -

      Step 3: Install the software

      -

      Now that you have extracted the files, you can proceed to install the Autoclosets LT 5.0 software on your computer. To do this, open the folder where you saved the extracted files and look for the setup file (usually named "setup.exe" or "install.exe"). Double-click on this file and follow the instructions on the screen to complete the installation process.

      -

      During the installation, you may be asked to enter a serial number or an activation code for the software. Do not enter anything yet, as you will need to use the keygen to generate one later.

      -

      Step 4: Run the keygen

      -

      After installing the software, do not launch it yet. Instead, open the folder where you saved the extracted files again and look for the keygen file (usually named "keygen.exe" or something similar). Double-click on this file and wait for it to open.

      -

      The keygen will show you a window with some options and buttons. You may need to select your software version (Autoclosets LT 5.0) from a drop-down menu or enter some information about your computer (such as your operating system or hardware ID). Then, click on the "Generate" button to create a valid serial number or activation code for your software.

      -

      Step 5: Activate the software

      -

      Now that you have generated a serial number or an activation code with the keygen, you can use it to activate your Autoclosets LT 5.0 software. To do this, launch the software and go to its activation window (usually under "Help" or "About" menu). Enter the serial number or activation code that you got from the keygen in the corresponding fields and click on "Activate" or "OK".

      d5da3c52bf
      -
      -
      \ No newline at end of file diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Bitwa Pod Wiedniem Film 12.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Bitwa Pod Wiedniem Film 12.md deleted file mode 100644 index 937dcf9cb99692a92d7fa3326f046a787055df2a..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Bitwa Pod Wiedniem Film 12.md +++ /dev/null @@ -1,6 +0,0 @@ -

      bitwa pod wiedniem film 12


      Download Zip > https://urlin.us/2uEvlG



      -
      -12 września 1683. Tysiące żołnierzy toczy bitwę pod murami Wiednia. Europa walczy o utrzymanie swoich ziem. 4d29de3e1b
      -
      -
      -

      diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Fast Img Unlocker 21exe LINK Download.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Fast Img Unlocker 21exe LINK Download.md deleted file mode 100644 index 48de6d77289e1b6b99bf22dd05dbcf8c671c5226..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Fast Img Unlocker 21exe LINK Download.md +++ /dev/null @@ -1,6 +0,0 @@ -

      Fast Img Unlocker 21exe Download


      Download File ✺✺✺ https://urlin.us/2uExUm



      - -Manorama Six Feet Under Yts Torrent 2007 Hindi Download Yify movies in hdrip 480p, 1080p, 720p 4k ... Fast Img Unlocker 21exe Download 1fdad05405
      -
      -
      -

      diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Modern Warfare 3 Could Not Find Zone Sp Intro Ff.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Modern Warfare 3 Could Not Find Zone Sp Intro Ff.md deleted file mode 100644 index 3cf0579d95803cc504d0ea41d36c82dded9d1549..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Modern Warfare 3 Could Not Find Zone Sp Intro Ff.md +++ /dev/null @@ -1,6 +0,0 @@ -

      Modern warfare 3 could not find zone sp intro ff


      DOWNLOADhttps://urlin.us/2uEvJJ



      - -3 The aim of this collection of essays is to examine the reality of absolutism, in theory and ... would not now regard as 'spiritual' (the execution of wills, for ... medieval kings, when not leading their people in war, had been ... the image created by Louis XIV, see absolute monarchy as ... Even the central zones were covered to. 1fdad05405
      -
      -
      -

      diff --git a/spaces/inreVtussa/clothingai/Examples/ANTONIS REMOS - DISCOGRAPHY Torrent.md b/spaces/inreVtussa/clothingai/Examples/ANTONIS REMOS - DISCOGRAPHY Torrent.md deleted file mode 100644 index 5e390fe62260ebfb2ed3c25c5f1e572b18e0f459..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/ANTONIS REMOS - DISCOGRAPHY Torrent.md +++ /dev/null @@ -1,7 +0,0 @@ -
      -

      I'd like to bring my 80s hero to you in a sweet and simple game. I know I need more games like it. Not anything else, but more games like this. 80s life-style. 80s. That's it. 80s. Games like this exist everywhere. It's not just this game. I'm not some weirdo. I'm not the only one. This is like a genre, like a whole group of people. This is like a movement. I was waiting for this. As soon as I saw the subject, I knew I was on to something. It has to be a new genre. Something new. So many people are doing this already. Why not me? This is my time. This is my time, my people. For the love of the 80s. - Antonis Remos

      -

      A torrent is a file sharing protocol, implemented as a specialised client called a BitTorrent client. It usually downloads torrent files from several peers, and it may join a swarm of peers in a BitTorrent peer exchange, also known as a swarm. It shares files of various BitTorrent sites at a rapid pace. You can find lists of torrent websites with great content at Torrentpedia, a wiki that has an open database of torrent trackers, a list of sites with movies, games, images, etc.. Here, you'll see the list of the best torrent sites for movies and music.

      -

      ANTONIS REMOS - DISCOGRAPHY torrent


      Download File ->->->-> https://tiurll.com/2uClgn



      -

      2004 Back to Top
      Michael Jackson - This Is It [TORRENT]
      The Gang of Four [TORRENT]
      Lionel Richie - Unforgettable [TORRENT]
      Celine Dion - Unison [TORRENT]
      Barry White - Love Can Tame the Wildest Heart [TORRENT]
      Lisa Michelle - If You Don't Know Me By Now [TORRENT]
      Whitney Houston - I'm Your Baby Tonight [TORRENT]
      Elton John - Candle in the Wind 97 [TORRENT]

      899543212b
      -
      -
      \ No newline at end of file diff --git a/spaces/inreVtussa/clothingai/Examples/BikeCAD Pro-torrent.43 UPDATED.md b/spaces/inreVtussa/clothingai/Examples/BikeCAD Pro-torrent.43 UPDATED.md deleted file mode 100644 index 07ced52bc929daefd140cbc8d99e6df41f582235..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/BikeCAD Pro-torrent.43 UPDATED.md +++ /dev/null @@ -1,5 +0,0 @@ - -

      free ware crack download serial key serial Your terms for Bikecad Pro will return more accurate download results if you exclude using keywords like: crack.macrium reflect professional software virtualbox serial number safe | - macrium reflect professional free download | - - virtualbox serial number - - Macrium Reflect Professional Crack is an award-winning disk imaging and burning your images or burning) your. MACRIUM REFLECT PRO DRIVER UNLOCKER is windows disk imaging and burning program you can start from any location on your hard drive. MACRIUM REFLECT Professional License Key is a product of NICE Tools and it's an advanced and powerful disk image and disk burning program. Download Macrium Reflect Professional Serial Key Macrum Reflect Professional keygen is here. Welcome to the Macrum Reflect Professional crack archive for windows. Macrum Reflect Professional Serial Number is a graphic based disk burner, image converter and partition manager that uses easy to understand icons to let you burn, copy, format, partition, create ISO image of folders. Macrium Reflect Professional License Key is a product of NICE Tools and it's an advanced and powerful disk imaging and disk burning program. https://youtu.be/irC_ZHgxQK4 By using Macrium Reflect Professional you can create and edit.WIM image of a local machine or the entire hard drive or an external drive. It's a complete solution to backup Windows computers.Evidence of multipole moments in deeply subwavelength nanofluids with small and large particles: N-viscosity, Wien displacement, and gradient forces. In the present work, we have measured and calculated the normal and tangential components of the flow velocity field at the surfaces of a 50 nm particle and a 2 μm particle for an intermediate range of volume fraction and we have measured the normal forces acting on these particles. First, we have determined and calculated the normal and tangential components of the N-viscosity tensor that shows large differences in the ranges of volume fractions and sizes studied, highlighting the importance of the particle-fluid interactions for the correct calculation of the flow velocity field. Then, we have calculated the Wien displacement and the gradient forces acting on the particles, finding that their magnitudes are larger than those in the free flow of the suspension.Q: How to concatenate strings in C# I would like to know, how to concatenate strings. I want to concatenate an output of a loop that returns different words depending on the int result, I want to have in the end: output = inputword + word2 + word3 + etc... so I have something like: loop inputword = RandomNumberGenerator.randomNumber (your suggestions: inputword); concatenate string; would that work? I tried: string outputword = \"\"; for (int i = 1; i< i + 1; i++) outputword += inputword; Console.WriteLine (outputword); RandomNumberGenerator.randomNumber(your suggestions); Console.ReadKey(); it isnt working.

      -

      BikeCAD Pro-torrent.43


      Download Filehttps://tiurll.com/2uCiDg



      899543212b
      -
      -
      \ No newline at end of file diff --git a/spaces/inreVtussa/clothingai/Examples/CRACK Auto Power-On And Shut-Down V1.51 ((NEW)).md b/spaces/inreVtussa/clothingai/Examples/CRACK Auto Power-On And Shut-Down V1.51 ((NEW)).md deleted file mode 100644 index 03f24ceee3ba59463259351061f1182778e4b5e9..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/CRACK Auto Power-On And Shut-Down V1.51 ((NEW)).md +++ /dev/null @@ -1,25 +0,0 @@ -
      -

      How to Use Auto Power-on and Shut-down v1.51 to Automate Your PC Tasks

      -

          Do you want to schedule your PC to power on, shut down, or perform other tasks automatically? If so, you might be interested in Auto Power-on and Shut-down v1.51, a powerful, easy-to-use program that lets you automate all kinds of PC tasks. In this article, we will show you how to use Auto Power-on and Shut-down v1.51 to make your PC work smarter and save energy.
          

      -

      What is Auto Power-on and Shut-down v1.51?

      -

          Auto Power-on and Shut-down v1.51 is a utility that can schedule your PC to power on, shut down, restart, hibernate, sleep, log off, lock the session, or run any program at any time you want. It can also clear your recycle bin or browsing history and run keyboard and mouse macros. You can create multiple tasks with different settings and triggers, such as date, time, frequency, or event. Auto Power-on and Shut-down v1.51 is compatible with Windows 10/8/7/Vista/XP/2000[^2^].
          
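          If you only need a one-off scheduled shutdown rather than a full scheduler, the same idea can be approximated with Windows' built-in shutdown command. The sketch below is purely illustrative and is not part of Auto Power-on and Shut-down v1.51; the function names and the one-hour delay are example values of my own.

          ```python
          import subprocess

          def schedule_shutdown(delay_seconds: int = 3600) -> None:
              """Ask Windows to shut down after the given delay (illustrative sketch)."""
              # /s requests a shutdown, /t sets the delay in seconds before it happens.
              subprocess.run(["shutdown", "/s", "/t", str(delay_seconds)], check=True)

          def cancel_shutdown() -> None:
              """Abort a shutdown that was scheduled with the command above."""
              subprocess.run(["shutdown", "/a"], check=True)

          if __name__ == "__main__":
              schedule_shutdown(3600)  # shut down this PC in one hour
          ```

          Unlike Auto Power-on and Shut-down, a script like this cannot wake the machine back up; powering on at a set time still relies on the BIOS/RTC wake feature discussed in the testing section below.
          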

      -

      CRACK Auto Power-On And Shut-Down v1.51


      Download ––– https://tiurll.com/2uCinQ



      -

      How to Download and Install Auto Power-on and Shut-down v1.51?

      -

          You can download Auto Power-on and Shut-down v1.51 from its official website or from other trusted sources such as Softpedia[^2^] or FileHippo[^3^]. The file size is about 1.5 MB, and the installation process is simple and fast: just follow the on-screen instructions and accept the license agreement. After installation, you will see an icon on your desktop or in your system tray that you can use to launch the program.
          

      -

      How to Test Auto Power-on and Shut-down v1.51?

      -

          Before you start creating tasks, you should test whether Auto Power-on and Shut-down v1.51 can power on your PC automatically after a shutdown. To do this, launch the program and click the Test button in the main window. A message will appear asking you to confirm the test. Click OK and wait for your PC to shut down. After a few seconds, your PC should power on again by itself. If the test is successful, you can proceed to create tasks. If not, you may need to check your BIOS settings or contact the support team for help.
          

      -

      -

      How to Create Tasks with Auto Power-on and Shut-down v1.51?

      -

          To create a task with Auto Power-on and Shut-down v1.51, click the New button in the main window. A new window will pop up where you can configure the task settings. You need to give your task a name, choose the action you want it to perform (such as power on or shut down), and set the trigger condition (such as a date, time, or event). You can also add optional settings such as password protection, a sound alert, or an on-screen message. When you are done, click OK to save your task.
          
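          In other words, each task is just a small record of a name, an action, a trigger, and a few options. As a rough mental model only (the field names below are mine, not the program's own format), such a task could be represented like this:

          ```python
          from dataclasses import dataclass
          from datetime import datetime
          from typing import Optional

          @dataclass
          class ScheduledTask:
              """Illustrative model of one task; not Auto Power-on and Shut-down's actual format."""
              name: str                      # label shown in the task list
              action: str                    # e.g. "power on", "shut down", "hibernate", "run program"
              trigger: datetime              # when the task should fire
              repeat: Optional[str] = None   # e.g. "daily", "weekly", or None for a one-off task
              password_protected: bool = False
              play_sound: bool = False
              message: Optional[str] = None  # optional message shown before the action runs
              enabled: bool = True           # mirrors the enable/disable checkbox described below

          # Example: shut the PC down every day at 23:30
          nightly = ScheduledTask(
              name="Nightly shutdown",
              action="shut down",
              trigger=datetime(2024, 1, 1, 23, 30),
              repeat="daily",
          )
          ```
          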

      -

      You can create as many tasks as you want and manage them from the main window. You can enable or disable any task by checking or unchecking the box next to its name. You can also edit or delete any task by right-clicking on it and choosing the appropriate option from the menu.

      -

      How to Use Auto Power-on and Shut-down v1.51 Effectively?

      -

      Auto Power-on and Shut-down v1.51 can help you automate your PC tasks and save energy in many ways. Here are some examples of how you can use it effectively:

      -
        -
          • You can schedule your PC to power on before you wake up in the morning and shut down after you go to bed at night.
          • You can schedule your PC to run a backup program or a virus scan at a specific time when you are not using it.
          • You can schedule your PC to hibernate or sleep when it is idle for a certain period of time.
          • You can schedule your PC to log off or lock the session when you leave your desk for a break.
          • You can schedule your PC to clear your browsing history or recycle bin regularly for privacy reasons.
          
      • You can schedule

        d5da3c52bf
        -
        -
        \ No newline at end of file diff --git a/spaces/isaiah08/dalle-mini-test/README.md b/spaces/isaiah08/dalle-mini-test/README.md deleted file mode 100644 index 99fb5087a9e146a9afc40a6eb04cb9306558389d..0000000000000000000000000000000000000000 --- a/spaces/isaiah08/dalle-mini-test/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Dalle Mini Test -emoji: 📚 -colorFrom: green -colorTo: blue -sdk: static -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/jackli888/stable-diffusion-webui/modules/sd_vae.py b/spaces/jackli888/stable-diffusion-webui/modules/sd_vae.py deleted file mode 100644 index 9b00f76e9c62c794b3a27b36bae0f168ff4f5ab8..0000000000000000000000000000000000000000 --- a/spaces/jackli888/stable-diffusion-webui/modules/sd_vae.py +++ /dev/null @@ -1,216 +0,0 @@ -import torch -import safetensors.torch -import os -import collections -from collections import namedtuple -from modules import paths, shared, devices, script_callbacks, sd_models -import glob -from copy import deepcopy - - -vae_path = os.path.abspath(os.path.join(paths.models_path, "VAE")) -vae_ignore_keys = {"model_ema.decay", "model_ema.num_updates"} -vae_dict = {} - - -base_vae = None -loaded_vae_file = None -checkpoint_info = None - -checkpoints_loaded = collections.OrderedDict() - -def get_base_vae(model): - if base_vae is not None and checkpoint_info == model.sd_checkpoint_info and model: - return base_vae - return None - - -def store_base_vae(model): - global base_vae, checkpoint_info - if checkpoint_info != model.sd_checkpoint_info: - assert not loaded_vae_file, "Trying to store non-base VAE!" - base_vae = deepcopy(model.first_stage_model.state_dict()) - checkpoint_info = model.sd_checkpoint_info - - -def delete_base_vae(): - global base_vae, checkpoint_info - base_vae = None - checkpoint_info = None - - -def restore_base_vae(model): - global loaded_vae_file - if base_vae is not None and checkpoint_info == model.sd_checkpoint_info: - print("Restoring base VAE") - _load_vae_dict(model, base_vae) - loaded_vae_file = None - delete_base_vae() - - -def get_filename(filepath): - return os.path.basename(filepath) - - -def refresh_vae_list(): - vae_dict.clear() - - paths = [ - os.path.join(sd_models.model_path, '**/*.vae.ckpt'), - os.path.join(sd_models.model_path, '**/*.vae.pt'), - os.path.join(sd_models.model_path, '**/*.vae.safetensors'), - os.path.join(vae_path, '**/*.ckpt'), - os.path.join(vae_path, '**/*.pt'), - os.path.join(vae_path, '**/*.safetensors'), - ] - - if shared.cmd_opts.ckpt_dir is not None and os.path.isdir(shared.cmd_opts.ckpt_dir): - paths += [ - os.path.join(shared.cmd_opts.ckpt_dir, '**/*.vae.ckpt'), - os.path.join(shared.cmd_opts.ckpt_dir, '**/*.vae.pt'), - os.path.join(shared.cmd_opts.ckpt_dir, '**/*.vae.safetensors'), - ] - - if shared.cmd_opts.vae_dir is not None and os.path.isdir(shared.cmd_opts.vae_dir): - paths += [ - os.path.join(shared.cmd_opts.vae_dir, '**/*.ckpt'), - os.path.join(shared.cmd_opts.vae_dir, '**/*.pt'), - os.path.join(shared.cmd_opts.vae_dir, '**/*.safetensors'), - ] - - candidates = [] - for path in paths: - candidates += glob.iglob(path, recursive=True) - - for filepath in candidates: - name = get_filename(filepath) - vae_dict[name] = filepath - - -def find_vae_near_checkpoint(checkpoint_file): - checkpoint_path = os.path.splitext(checkpoint_file)[0] - for vae_location in [checkpoint_path + ".vae.pt", checkpoint_path + ".vae.ckpt", checkpoint_path + ".vae.safetensors"]: 
- if os.path.isfile(vae_location): - return vae_location - - return None - - -def resolve_vae(checkpoint_file): - if shared.cmd_opts.vae_path is not None: - return shared.cmd_opts.vae_path, 'from commandline argument' - - is_automatic = shared.opts.sd_vae in {"Automatic", "auto"} # "auto" for people with old config - - vae_near_checkpoint = find_vae_near_checkpoint(checkpoint_file) - if vae_near_checkpoint is not None and (shared.opts.sd_vae_as_default or is_automatic): - return vae_near_checkpoint, 'found near the checkpoint' - - if shared.opts.sd_vae == "None": - return None, None - - vae_from_options = vae_dict.get(shared.opts.sd_vae, None) - if vae_from_options is not None: - return vae_from_options, 'specified in settings' - - if not is_automatic: - print(f"Couldn't find VAE named {shared.opts.sd_vae}; using None instead") - - return None, None - - -def load_vae_dict(filename, map_location): - vae_ckpt = sd_models.read_state_dict(filename, map_location=map_location) - vae_dict_1 = {k: v for k, v in vae_ckpt.items() if k[0:4] != "loss" and k not in vae_ignore_keys} - return vae_dict_1 - - -def load_vae(model, vae_file=None, vae_source="from unknown source"): - global vae_dict, loaded_vae_file - # save_settings = False - - cache_enabled = shared.opts.sd_vae_checkpoint_cache > 0 - - if vae_file: - if cache_enabled and vae_file in checkpoints_loaded: - # use vae checkpoint cache - print(f"Loading VAE weights {vae_source}: cached {get_filename(vae_file)}") - store_base_vae(model) - _load_vae_dict(model, checkpoints_loaded[vae_file]) - else: - assert os.path.isfile(vae_file), f"VAE {vae_source} doesn't exist: {vae_file}" - print(f"Loading VAE weights {vae_source}: {vae_file}") - store_base_vae(model) - - vae_dict_1 = load_vae_dict(vae_file, map_location=shared.weight_load_location) - _load_vae_dict(model, vae_dict_1) - - if cache_enabled: - # cache newly loaded vae - checkpoints_loaded[vae_file] = vae_dict_1.copy() - - # clean up cache if limit is reached - if cache_enabled: - while len(checkpoints_loaded) > shared.opts.sd_vae_checkpoint_cache + 1: # we need to count the current model - checkpoints_loaded.popitem(last=False) # LRU - - # If vae used is not in dict, update it - # It will be removed on refresh though - vae_opt = get_filename(vae_file) - if vae_opt not in vae_dict: - vae_dict[vae_opt] = vae_file - - elif loaded_vae_file: - restore_base_vae(model) - - loaded_vae_file = vae_file - - -# don't call this from outside -def _load_vae_dict(model, vae_dict_1): - model.first_stage_model.load_state_dict(vae_dict_1) - model.first_stage_model.to(devices.dtype_vae) - - -def clear_loaded_vae(): - global loaded_vae_file - loaded_vae_file = None - - -unspecified = object() - - -def reload_vae_weights(sd_model=None, vae_file=unspecified): - from modules import lowvram, devices, sd_hijack - - if not sd_model: - sd_model = shared.sd_model - - checkpoint_info = sd_model.sd_checkpoint_info - checkpoint_file = checkpoint_info.filename - - if vae_file == unspecified: - vae_file, vae_source = resolve_vae(checkpoint_file) - else: - vae_source = "from function argument" - - if loaded_vae_file == vae_file: - return - - if shared.cmd_opts.lowvram or shared.cmd_opts.medvram: - lowvram.send_everything_to_cpu() - else: - sd_model.to(devices.cpu) - - sd_hijack.model_hijack.undo_hijack(sd_model) - - load_vae(sd_model, vae_file, vae_source) - - sd_hijack.model_hijack.hijack(sd_model) - script_callbacks.model_loaded_callback(sd_model) - - if not shared.cmd_opts.lowvram and not shared.cmd_opts.medvram: - 
sd_model.to(devices.device) - - print("VAE weights loaded.") - return sd_model diff --git a/spaces/jackli888/stable-diffusion-webui/run.sh b/spaces/jackli888/stable-diffusion-webui/run.sh deleted file mode 100644 index 9752b2f4cd97e0dbc0f28d7aad76f7b2e32406ad..0000000000000000000000000000000000000000 --- a/spaces/jackli888/stable-diffusion-webui/run.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!usr/bin/env bash - -[ -d extensions/deforum ] || git clone https://github.com/deforum-art/deforum-for-automatic1111-webui extensions/deforum - -. webui.sh diff --git a/spaces/jbilcke-hf/MusicGen/tests/common_utils/__init__.py b/spaces/jbilcke-hf/MusicGen/tests/common_utils/__init__.py deleted file mode 100644 index 74ffcfef96fec35c99b2a1a053a61f44f7a8bbe9..0000000000000000000000000000000000000000 --- a/spaces/jbilcke-hf/MusicGen/tests/common_utils/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -# flake8: noqa -from .temp_utils import TempDirMixin -from .wav_utils import get_batch_white_noise, get_white_noise, save_wav diff --git a/spaces/jbilcke-hf/ai-comic-factory/src/components/ui/slider.tsx b/spaces/jbilcke-hf/ai-comic-factory/src/components/ui/slider.tsx deleted file mode 100644 index 0e35bc7fb000cffa5e29956283ecf7d75453236c..0000000000000000000000000000000000000000 --- a/spaces/jbilcke-hf/ai-comic-factory/src/components/ui/slider.tsx +++ /dev/null @@ -1,28 +0,0 @@ -"use client" - -import * as React from "react" -import * as SliderPrimitive from "@radix-ui/react-slider" - -import { cn } from "@/lib/utils" - -const Slider = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - - - - - - -)) -Slider.displayName = SliderPrimitive.Root.displayName - -export { Slider } diff --git a/spaces/jcenaa/Segment-Any-RGBD/open_vocab_seg/data/datasets/__init__.py b/spaces/jcenaa/Segment-Any-RGBD/open_vocab_seg/data/datasets/__init__.py deleted file mode 100644 index 352792b6fcdbffefa229d5d67a5c7375769fa345..0000000000000000000000000000000000000000 --- a/spaces/jcenaa/Segment-Any-RGBD/open_vocab_seg/data/datasets/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -from . import register_coco_stuff, register_voc_seg -from . import register_cc3m -from . import register_ade20k_full -from . import register_pascal_context \ No newline at end of file diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/Cipher/CAST.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/Cipher/CAST.py deleted file mode 100644 index c7e82c1c7039ef2ae80f5024dd0017e42156c453..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/Cipher/CAST.py +++ /dev/null @@ -1,159 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Cipher/CAST.py : CAST -# -# =================================================================== -# The contents of this file are dedicated to the public domain. To -# the extent that dedication to the public domain is not available, -# everyone is granted a worldwide, perpetual, royalty-free, -# non-exclusive license to exercise all rights associated with the -# contents of this file for any purpose whatsoever. -# No rights are reserved. 
-# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS -# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN -# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. -# =================================================================== -""" -Module's constants for the modes of operation supported with CAST: - -:var MODE_ECB: :ref:`Electronic Code Book (ECB) ` -:var MODE_CBC: :ref:`Cipher-Block Chaining (CBC) ` -:var MODE_CFB: :ref:`Cipher FeedBack (CFB) ` -:var MODE_OFB: :ref:`Output FeedBack (OFB) ` -:var MODE_CTR: :ref:`CounTer Mode (CTR) ` -:var MODE_OPENPGP: :ref:`OpenPGP Mode ` -:var MODE_EAX: :ref:`EAX Mode ` -""" - -import sys - -from Crypto.Cipher import _create_cipher -from Crypto.Util.py3compat import byte_string -from Crypto.Util._raw_api import (load_pycryptodome_raw_lib, - VoidPointer, SmartPointer, - c_size_t, c_uint8_ptr) - -_raw_cast_lib = load_pycryptodome_raw_lib( - "Crypto.Cipher._raw_cast", - """ - int CAST_start_operation(const uint8_t key[], - size_t key_len, - void **pResult); - int CAST_encrypt(const void *state, - const uint8_t *in, - uint8_t *out, - size_t data_len); - int CAST_decrypt(const void *state, - const uint8_t *in, - uint8_t *out, - size_t data_len); - int CAST_stop_operation(void *state); - """) - - -def _create_base_cipher(dict_parameters): - """This method instantiates and returns a handle to a low-level - base cipher. It will absorb named parameters in the process.""" - - try: - key = dict_parameters.pop("key") - except KeyError: - raise TypeError("Missing 'key' parameter") - - if len(key) not in key_size: - raise ValueError("Incorrect CAST key length (%d bytes)" % len(key)) - - start_operation = _raw_cast_lib.CAST_start_operation - stop_operation = _raw_cast_lib.CAST_stop_operation - - cipher = VoidPointer() - result = start_operation(c_uint8_ptr(key), - c_size_t(len(key)), - cipher.address_of()) - if result: - raise ValueError("Error %X while instantiating the CAST cipher" - % result) - - return SmartPointer(cipher.get(), stop_operation) - - -def new(key, mode, *args, **kwargs): - """Create a new CAST cipher - - :param key: - The secret key to use in the symmetric cipher. - Its length can vary from 5 to 16 bytes. - :type key: bytes, bytearray, memoryview - - :param mode: - The chaining mode to use for encryption or decryption. - :type mode: One of the supported ``MODE_*`` constants - - :Keyword Arguments: - * **iv** (*bytes*, *bytearray*, *memoryview*) -- - (Only applicable for ``MODE_CBC``, ``MODE_CFB``, ``MODE_OFB``, - and ``MODE_OPENPGP`` modes). - - The initialization vector to use for encryption or decryption. - - For ``MODE_CBC``, ``MODE_CFB``, and ``MODE_OFB`` it must be 8 bytes long. - - For ``MODE_OPENPGP`` mode only, - it must be 8 bytes long for encryption - and 10 bytes for decryption (in the latter case, it is - actually the *encrypted* IV which was prefixed to the ciphertext). - - If not provided, a random byte string is generated (you must then - read its value with the :attr:`iv` attribute). - - * **nonce** (*bytes*, *bytearray*, *memoryview*) -- - (Only applicable for ``MODE_EAX`` and ``MODE_CTR``). - - A value that must never be reused for any other encryption done - with this key. 
- - For ``MODE_EAX`` there are no - restrictions on its length (recommended: **16** bytes). - - For ``MODE_CTR``, its length must be in the range **[0..7]**. - - If not provided for ``MODE_EAX``, a random byte string is generated (you - can read it back via the ``nonce`` attribute). - - * **segment_size** (*integer*) -- - (Only ``MODE_CFB``).The number of **bits** the plaintext and ciphertext - are segmented in. It must be a multiple of 8. - If not specified, it will be assumed to be 8. - - * **mac_len** : (*integer*) -- - (Only ``MODE_EAX``) - Length of the authentication tag, in bytes. - It must be no longer than 8 (default). - - * **initial_value** : (*integer*) -- - (Only ``MODE_CTR``). The initial value for the counter within - the counter block. By default it is **0**. - - :Return: a CAST object, of the applicable mode. - """ - - return _create_cipher(sys.modules[__name__], key, mode, *args, **kwargs) - -MODE_ECB = 1 -MODE_CBC = 2 -MODE_CFB = 3 -MODE_OFB = 5 -MODE_CTR = 6 -MODE_OPENPGP = 7 -MODE_EAX = 9 - -# Size of a data block (in bytes) -block_size = 8 -# Size of a key (in bytes) -key_size = range(5, 16 + 1) diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/Hash/KangarooTwelve.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/Hash/KangarooTwelve.py deleted file mode 100644 index f5358d44ae140284df0d1d82e79ec5a5c3278cc7..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/Hash/KangarooTwelve.py +++ /dev/null @@ -1,262 +0,0 @@ -# =================================================================== -# -# Copyright (c) 2021, Legrandin -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# 2. Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. 
-# =================================================================== - -from Crypto.Util._raw_api import (VoidPointer, SmartPointer, - create_string_buffer, - get_raw_buffer, c_size_t, - c_uint8_ptr, c_ubyte) - -from Crypto.Util.number import long_to_bytes -from Crypto.Util.py3compat import bchr - -from .keccak import _raw_keccak_lib - - -def _length_encode(x): - if x == 0: - return b'\x00' - - S = long_to_bytes(x) - return S + bchr(len(S)) - - -# Possible states for a KangarooTwelve instance, which depend on the amount of data processed so far. -SHORT_MSG = 1 # Still within the first 8192 bytes, but it is not certain we will exceed them. -LONG_MSG_S0 = 2 # Still within the first 8192 bytes, and it is certain we will exceed them. -LONG_MSG_SX = 3 # Beyond the first 8192 bytes. -SQUEEZING = 4 # No more data to process. - - -class K12_XOF(object): - """A KangarooTwelve hash object. - Do not instantiate directly. - Use the :func:`new` function. - """ - - def __init__(self, data, custom): - - if custom == None: - custom = b'' - - self._custom = custom + _length_encode(len(custom)) - self._state = SHORT_MSG - self._padding = None # Final padding is only decided in read() - - # Internal hash that consumes FinalNode - self._hash1 = self._create_keccak() - self._length1 = 0 - - # Internal hash that produces CV_i (reset each time) - self._hash2 = None - self._length2 = 0 - - # Incremented by one for each 8192-byte block - self._ctr = 0 - - if data: - self.update(data) - - def _create_keccak(self): - state = VoidPointer() - result = _raw_keccak_lib.keccak_init(state.address_of(), - c_size_t(32), # 32 bytes of capacity (256 bits) - c_ubyte(12)) # Reduced number of rounds - if result: - raise ValueError("Error %d while instantiating KangarooTwelve" - % result) - return SmartPointer(state.get(), _raw_keccak_lib.keccak_destroy) - - def _update(self, data, hash_obj): - result = _raw_keccak_lib.keccak_absorb(hash_obj.get(), - c_uint8_ptr(data), - c_size_t(len(data))) - if result: - raise ValueError("Error %d while updating KangarooTwelve state" - % result) - - def _squeeze(self, hash_obj, length, padding): - bfr = create_string_buffer(length) - result = _raw_keccak_lib.keccak_squeeze(hash_obj.get(), - bfr, - c_size_t(length), - c_ubyte(padding)) - if result: - raise ValueError("Error %d while extracting from KangarooTwelve" - % result) - - return get_raw_buffer(bfr) - - def _reset(self, hash_obj): - result = _raw_keccak_lib.keccak_reset(hash_obj.get()) - if result: - raise ValueError("Error %d while resetting KangarooTwelve state" - % result) - - def update(self, data): - """Hash the next piece of data. - - .. note:: - For better performance, submit chunks with a length multiple of 8192 bytes. - - Args: - data (byte string/byte array/memoryview): The next chunk of the - message to hash. 
- """ - - if self._state == SQUEEZING: - raise TypeError("You cannot call 'update' after the first 'read'") - - if self._state == SHORT_MSG: - next_length = self._length1 + len(data) - - if next_length + len(self._custom) <= 8192: - self._length1 = next_length - self._update(data, self._hash1) - return self - - # Switch to tree hashing - self._state = LONG_MSG_S0 - - if self._state == LONG_MSG_S0: - data_mem = memoryview(data) - assert(self._length1 < 8192) - dtc = min(len(data), 8192 - self._length1) - self._update(data_mem[:dtc], self._hash1) - self._length1 += dtc - - if self._length1 < 8192: - return self - - # Finish hashing S_0 and start S_1 - assert(self._length1 == 8192) - - divider = b'\x03' + b'\x00' * 7 - self._update(divider, self._hash1) - self._length1 += 8 - - self._hash2 = self._create_keccak() - self._length2 = 0 - self._ctr = 1 - - self._state = LONG_MSG_SX - return self.update(data_mem[dtc:]) - - # LONG_MSG_SX - assert(self._state == LONG_MSG_SX) - index = 0 - len_data = len(data) - - # All iteractions could actually run in parallel - data_mem = memoryview(data) - while index < len_data: - - new_index = min(index + 8192 - self._length2, len_data) - self._update(data_mem[index:new_index], self._hash2) - self._length2 += new_index - index - index = new_index - - if self._length2 == 8192: - cv_i = self._squeeze(self._hash2, 32, 0x0B) - self._update(cv_i, self._hash1) - self._length1 += 32 - self._reset(self._hash2) - self._length2 = 0 - self._ctr += 1 - - return self - - def read(self, length): - """ - Produce more bytes of the digest. - - .. note:: - You cannot use :meth:`update` anymore after the first call to - :meth:`read`. - - Args: - length (integer): the amount of bytes this method must return - - :return: the next piece of XOF output (of the given length) - :rtype: byte string - """ - - custom_was_consumed = False - - if self._state == SHORT_MSG: - self._update(self._custom, self._hash1) - self._padding = 0x07 - self._state = SQUEEZING - - if self._state == LONG_MSG_S0: - self.update(self._custom) - custom_was_consumed = True - assert(self._state == LONG_MSG_SX) - - if self._state == LONG_MSG_SX: - if not custom_was_consumed: - self.update(self._custom) - - # Is there still some leftover data in hash2? - if self._length2 > 0: - cv_i = self._squeeze(self._hash2, 32, 0x0B) - self._update(cv_i, self._hash1) - self._length1 += 32 - self._reset(self._hash2) - self._length2 = 0 - self._ctr += 1 - - trailer = _length_encode(self._ctr - 1) + b'\xFF\xFF' - self._update(trailer, self._hash1) - - self._padding = 0x06 - self._state = SQUEEZING - - return self._squeeze(self._hash1, length, self._padding) - - def new(self, data=None, custom=b''): - return type(self)(data, custom) - - -def new(data=None, custom=None): - """Return a fresh instance of a KangarooTwelve object. - - Args: - data (bytes/bytearray/memoryview): - Optional. - The very first chunk of the message to hash. - It is equivalent to an early call to :meth:`update`. - custom (bytes): - Optional. - A customization byte string. 
- - :Return: A :class:`K12_XOF` object - """ - - return K12_XOF(data, custom) diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/pens/cu2quPen.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/pens/cu2quPen.py deleted file mode 100644 index f182aed44a0e8a6dfd906c385f10a5f3a14c332e..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/pens/cu2quPen.py +++ /dev/null @@ -1,325 +0,0 @@ -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import operator -from fontTools.cu2qu import curve_to_quadratic, curves_to_quadratic -from fontTools.pens.basePen import decomposeSuperBezierSegment -from fontTools.pens.filterPen import FilterPen -from fontTools.pens.reverseContourPen import ReverseContourPen -from fontTools.pens.pointPen import BasePointToSegmentPen -from fontTools.pens.pointPen import ReverseContourPointPen - - -class Cu2QuPen(FilterPen): - """A filter pen to convert cubic bezier curves to quadratic b-splines - using the FontTools SegmentPen protocol. - - Args: - - other_pen: another SegmentPen used to draw the transformed outline. - max_err: maximum approximation error in font units. For optimal results, - if you know the UPEM of the font, we recommend setting this to a - value equal, or close to UPEM / 1000. - reverse_direction: flip the contours' direction but keep starting point. - stats: a dictionary counting the point numbers of quadratic segments. - all_quadratic: if True (default), only quadratic b-splines are generated. - if False, quadratic curves or cubic curves are generated depending - on which one is more economical. - """ - - def __init__( - self, - other_pen, - max_err, - reverse_direction=False, - stats=None, - all_quadratic=True, - ): - if reverse_direction: - other_pen = ReverseContourPen(other_pen) - super().__init__(other_pen) - self.max_err = max_err - self.stats = stats - self.all_quadratic = all_quadratic - - def _convert_curve(self, pt1, pt2, pt3): - curve = (self.current_pt, pt1, pt2, pt3) - result = curve_to_quadratic(curve, self.max_err, self.all_quadratic) - if self.stats is not None: - n = str(len(result) - 2) - self.stats[n] = self.stats.get(n, 0) + 1 - if self.all_quadratic: - self.qCurveTo(*result[1:]) - else: - if len(result) == 3: - self.qCurveTo(*result[1:]) - else: - assert len(result) == 4 - super().curveTo(*result[1:]) - - def curveTo(self, *points): - n = len(points) - if n == 3: - # this is the most common case, so we special-case it - self._convert_curve(*points) - elif n > 3: - for segment in decomposeSuperBezierSegment(points): - self._convert_curve(*segment) - else: - self.qCurveTo(*points) - - -class Cu2QuPointPen(BasePointToSegmentPen): - """A filter pen to convert cubic bezier curves to quadratic b-splines - using the FontTools PointPen protocol. - - Args: - other_point_pen: another PointPen used to draw the transformed outline. 
- max_err: maximum approximation error in font units. For optimal results, - if you know the UPEM of the font, we recommend setting this to a - value equal, or close to UPEM / 1000. - reverse_direction: reverse the winding direction of all contours. - stats: a dictionary counting the point numbers of quadratic segments. - all_quadratic: if True (default), only quadratic b-splines are generated. - if False, quadratic curves or cubic curves are generated depending - on which one is more economical. - """ - - __points_required = { - "move": (1, operator.eq), - "line": (1, operator.eq), - "qcurve": (2, operator.ge), - "curve": (3, operator.eq), - } - - def __init__( - self, - other_point_pen, - max_err, - reverse_direction=False, - stats=None, - all_quadratic=True, - ): - BasePointToSegmentPen.__init__(self) - if reverse_direction: - self.pen = ReverseContourPointPen(other_point_pen) - else: - self.pen = other_point_pen - self.max_err = max_err - self.stats = stats - self.all_quadratic = all_quadratic - - def _flushContour(self, segments): - assert len(segments) >= 1 - closed = segments[0][0] != "move" - new_segments = [] - prev_points = segments[-1][1] - prev_on_curve = prev_points[-1][0] - for segment_type, points in segments: - if segment_type == "curve": - for sub_points in self._split_super_bezier_segments(points): - on_curve, smooth, name, kwargs = sub_points[-1] - bcp1, bcp2 = sub_points[0][0], sub_points[1][0] - cubic = [prev_on_curve, bcp1, bcp2, on_curve] - quad = curve_to_quadratic(cubic, self.max_err, self.all_quadratic) - if self.stats is not None: - n = str(len(quad) - 2) - self.stats[n] = self.stats.get(n, 0) + 1 - new_points = [(pt, False, None, {}) for pt in quad[1:-1]] - new_points.append((on_curve, smooth, name, kwargs)) - if self.all_quadratic or len(new_points) == 2: - new_segments.append(["qcurve", new_points]) - else: - new_segments.append(["curve", new_points]) - prev_on_curve = sub_points[-1][0] - else: - new_segments.append([segment_type, points]) - prev_on_curve = points[-1][0] - if closed: - # the BasePointToSegmentPen.endPath method that calls _flushContour - # rotates the point list of closed contours so that they end with - # the first on-curve point. We restore the original starting point. 
- new_segments = new_segments[-1:] + new_segments[:-1] - self._drawPoints(new_segments) - - def _split_super_bezier_segments(self, points): - sub_segments = [] - # n is the number of control points - n = len(points) - 1 - if n == 2: - # a simple bezier curve segment - sub_segments.append(points) - elif n > 2: - # a "super" bezier; decompose it - on_curve, smooth, name, kwargs = points[-1] - num_sub_segments = n - 1 - for i, sub_points in enumerate( - decomposeSuperBezierSegment([pt for pt, _, _, _ in points]) - ): - new_segment = [] - for point in sub_points[:-1]: - new_segment.append((point, False, None, {})) - if i == (num_sub_segments - 1): - # the last on-curve keeps its original attributes - new_segment.append((on_curve, smooth, name, kwargs)) - else: - # on-curves of sub-segments are always "smooth" - new_segment.append((sub_points[-1], True, None, {})) - sub_segments.append(new_segment) - else: - raise AssertionError("expected 2 control points, found: %d" % n) - return sub_segments - - def _drawPoints(self, segments): - pen = self.pen - pen.beginPath() - last_offcurves = [] - points_required = self.__points_required - for i, (segment_type, points) in enumerate(segments): - if segment_type in points_required: - n, op = points_required[segment_type] - assert op(len(points), n), ( - f"illegal {segment_type!r} segment point count: " - f"expected {n}, got {len(points)}" - ) - offcurves = points[:-1] - if i == 0: - # any off-curve points preceding the first on-curve - # will be appended at the end of the contour - last_offcurves = offcurves - else: - for (pt, smooth, name, kwargs) in offcurves: - pen.addPoint(pt, None, smooth, name, **kwargs) - pt, smooth, name, kwargs = points[-1] - if pt is None: - assert segment_type == "qcurve" - # special quadratic contour with no on-curve points: - # we need to skip the "None" point. See also the Pen - # protocol's qCurveTo() method and fontTools.pens.basePen - pass - else: - pen.addPoint(pt, segment_type, smooth, name, **kwargs) - else: - raise AssertionError("unexpected segment type: %r" % segment_type) - for (pt, smooth, name, kwargs) in last_offcurves: - pen.addPoint(pt, None, smooth, name, **kwargs) - pen.endPath() - - def addComponent(self, baseGlyphName, transformation): - assert self.currentPath is None - self.pen.addComponent(baseGlyphName, transformation) - - -class Cu2QuMultiPen: - """A filter multi-pen to convert cubic bezier curves to quadratic b-splines - in a interpolation-compatible manner, using the FontTools SegmentPen protocol. - - Args: - - other_pens: list of SegmentPens used to draw the transformed outlines. - max_err: maximum approximation error in font units. For optimal results, - if you know the UPEM of the font, we recommend setting this to a - value equal, or close to UPEM / 1000. - reverse_direction: flip the contours' direction but keep starting point. - - This pen does not follow the normal SegmentPen protocol. Instead, its - moveTo/lineTo/qCurveTo/curveTo methods take a list of tuples that are - arguments that would normally be passed to a SegmentPen, one item for - each of the pens in other_pens. 
- """ - - # TODO Simplify like 3e8ebcdce592fe8a59ca4c3a294cc9724351e1ce - # Remove start_pts and _add_moveTO - - def __init__(self, other_pens, max_err, reverse_direction=False): - if reverse_direction: - other_pens = [ - ReverseContourPen(pen, outputImpliedClosingLine=True) - for pen in other_pens - ] - self.pens = other_pens - self.max_err = max_err - self.start_pts = None - self.current_pts = None - - def _check_contour_is_open(self): - if self.current_pts is None: - raise AssertionError("moveTo is required") - - def _check_contour_is_closed(self): - if self.current_pts is not None: - raise AssertionError("closePath or endPath is required") - - def _add_moveTo(self): - if self.start_pts is not None: - for pt, pen in zip(self.start_pts, self.pens): - pen.moveTo(*pt) - self.start_pts = None - - def moveTo(self, pts): - self._check_contour_is_closed() - self.start_pts = self.current_pts = pts - self._add_moveTo() - - def lineTo(self, pts): - self._check_contour_is_open() - self._add_moveTo() - for pt, pen in zip(pts, self.pens): - pen.lineTo(*pt) - self.current_pts = pts - - def qCurveTo(self, pointsList): - self._check_contour_is_open() - if len(pointsList[0]) == 1: - self.lineTo([(points[0],) for points in pointsList]) - return - self._add_moveTo() - current_pts = [] - for points, pen in zip(pointsList, self.pens): - pen.qCurveTo(*points) - current_pts.append((points[-1],)) - self.current_pts = current_pts - - def _curves_to_quadratic(self, pointsList): - curves = [] - for current_pt, points in zip(self.current_pts, pointsList): - curves.append(current_pt + points) - quadratics = curves_to_quadratic(curves, [self.max_err] * len(curves)) - pointsList = [] - for quadratic in quadratics: - pointsList.append(quadratic[1:]) - self.qCurveTo(pointsList) - - def curveTo(self, pointsList): - self._check_contour_is_open() - self._curves_to_quadratic(pointsList) - - def closePath(self): - self._check_contour_is_open() - if self.start_pts is None: - for pen in self.pens: - pen.closePath() - self.current_pts = self.start_pts = None - - def endPath(self): - self._check_contour_is_open() - if self.start_pts is None: - for pen in self.pens: - pen.endPath() - self.current_pts = self.start_pts = None - - def addComponent(self, glyphName, transformations): - self._check_contour_is_closed() - for trans, pen in zip(transformations, self.pens): - pen.addComponent(glyphName, trans) diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/tfmLib.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/tfmLib.py deleted file mode 100644 index 673373ffdf4825d4caac4ce5959eb0ee9e11046c..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/tfmLib.py +++ /dev/null @@ -1,460 +0,0 @@ -"""Module for reading TFM (TeX Font Metrics) files. - -The TFM format is described in the TFtoPL WEB source code, whose typeset form -can be found on `CTAN `_. - - >>> from fontTools.tfmLib import TFM - >>> tfm = TFM("Tests/tfmLib/data/cmr10.tfm") - >>> - >>> # Accessing an attribute gets you metadata. 
- >>> tfm.checksum - 1274110073 - >>> tfm.designsize - 10.0 - >>> tfm.codingscheme - 'TeX text' - >>> tfm.family - 'CMR' - >>> tfm.seven_bit_safe_flag - False - >>> tfm.face - 234 - >>> tfm.extraheader - {} - >>> tfm.fontdimens - {'SLANT': 0.0, 'SPACE': 0.33333396911621094, 'STRETCH': 0.16666698455810547, 'SHRINK': 0.11111164093017578, 'XHEIGHT': 0.4305553436279297, 'QUAD': 1.0000028610229492, 'EXTRASPACE': 0.11111164093017578} - >>> # Accessing a character gets you its metrics. - >>> # “width” is always available, other metrics are available only when - >>> # applicable. All values are relative to “designsize”. - >>> tfm.chars[ord("g")] - {'width': 0.5000019073486328, 'height': 0.4305553436279297, 'depth': 0.1944446563720703, 'italic': 0.013888359069824219} - >>> # Kerning and ligature can be accessed as well. - >>> tfm.kerning[ord("c")] - {104: -0.02777862548828125, 107: -0.02777862548828125} - >>> tfm.ligatures[ord("f")] - {105: ('LIG', 12), 102: ('LIG', 11), 108: ('LIG', 13)} -""" - -from types import SimpleNamespace - -from fontTools.misc.sstruct import calcsize, unpack, unpack2 - -SIZES_FORMAT = """ - > - lf: h # length of the entire file, in words - lh: h # length of the header data, in words - bc: h # smallest character code in the font - ec: h # largest character code in the font - nw: h # number of words in the width table - nh: h # number of words in the height table - nd: h # number of words in the depth table - ni: h # number of words in the italic correction table - nl: h # number of words in the ligature/kern table - nk: h # number of words in the kern table - ne: h # number of words in the extensible character table - np: h # number of font parameter words -""" - -SIZES_SIZE = calcsize(SIZES_FORMAT) - -FIXED_FORMAT = "12.20F" - -HEADER_FORMAT1 = f""" - > - checksum: L - designsize: {FIXED_FORMAT} -""" - -HEADER_FORMAT2 = f""" - {HEADER_FORMAT1} - codingscheme: 40p -""" - -HEADER_FORMAT3 = f""" - {HEADER_FORMAT2} - family: 20p -""" - -HEADER_FORMAT4 = f""" - {HEADER_FORMAT3} - seven_bit_safe_flag: ? - ignored: x - ignored: x - face: B -""" - -HEADER_SIZE1 = calcsize(HEADER_FORMAT1) -HEADER_SIZE2 = calcsize(HEADER_FORMAT2) -HEADER_SIZE3 = calcsize(HEADER_FORMAT3) -HEADER_SIZE4 = calcsize(HEADER_FORMAT4) - -LIG_KERN_COMMAND = """ - > - skip_byte: B - next_char: B - op_byte: B - remainder: B -""" - -BASE_PARAMS = [ - "SLANT", - "SPACE", - "STRETCH", - "SHRINK", - "XHEIGHT", - "QUAD", - "EXTRASPACE", -] - -MATHSY_PARAMS = [ - "NUM1", - "NUM2", - "NUM3", - "DENOM1", - "DENOM2", - "SUP1", - "SUP2", - "SUP3", - "SUB1", - "SUB2", - "SUPDROP", - "SUBDROP", - "DELIM1", - "DELIM2", - "AXISHEIGHT", -] - -MATHEX_PARAMS = [ - "DEFAULTRULETHICKNESS", - "BIGOPSPACING1", - "BIGOPSPACING2", - "BIGOPSPACING3", - "BIGOPSPACING4", - "BIGOPSPACING5", -] - -VANILLA = 0 -MATHSY = 1 -MATHEX = 2 - -UNREACHABLE = 0 -PASSTHROUGH = 1 -ACCESSABLE = 2 - -NO_TAG = 0 -LIG_TAG = 1 -LIST_TAG = 2 -EXT_TAG = 3 - -STOP_FLAG = 128 -KERN_FLAG = 128 - - -class TFMException(Exception): - def __init__(self, message): - super().__init__(message) - - -class TFM: - def __init__(self, file): - self._read(file) - - def __repr__(self): - return ( - f"" - ) - - def _read(self, file): - if hasattr(file, "read"): - data = file.read() - else: - with open(file, "rb") as fp: - data = fp.read() - - self._data = data - - if len(data) < SIZES_SIZE: - raise TFMException("Too short input file") - - sizes = SimpleNamespace() - unpack2(SIZES_FORMAT, data, sizes) - - # Do some file structure sanity checks. 
- # TeX and TFtoPL do additional functional checks and might even correct - # “errors” in the input file, but we instead try to output the file as - # it is as long as it is parsable, even if the data make no sense. - - if sizes.lf < 0: - raise TFMException("The file claims to have negative or zero length!") - - if len(data) < sizes.lf * 4: - raise TFMException("The file has fewer bytes than it claims!") - - for name, length in vars(sizes).items(): - if length < 0: - raise TFMException("The subfile size: '{name}' is negative!") - - if sizes.lh < 2: - raise TFMException(f"The header length is only {sizes.lh}!") - - if sizes.bc > sizes.ec + 1 or sizes.ec > 255: - raise TFMException( - f"The character code range {sizes.bc}..{sizes.ec} is illegal!" - ) - - if sizes.nw == 0 or sizes.nh == 0 or sizes.nd == 0 or sizes.ni == 0: - raise TFMException("Incomplete subfiles for character dimensions!") - - if sizes.ne > 256: - raise TFMException(f"There are {ne} extensible recipes!") - - if sizes.lf != ( - 6 - + sizes.lh - + (sizes.ec - sizes.bc + 1) - + sizes.nw - + sizes.nh - + sizes.nd - + sizes.ni - + sizes.nl - + sizes.nk - + sizes.ne - + sizes.np - ): - raise TFMException("Subfile sizes don’t add up to the stated total") - - # Subfile offsets, used in the helper function below. These all are - # 32-bit word offsets not 8-bit byte offsets. - char_base = 6 + sizes.lh - sizes.bc - width_base = char_base + sizes.ec + 1 - height_base = width_base + sizes.nw - depth_base = height_base + sizes.nh - italic_base = depth_base + sizes.nd - lig_kern_base = italic_base + sizes.ni - kern_base = lig_kern_base + sizes.nl - exten_base = kern_base + sizes.nk - param_base = exten_base + sizes.ne - - # Helper functions for accessing individual data. If this looks - # nonidiomatic Python, I blame the effect of reading the literate WEB - # documentation of TFtoPL. - def char_info(c): - return 4 * (char_base + c) - - def width_index(c): - return data[char_info(c)] - - def noneexistent(c): - return c < sizes.bc or c > sizes.ec or width_index(c) == 0 - - def height_index(c): - return data[char_info(c) + 1] // 16 - - def depth_index(c): - return data[char_info(c) + 1] % 16 - - def italic_index(c): - return data[char_info(c) + 2] // 4 - - def tag(c): - return data[char_info(c) + 2] % 4 - - def remainder(c): - return data[char_info(c) + 3] - - def width(c): - r = 4 * (width_base + width_index(c)) - return read_fixed(r, "v")["v"] - - def height(c): - r = 4 * (height_base + height_index(c)) - return read_fixed(r, "v")["v"] - - def depth(c): - r = 4 * (depth_base + depth_index(c)) - return read_fixed(r, "v")["v"] - - def italic(c): - r = 4 * (italic_base + italic_index(c)) - return read_fixed(r, "v")["v"] - - def exten(c): - return 4 * (exten_base + remainder(c)) - - def lig_step(i): - return 4 * (lig_kern_base + i) - - def lig_kern_command(i): - command = SimpleNamespace() - unpack2(LIG_KERN_COMMAND, data[i:], command) - return command - - def kern(i): - r = 4 * (kern_base + i) - return read_fixed(r, "v")["v"] - - def param(i): - return 4 * (param_base + i) - - def read_fixed(index, key, obj=None): - ret = unpack2(f">;{key}:{FIXED_FORMAT}", data[index:], obj) - return ret[0] - - # Set all attributes to empty values regardless of the header size. 
- unpack(HEADER_FORMAT4, [0] * HEADER_SIZE4, self) - - offset = 24 - length = sizes.lh * 4 - self.extraheader = {} - if length >= HEADER_SIZE4: - rest = unpack2(HEADER_FORMAT4, data[offset:], self)[1] - if self.face < 18: - s = self.face % 2 - b = self.face // 2 - self.face = "MBL"[b % 3] + "RI"[s] + "RCE"[b // 3] - for i in range(sizes.lh - HEADER_SIZE4 // 4): - rest = unpack2(f">;HEADER{i + 18}:l", rest, self.extraheader)[1] - elif length >= HEADER_SIZE3: - unpack2(HEADER_FORMAT3, data[offset:], self) - elif length >= HEADER_SIZE2: - unpack2(HEADER_FORMAT2, data[offset:], self) - elif length >= HEADER_SIZE1: - unpack2(HEADER_FORMAT1, data[offset:], self) - - self.fonttype = VANILLA - scheme = self.codingscheme.upper() - if scheme.startswith("TEX MATH SY"): - self.fonttype = MATHSY - elif scheme.startswith("TEX MATH EX"): - self.fonttype = MATHEX - - self.fontdimens = {} - for i in range(sizes.np): - name = f"PARAMETER{i+1}" - if i <= 6: - name = BASE_PARAMS[i] - elif self.fonttype == MATHSY and i <= 21: - name = MATHSY_PARAMS[i - 7] - elif self.fonttype == MATHEX and i <= 12: - name = MATHEX_PARAMS[i - 7] - read_fixed(param(i), name, self.fontdimens) - - lig_kern_map = {} - self.right_boundary_char = None - self.left_boundary_char = None - if sizes.nl > 0: - cmd = lig_kern_command(lig_step(0)) - if cmd.skip_byte == 255: - self.right_boundary_char = cmd.next_char - - cmd = lig_kern_command(lig_step((sizes.nl - 1))) - if cmd.skip_byte == 255: - self.left_boundary_char = 256 - r = 256 * cmd.op_byte + cmd.remainder - lig_kern_map[self.left_boundary_char] = r - - self.chars = {} - for c in range(sizes.bc, sizes.ec + 1): - if width_index(c) > 0: - self.chars[c] = info = {} - info["width"] = width(c) - if height_index(c) > 0: - info["height"] = height(c) - if depth_index(c) > 0: - info["depth"] = depth(c) - if italic_index(c) > 0: - info["italic"] = italic(c) - char_tag = tag(c) - if char_tag == NO_TAG: - pass - elif char_tag == LIG_TAG: - lig_kern_map[c] = remainder(c) - elif char_tag == LIST_TAG: - info["nextlarger"] = remainder(c) - elif char_tag == EXT_TAG: - info["varchar"] = varchar = {} - for i in range(4): - part = data[exten(c) + i] - if i == 3 or part > 0: - name = "rep" - if i == 0: - name = "top" - elif i == 1: - name = "mid" - elif i == 2: - name = "bot" - if noneexistent(part): - varchar[name] = c - else: - varchar[name] = part - - self.ligatures = {} - self.kerning = {} - for c, i in sorted(lig_kern_map.items()): - cmd = lig_kern_command(lig_step(i)) - if cmd.skip_byte > STOP_FLAG: - i = 256 * cmd.op_byte + cmd.remainder - - while i < sizes.nl: - cmd = lig_kern_command(lig_step(i)) - if cmd.skip_byte > STOP_FLAG: - pass - else: - if cmd.op_byte >= KERN_FLAG: - r = 256 * (cmd.op_byte - KERN_FLAG) + cmd.remainder - self.kerning.setdefault(c, {})[cmd.next_char] = kern(r) - else: - r = cmd.op_byte - if r == 4 or (r > 7 and r != 11): - # Ligature step with nonstandard code, we output - # the code verbatim. 
- lig = r - else: - lig = "" - if r % 4 > 1: - lig += "/" - lig += "LIG" - if r % 2 != 0: - lig += "/" - while r > 3: - lig += ">" - r -= 4 - self.ligatures.setdefault(c, {})[cmd.next_char] = ( - lig, - cmd.remainder, - ) - - if cmd.skip_byte >= STOP_FLAG: - break - i += cmd.skip_byte + 1 - - -if __name__ == "__main__": - import sys - - tfm = TFM(sys.argv[1]) - print( - "\n".join( - x - for x in [ - f"tfm.checksum={tfm.checksum}", - f"tfm.designsize={tfm.designsize}", - f"tfm.codingscheme={tfm.codingscheme}", - f"tfm.fonttype={tfm.fonttype}", - f"tfm.family={tfm.family}", - f"tfm.seven_bit_safe_flag={tfm.seven_bit_safe_flag}", - f"tfm.face={tfm.face}", - f"tfm.extraheader={tfm.extraheader}", - f"tfm.fontdimens={tfm.fontdimens}", - f"tfm.right_boundary_char={tfm.right_boundary_char}", - f"tfm.left_boundary_char={tfm.left_boundary_char}", - f"tfm.kerning={tfm.kerning}", - f"tfm.ligatures={tfm.ligatures}", - f"tfm.chars={tfm.chars}", - ] - ) - ) - print(tfm) diff --git a/spaces/johnsamuel/RAGTest/README.md b/spaces/johnsamuel/RAGTest/README.md deleted file mode 100644 index 0a887f0c80bb78ea2d811b2db8ffd1c7023c7031..0000000000000000000000000000000000000000 --- a/spaces/johnsamuel/RAGTest/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: RAGTest -emoji: 📚 -colorFrom: purple -colorTo: purple -sdk: gradio -sdk_version: 3.40.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/jordonpeter01/ai-comic-factory/src/lib/cropImage.ts b/spaces/jordonpeter01/ai-comic-factory/src/lib/cropImage.ts deleted file mode 100644 index 2d6b7e1f8c112564f372ab1da3af76a337b7f35b..0000000000000000000000000000000000000000 --- a/spaces/jordonpeter01/ai-comic-factory/src/lib/cropImage.ts +++ /dev/null @@ -1,53 +0,0 @@ -async function cropImage(inputImage: string): Promise<{ croppedImage: string; x: number; y: number; width: number; height: number }> { - return new Promise((resolve, reject) => { - const img = new Image(); - img.src = inputImage; - img.onload = () => { - const canvas = document.createElement('canvas'); - const context = canvas.getContext('2d'); - if (!context) { - reject("Context is null"); - return; - } - canvas.width = img.width; - canvas.height = img.height; - context.drawImage(img, 0, 0, img.width, img.height); - const imageData = context.getImageData(0, 0, img.width, img.height); - const data = imageData.data; - let minX = img.width, minY = img.height, maxX = 0, maxY = 0; - - for (let y = 0; y < img.height; y++) { - for (let x = 0; x < img.width; x++) { - const i = (y * 4) * img.width + x * 4; - const avg = (data[i] + data[i + 1] + data[i + 2]) / 3; - if (avg < 255) { - minX = Math.min(minX, x); - minY = Math.min(minY, y); - maxX = Math.max(maxX, x); - maxY = Math.max(maxY, y); - } - } - } - - const width = maxX - minX; - const height = maxY - minY; - const croppedCanvas = document.createElement('canvas'); - croppedCanvas.width = width; - croppedCanvas.height = height; - const croppedCtx = croppedCanvas.getContext('2d'); - if (!croppedCtx) { - reject("croppedCtx is null"); - return; - } - croppedCtx.drawImage(canvas, minX, minY, width, height, 0, 0, width, height); - resolve({ - croppedImage: croppedCanvas.toDataURL(), - x: minX, - y: minY, - width, - height - }); - }; - img.onerror = reject; - }); -} \ No newline at end of file diff --git a/spaces/jroust/prompthero-openjourney/README.md b/spaces/jroust/prompthero-openjourney/README.md deleted file mode 100644 index 
23ded07d4e2d96bac9ed2b116d6b6c40205e6c16..0000000000000000000000000000000000000000 --- a/spaces/jroust/prompthero-openjourney/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Prompthero Openjourney -emoji: 🌖 -colorFrom: yellow -colorTo: pink -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/justYu2001/furniture-detection/models/yolo.py b/spaces/justYu2001/furniture-detection/models/yolo.py deleted file mode 100644 index 95a019c6aeec8c3f1d582907d5fe7ff3ed6b9369..0000000000000000000000000000000000000000 --- a/spaces/justYu2001/furniture-detection/models/yolo.py +++ /dev/null @@ -1,843 +0,0 @@ -import argparse -import logging -import sys -from copy import deepcopy - -sys.path.append('./') # to run '$ python *.py' files in subdirectories -logger = logging.getLogger(__name__) -import torch -from models.common import * -from models.experimental import * -from utils.autoanchor import check_anchor_order -from utils.general import make_divisible, check_file, set_logging -from utils.torch_utils import time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \ - select_device, copy_attr -from utils.loss import SigmoidBin - -try: - import thop # for FLOPS computation -except ImportError: - thop = None - - -class Detect(nn.Module): - stride = None # strides computed during build - export = False # onnx export - end2end = False - include_nms = False - concat = False - - def __init__(self, nc=80, anchors=(), ch=()): # detection layer - super(Detect, self).__init__() - self.nc = nc # number of classes - self.no = nc + 5 # number of outputs per anchor - self.nl = len(anchors) # number of detection layers - self.na = len(anchors[0]) // 2 # number of anchors - self.grid = [torch.zeros(1)] * self.nl # init grid - a = torch.tensor(anchors).float().view(self.nl, -1, 2) - self.register_buffer('anchors', a) # shape(nl,na,2) - self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2) - self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv - - def forward(self, x): - # x = x.copy() # for profiling - z = [] # inference output - self.training |= self.export - for i in range(self.nl): - x[i] = self.m[i](x[i]) # conv - bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) - x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() - - if not self.training: # inference - if self.grid[i].shape[2:4] != x[i].shape[2:4]: - self.grid[i] = self._make_grid(nx, ny).to(x[i].device) - y = x[i].sigmoid() - if not torch.onnx.is_in_onnx_export(): - y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy - y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh - else: - xy, wh, conf = y.split((2, 2, self.nc + 1), 4) # y.tensor_split((2, 4, 5), 4) # torch 1.8.0 - xy = xy * (2. 
* self.stride[i]) + (self.stride[i] * (self.grid[i] - 0.5)) # new xy - wh = wh ** 2 * (4 * self.anchor_grid[i].data) # new wh - y = torch.cat((xy, wh, conf), 4) - z.append(y.view(bs, -1, self.no)) - - if self.training: - out = x - elif self.end2end: - out = torch.cat(z, 1) - elif self.include_nms: - z = self.convert(z) - out = (z, ) - elif self.concat: - out = torch.cat(z, 1) - else: - out = (torch.cat(z, 1), x) - - return out - - @staticmethod - def _make_grid(nx=20, ny=20): - yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)]) - return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float() - - def convert(self, z): - z = torch.cat(z, 1) - box = z[:, :, :4] - conf = z[:, :, 4:5] - score = z[:, :, 5:] - score *= conf - convert_matrix = torch.tensor([[1, 0, 1, 0], [0, 1, 0, 1], [-0.5, 0, 0.5, 0], [0, -0.5, 0, 0.5]], - dtype=torch.float32, - device=z.device) - box @= convert_matrix - return (box, score) - - -class IDetect(nn.Module): - stride = None # strides computed during build - export = False # onnx export - end2end = False - include_nms = False - concat = False - - def __init__(self, nc=80, anchors=(), ch=()): # detection layer - super(IDetect, self).__init__() - self.nc = nc # number of classes - self.no = nc + 5 # number of outputs per anchor - self.nl = len(anchors) # number of detection layers - self.na = len(anchors[0]) // 2 # number of anchors - self.grid = [torch.zeros(1)] * self.nl # init grid - a = torch.tensor(anchors).float().view(self.nl, -1, 2) - self.register_buffer('anchors', a) # shape(nl,na,2) - self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2) - self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv - - self.ia = nn.ModuleList(ImplicitA(x) for x in ch) - self.im = nn.ModuleList(ImplicitM(self.no * self.na) for _ in ch) - - def forward(self, x): - # x = x.copy() # for profiling - z = [] # inference output - self.training |= self.export - for i in range(self.nl): - x[i] = self.m[i](self.ia[i](x[i])) # conv - x[i] = self.im[i](x[i]) - bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) - x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() - - if not self.training: # inference - if self.grid[i].shape[2:4] != x[i].shape[2:4]: - self.grid[i] = self._make_grid(nx, ny).to(x[i].device) - - y = x[i].sigmoid() - y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy - y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh - z.append(y.view(bs, -1, self.no)) - - return x if self.training else (torch.cat(z, 1), x) - - def fuseforward(self, x): - # x = x.copy() # for profiling - z = [] # inference output - self.training |= self.export - for i in range(self.nl): - x[i] = self.m[i](x[i]) # conv - bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) - x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() - - if not self.training: # inference - if self.grid[i].shape[2:4] != x[i].shape[2:4]: - self.grid[i] = self._make_grid(nx, ny).to(x[i].device) - - y = x[i].sigmoid() - if not torch.onnx.is_in_onnx_export(): - y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy - y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh - else: - xy, wh, conf = y.split((2, 2, self.nc + 1), 4) # y.tensor_split((2, 4, 5), 4) # torch 1.8.0 - xy = xy * (2. 
* self.stride[i]) + (self.stride[i] * (self.grid[i] - 0.5)) # new xy - wh = wh ** 2 * (4 * self.anchor_grid[i].data) # new wh - y = torch.cat((xy, wh, conf), 4) - z.append(y.view(bs, -1, self.no)) - - if self.training: - out = x - elif self.end2end: - out = torch.cat(z, 1) - elif self.include_nms: - z = self.convert(z) - out = (z, ) - elif self.concat: - out = torch.cat(z, 1) - else: - out = (torch.cat(z, 1), x) - - return out - - def fuse(self): - print("IDetect.fuse") - # fuse ImplicitA and Convolution - for i in range(len(self.m)): - c1,c2,_,_ = self.m[i].weight.shape - c1_,c2_, _,_ = self.ia[i].implicit.shape - self.m[i].bias += torch.matmul(self.m[i].weight.reshape(c1,c2),self.ia[i].implicit.reshape(c2_,c1_)).squeeze(1) - - # fuse ImplicitM and Convolution - for i in range(len(self.m)): - c1,c2, _,_ = self.im[i].implicit.shape - self.m[i].bias *= self.im[i].implicit.reshape(c2) - self.m[i].weight *= self.im[i].implicit.transpose(0,1) - - @staticmethod - def _make_grid(nx=20, ny=20): - yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)]) - return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float() - - def convert(self, z): - z = torch.cat(z, 1) - box = z[:, :, :4] - conf = z[:, :, 4:5] - score = z[:, :, 5:] - score *= conf - convert_matrix = torch.tensor([[1, 0, 1, 0], [0, 1, 0, 1], [-0.5, 0, 0.5, 0], [0, -0.5, 0, 0.5]], - dtype=torch.float32, - device=z.device) - box @= convert_matrix - return (box, score) - - -class IKeypoint(nn.Module): - stride = None # strides computed during build - export = False # onnx export - - def __init__(self, nc=80, anchors=(), nkpt=17, ch=(), inplace=True, dw_conv_kpt=False): # detection layer - super(IKeypoint, self).__init__() - self.nc = nc # number of classes - self.nkpt = nkpt - self.dw_conv_kpt = dw_conv_kpt - self.no_det=(nc + 5) # number of outputs per anchor for box and class - self.no_kpt = 3*self.nkpt ## number of outputs per anchor for keypoints - self.no = self.no_det+self.no_kpt - self.nl = len(anchors) # number of detection layers - self.na = len(anchors[0]) // 2 # number of anchors - self.grid = [torch.zeros(1)] * self.nl # init grid - self.flip_test = False - a = torch.tensor(anchors).float().view(self.nl, -1, 2) - self.register_buffer('anchors', a) # shape(nl,na,2) - self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2) - self.m = nn.ModuleList(nn.Conv2d(x, self.no_det * self.na, 1) for x in ch) # output conv - - self.ia = nn.ModuleList(ImplicitA(x) for x in ch) - self.im = nn.ModuleList(ImplicitM(self.no_det * self.na) for _ in ch) - - if self.nkpt is not None: - if self.dw_conv_kpt: #keypoint head is slightly more complex - self.m_kpt = nn.ModuleList( - nn.Sequential(DWConv(x, x, k=3), Conv(x,x), - DWConv(x, x, k=3), Conv(x, x), - DWConv(x, x, k=3), Conv(x,x), - DWConv(x, x, k=3), Conv(x, x), - DWConv(x, x, k=3), Conv(x, x), - DWConv(x, x, k=3), nn.Conv2d(x, self.no_kpt * self.na, 1)) for x in ch) - else: #keypoint head is a single convolution - self.m_kpt = nn.ModuleList(nn.Conv2d(x, self.no_kpt * self.na, 1) for x in ch) - - self.inplace = inplace # use in-place ops (e.g. 
slice assignment) - - def forward(self, x): - # x = x.copy() # for profiling - z = [] # inference output - self.training |= self.export - for i in range(self.nl): - if self.nkpt is None or self.nkpt==0: - x[i] = self.im[i](self.m[i](self.ia[i](x[i]))) # conv - else : - x[i] = torch.cat((self.im[i](self.m[i](self.ia[i](x[i]))), self.m_kpt[i](x[i])), axis=1) - - bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) - x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() - x_det = x[i][..., :6] - x_kpt = x[i][..., 6:] - - if not self.training: # inference - if self.grid[i].shape[2:4] != x[i].shape[2:4]: - self.grid[i] = self._make_grid(nx, ny).to(x[i].device) - kpt_grid_x = self.grid[i][..., 0:1] - kpt_grid_y = self.grid[i][..., 1:2] - - if self.nkpt == 0: - y = x[i].sigmoid() - else: - y = x_det.sigmoid() - - if self.inplace: - xy = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy - wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i].view(1, self.na, 1, 1, 2) # wh - if self.nkpt != 0: - x_kpt[..., 0::3] = (x_kpt[..., ::3] * 2. - 0.5 + kpt_grid_x.repeat(1,1,1,1,17)) * self.stride[i] # xy - x_kpt[..., 1::3] = (x_kpt[..., 1::3] * 2. - 0.5 + kpt_grid_y.repeat(1,1,1,1,17)) * self.stride[i] # xy - #x_kpt[..., 0::3] = (x_kpt[..., ::3] + kpt_grid_x.repeat(1,1,1,1,17)) * self.stride[i] # xy - #x_kpt[..., 1::3] = (x_kpt[..., 1::3] + kpt_grid_y.repeat(1,1,1,1,17)) * self.stride[i] # xy - #print('=============') - #print(self.anchor_grid[i].shape) - #print(self.anchor_grid[i][...,0].unsqueeze(4).shape) - #print(x_kpt[..., 0::3].shape) - #x_kpt[..., 0::3] = ((x_kpt[..., 0::3].tanh() * 2.) ** 3 * self.anchor_grid[i][...,0].unsqueeze(4).repeat(1,1,1,1,self.nkpt)) + kpt_grid_x.repeat(1,1,1,1,17) * self.stride[i] # xy - #x_kpt[..., 1::3] = ((x_kpt[..., 1::3].tanh() * 2.) ** 3 * self.anchor_grid[i][...,1].unsqueeze(4).repeat(1,1,1,1,self.nkpt)) + kpt_grid_y.repeat(1,1,1,1,17) * self.stride[i] # xy - #x_kpt[..., 0::3] = (((x_kpt[..., 0::3].sigmoid() * 4.) ** 2 - 8.) * self.anchor_grid[i][...,0].unsqueeze(4).repeat(1,1,1,1,self.nkpt)) + kpt_grid_x.repeat(1,1,1,1,17) * self.stride[i] # xy - #x_kpt[..., 1::3] = (((x_kpt[..., 1::3].sigmoid() * 4.) ** 2 - 8.) * self.anchor_grid[i][...,1].unsqueeze(4).repeat(1,1,1,1,self.nkpt)) + kpt_grid_y.repeat(1,1,1,1,17) * self.stride[i] # xy - x_kpt[..., 2::3] = x_kpt[..., 2::3].sigmoid() - - y = torch.cat((xy, wh, y[..., 4:], x_kpt), dim = -1) - - else: # for YOLOv5 on AWS Inferentia https://github.com/ultralytics/yolov5/pull/2953 - xy = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy - wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh - if self.nkpt != 0: - y[..., 6:] = (y[..., 6:] * 2. 
- 0.5 + self.grid[i].repeat((1,1,1,1,self.nkpt))) * self.stride[i] # xy - y = torch.cat((xy, wh, y[..., 4:]), -1) - - z.append(y.view(bs, -1, self.no)) - - return x if self.training else (torch.cat(z, 1), x) - - @staticmethod - def _make_grid(nx=20, ny=20): - yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)]) - return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float() - - -class IAuxDetect(nn.Module): - stride = None # strides computed during build - export = False # onnx export - end2end = False - include_nms = False - concat = False - - def __init__(self, nc=80, anchors=(), ch=()): # detection layer - super(IAuxDetect, self).__init__() - self.nc = nc # number of classes - self.no = nc + 5 # number of outputs per anchor - self.nl = len(anchors) # number of detection layers - self.na = len(anchors[0]) // 2 # number of anchors - self.grid = [torch.zeros(1)] * self.nl # init grid - a = torch.tensor(anchors).float().view(self.nl, -1, 2) - self.register_buffer('anchors', a) # shape(nl,na,2) - self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2) - self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch[:self.nl]) # output conv - self.m2 = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch[self.nl:]) # output conv - - self.ia = nn.ModuleList(ImplicitA(x) for x in ch[:self.nl]) - self.im = nn.ModuleList(ImplicitM(self.no * self.na) for _ in ch[:self.nl]) - - def forward(self, x): - # x = x.copy() # for profiling - z = [] # inference output - self.training |= self.export - for i in range(self.nl): - x[i] = self.m[i](self.ia[i](x[i])) # conv - x[i] = self.im[i](x[i]) - bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) - x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() - - x[i+self.nl] = self.m2[i](x[i+self.nl]) - x[i+self.nl] = x[i+self.nl].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() - - if not self.training: # inference - if self.grid[i].shape[2:4] != x[i].shape[2:4]: - self.grid[i] = self._make_grid(nx, ny).to(x[i].device) - - y = x[i].sigmoid() - if not torch.onnx.is_in_onnx_export(): - y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy - y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh - else: - xy, wh, conf = y.split((2, 2, self.nc + 1), 4) # y.tensor_split((2, 4, 5), 4) # torch 1.8.0 - xy = xy * (2. * self.stride[i]) + (self.stride[i] * (self.grid[i] - 0.5)) # new xy - wh = wh ** 2 * (4 * self.anchor_grid[i].data) # new wh - y = torch.cat((xy, wh, conf), 4) - z.append(y.view(bs, -1, self.no)) - - return x if self.training else (torch.cat(z, 1), x[:self.nl]) - - def fuseforward(self, x): - # x = x.copy() # for profiling - z = [] # inference output - self.training |= self.export - for i in range(self.nl): - x[i] = self.m[i](x[i]) # conv - bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) - x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() - - if not self.training: # inference - if self.grid[i].shape[2:4] != x[i].shape[2:4]: - self.grid[i] = self._make_grid(nx, ny).to(x[i].device) - - y = x[i].sigmoid() - if not torch.onnx.is_in_onnx_export(): - y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy - y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh - else: - xy = (y[..., 0:2] * 2. 
- 0.5 + self.grid[i]) * self.stride[i] # xy - wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i].data # wh - y = torch.cat((xy, wh, y[..., 4:]), -1) - z.append(y.view(bs, -1, self.no)) - - if self.training: - out = x - elif self.end2end: - out = torch.cat(z, 1) - elif self.include_nms: - z = self.convert(z) - out = (z, ) - elif self.concat: - out = torch.cat(z, 1) - else: - out = (torch.cat(z, 1), x) - - return out - - def fuse(self): - print("IAuxDetect.fuse") - # fuse ImplicitA and Convolution - for i in range(len(self.m)): - c1,c2,_,_ = self.m[i].weight.shape - c1_,c2_, _,_ = self.ia[i].implicit.shape - self.m[i].bias += torch.matmul(self.m[i].weight.reshape(c1,c2),self.ia[i].implicit.reshape(c2_,c1_)).squeeze(1) - - # fuse ImplicitM and Convolution - for i in range(len(self.m)): - c1,c2, _,_ = self.im[i].implicit.shape - self.m[i].bias *= self.im[i].implicit.reshape(c2) - self.m[i].weight *= self.im[i].implicit.transpose(0,1) - - @staticmethod - def _make_grid(nx=20, ny=20): - yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)]) - return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float() - - def convert(self, z): - z = torch.cat(z, 1) - box = z[:, :, :4] - conf = z[:, :, 4:5] - score = z[:, :, 5:] - score *= conf - convert_matrix = torch.tensor([[1, 0, 1, 0], [0, 1, 0, 1], [-0.5, 0, 0.5, 0], [0, -0.5, 0, 0.5]], - dtype=torch.float32, - device=z.device) - box @= convert_matrix - return (box, score) - - -class IBin(nn.Module): - stride = None # strides computed during build - export = False # onnx export - - def __init__(self, nc=80, anchors=(), ch=(), bin_count=21): # detection layer - super(IBin, self).__init__() - self.nc = nc # number of classes - self.bin_count = bin_count - - self.w_bin_sigmoid = SigmoidBin(bin_count=self.bin_count, min=0.0, max=4.0) - self.h_bin_sigmoid = SigmoidBin(bin_count=self.bin_count, min=0.0, max=4.0) - # classes, x,y,obj - self.no = nc + 3 + \ - self.w_bin_sigmoid.get_length() + self.h_bin_sigmoid.get_length() # w-bce, h-bce - # + self.x_bin_sigmoid.get_length() + self.y_bin_sigmoid.get_length() - - self.nl = len(anchors) # number of detection layers - self.na = len(anchors[0]) // 2 # number of anchors - self.grid = [torch.zeros(1)] * self.nl # init grid - a = torch.tensor(anchors).float().view(self.nl, -1, 2) - self.register_buffer('anchors', a) # shape(nl,na,2) - self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2) - self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv - - self.ia = nn.ModuleList(ImplicitA(x) for x in ch) - self.im = nn.ModuleList(ImplicitM(self.no * self.na) for _ in ch) - - def forward(self, x): - - #self.x_bin_sigmoid.use_fw_regression = True - #self.y_bin_sigmoid.use_fw_regression = True - self.w_bin_sigmoid.use_fw_regression = True - self.h_bin_sigmoid.use_fw_regression = True - - # x = x.copy() # for profiling - z = [] # inference output - self.training |= self.export - for i in range(self.nl): - x[i] = self.m[i](self.ia[i](x[i])) # conv - x[i] = self.im[i](x[i]) - bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) - x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() - - if not self.training: # inference - if self.grid[i].shape[2:4] != x[i].shape[2:4]: - self.grid[i] = self._make_grid(nx, ny).to(x[i].device) - - y = x[i].sigmoid() - y[..., 0:2] = (y[..., 0:2] * 2. 
- 0.5 + self.grid[i]) * self.stride[i] # xy - #y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh - - - #px = (self.x_bin_sigmoid.forward(y[..., 0:12]) + self.grid[i][..., 0]) * self.stride[i] - #py = (self.y_bin_sigmoid.forward(y[..., 12:24]) + self.grid[i][..., 1]) * self.stride[i] - - pw = self.w_bin_sigmoid.forward(y[..., 2:24]) * self.anchor_grid[i][..., 0] - ph = self.h_bin_sigmoid.forward(y[..., 24:46]) * self.anchor_grid[i][..., 1] - - #y[..., 0] = px - #y[..., 1] = py - y[..., 2] = pw - y[..., 3] = ph - - y = torch.cat((y[..., 0:4], y[..., 46:]), dim=-1) - - z.append(y.view(bs, -1, y.shape[-1])) - - return x if self.training else (torch.cat(z, 1), x) - - @staticmethod - def _make_grid(nx=20, ny=20): - yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)]) - return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float() - - -class Model(nn.Module): - def __init__(self, cfg='yolor-csp-c.yaml', ch=3, nc=None, anchors=None): # model, input channels, number of classes - super(Model, self).__init__() - self.traced = False - if isinstance(cfg, dict): - self.yaml = cfg # model dict - else: # is *.yaml - import yaml # for torch hub - self.yaml_file = Path(cfg).name - with open(cfg) as f: - self.yaml = yaml.load(f, Loader=yaml.SafeLoader) # model dict - - # Define model - ch = self.yaml['ch'] = self.yaml.get('ch', ch) # input channels - if nc and nc != self.yaml['nc']: - logger.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}") - self.yaml['nc'] = nc # override yaml value - if anchors: - logger.info(f'Overriding model.yaml anchors with anchors={anchors}') - self.yaml['anchors'] = round(anchors) # override yaml value - self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist - self.names = [str(i) for i in range(self.yaml['nc'])] # default names - # print([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))]) - - # Build strides, anchors - m = self.model[-1] # Detect() - if isinstance(m, Detect): - s = 256 # 2x min stride - m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward - check_anchor_order(m) - m.anchors /= m.stride.view(-1, 1, 1) - self.stride = m.stride - self._initialize_biases() # only run once - # print('Strides: %s' % m.stride.tolist()) - if isinstance(m, IDetect): - s = 256 # 2x min stride - m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward - check_anchor_order(m) - m.anchors /= m.stride.view(-1, 1, 1) - self.stride = m.stride - self._initialize_biases() # only run once - # print('Strides: %s' % m.stride.tolist()) - if isinstance(m, IAuxDetect): - s = 256 # 2x min stride - m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))[:4]]) # forward - #print(m.stride) - check_anchor_order(m) - m.anchors /= m.stride.view(-1, 1, 1) - self.stride = m.stride - self._initialize_aux_biases() # only run once - # print('Strides: %s' % m.stride.tolist()) - if isinstance(m, IBin): - s = 256 # 2x min stride - m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward - check_anchor_order(m) - m.anchors /= m.stride.view(-1, 1, 1) - self.stride = m.stride - self._initialize_biases_bin() # only run once - # print('Strides: %s' % m.stride.tolist()) - if isinstance(m, IKeypoint): - s = 256 # 2x min stride - m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward - check_anchor_order(m) - m.anchors /= m.stride.view(-1, 
1, 1) - self.stride = m.stride - self._initialize_biases_kpt() # only run once - # print('Strides: %s' % m.stride.tolist()) - - # Init weights, biases - initialize_weights(self) - self.info() - logger.info('') - - def forward(self, x, augment=False, profile=False): - if augment: - img_size = x.shape[-2:] # height, width - s = [1, 0.83, 0.67] # scales - f = [None, 3, None] # flips (2-ud, 3-lr) - y = [] # outputs - for si, fi in zip(s, f): - xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max())) - yi = self.forward_once(xi)[0] # forward - # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1]) # save - yi[..., :4] /= si # de-scale - if fi == 2: - yi[..., 1] = img_size[0] - yi[..., 1] # de-flip ud - elif fi == 3: - yi[..., 0] = img_size[1] - yi[..., 0] # de-flip lr - y.append(yi) - return torch.cat(y, 1), None # augmented inference, train - else: - return self.forward_once(x, profile) # single-scale inference, train - - def forward_once(self, x, profile=False): - y, dt = [], [] # outputs - for m in self.model: - if m.f != -1: # if not from previous layer - x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers - - if not hasattr(self, 'traced'): - self.traced=False - - if self.traced: - if isinstance(m, Detect) or isinstance(m, IDetect) or isinstance(m, IAuxDetect) or isinstance(m, IKeypoint): - break - - if profile: - c = isinstance(m, (Detect, IDetect, IAuxDetect, IBin)) - o = thop.profile(m, inputs=(x.copy() if c else x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPS - for _ in range(10): - m(x.copy() if c else x) - t = time_synchronized() - for _ in range(10): - m(x.copy() if c else x) - dt.append((time_synchronized() - t) * 100) - print('%10.1f%10.0f%10.1fms %-40s' % (o, m.np, dt[-1], m.type)) - - x = m(x) # run - - y.append(x if m.i in self.save else None) # save output - - if profile: - print('%.1fms total' % sum(dt)) - return x - - def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency - # https://arxiv.org/abs/1708.02002 section 3.3 - # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1. - m = self.model[-1] # Detect() module - for mi, s in zip(m.m, m.stride): # from - b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85) - b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) - b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls - mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) - - def _initialize_aux_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency - # https://arxiv.org/abs/1708.02002 section 3.3 - # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1. 
- m = self.model[-1] # Detect() module - for mi, mi2, s in zip(m.m, m.m2, m.stride): # from - b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85) - b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) - b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls - mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) - b2 = mi2.bias.view(m.na, -1) # conv.bias(255) to (3,85) - b2.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) - b2.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls - mi2.bias = torch.nn.Parameter(b2.view(-1), requires_grad=True) - - def _initialize_biases_bin(self, cf=None): # initialize biases into Detect(), cf is class frequency - # https://arxiv.org/abs/1708.02002 section 3.3 - # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1. - m = self.model[-1] # Bin() module - bc = m.bin_count - for mi, s in zip(m.m, m.stride): # from - b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85) - old = b[:, (0,1,2,bc+3)].data - obj_idx = 2*bc+4 - b[:, :obj_idx].data += math.log(0.6 / (bc + 1 - 0.99)) - b[:, obj_idx].data += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) - b[:, (obj_idx+1):].data += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls - b[:, (0,1,2,bc+3)].data = old - mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) - - def _initialize_biases_kpt(self, cf=None): # initialize biases into Detect(), cf is class frequency - # https://arxiv.org/abs/1708.02002 section 3.3 - # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1. - m = self.model[-1] # Detect() module - for mi, s in zip(m.m, m.stride): # from - b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85) - b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) - b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls - mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) - - def _print_biases(self): - m = self.model[-1] # Detect() module - for mi in m.m: # from - b = mi.bias.detach().view(m.na, -1).T # conv.bias(255) to (3,85) - print(('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean())) - - # def _print_weights(self): - # for m in self.model.modules(): - # if type(m) is Bottleneck: - # print('%10.3g' % (m.w.detach().sigmoid() * 2)) # shortcut weights - - def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers - print('Fusing layers... ') - for m in self.model.modules(): - if isinstance(m, RepConv): - #print(f" fuse_repvgg_block") - m.fuse_repvgg_block() - elif isinstance(m, RepConv_OREPA): - #print(f" switch_to_deploy") - m.switch_to_deploy() - elif type(m) is Conv and hasattr(m, 'bn'): - m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv - delattr(m, 'bn') # remove batchnorm - m.forward = m.fuseforward # update forward - elif isinstance(m, (IDetect, IAuxDetect)): - m.fuse() - m.forward = m.fuseforward - self.info() - return self - - def nms(self, mode=True): # add or remove NMS module - present = type(self.model[-1]) is NMS # last layer is NMS - if mode and not present: - print('Adding NMS... ') - m = NMS() # module - m.f = -1 # from - m.i = self.model[-1].i + 1 # index - self.model.add_module(name='%s' % m.i, module=m) # add - self.eval() - elif not mode and present: - print('Removing NMS... 
') - self.model = self.model[:-1] # remove - return self - - def autoshape(self): # add autoShape module - print('Adding autoShape... ') - m = autoShape(self) # wrap model - copy_attr(m, self, include=('yaml', 'nc', 'hyp', 'names', 'stride'), exclude=()) # copy attributes - return m - - def info(self, verbose=False, img_size=640): # print model information - model_info(self, verbose, img_size) - - -def parse_model(d, ch): # model_dict, input_channels(3) - logger.info('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments')) - anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'] - na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors - no = na * (nc + 5) # number of outputs = anchors * (classes + 5) - - layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out - for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args - m = eval(m) if isinstance(m, str) else m # eval strings - for j, a in enumerate(args): - try: - args[j] = eval(a) if isinstance(a, str) else a # eval strings - except: - pass - - n = max(round(n * gd), 1) if n > 1 else n # depth gain - if m in [nn.Conv2d, Conv, RobustConv, RobustConv2, DWConv, GhostConv, RepConv, RepConv_OREPA, DownC, - SPP, SPPF, SPPCSPC, GhostSPPCSPC, MixConv2d, Focus, Stem, GhostStem, CrossConv, - Bottleneck, BottleneckCSPA, BottleneckCSPB, BottleneckCSPC, - RepBottleneck, RepBottleneckCSPA, RepBottleneckCSPB, RepBottleneckCSPC, - Res, ResCSPA, ResCSPB, ResCSPC, - RepRes, RepResCSPA, RepResCSPB, RepResCSPC, - ResX, ResXCSPA, ResXCSPB, ResXCSPC, - RepResX, RepResXCSPA, RepResXCSPB, RepResXCSPC, - Ghost, GhostCSPA, GhostCSPB, GhostCSPC, - SwinTransformerBlock, STCSPA, STCSPB, STCSPC, - SwinTransformer2Block, ST2CSPA, ST2CSPB, ST2CSPC]: - c1, c2 = ch[f], args[0] - if c2 != no: # if not output - c2 = make_divisible(c2 * gw, 8) - - args = [c1, c2, *args[1:]] - if m in [DownC, SPPCSPC, GhostSPPCSPC, - BottleneckCSPA, BottleneckCSPB, BottleneckCSPC, - RepBottleneckCSPA, RepBottleneckCSPB, RepBottleneckCSPC, - ResCSPA, ResCSPB, ResCSPC, - RepResCSPA, RepResCSPB, RepResCSPC, - ResXCSPA, ResXCSPB, ResXCSPC, - RepResXCSPA, RepResXCSPB, RepResXCSPC, - GhostCSPA, GhostCSPB, GhostCSPC, - STCSPA, STCSPB, STCSPC, - ST2CSPA, ST2CSPB, ST2CSPC]: - args.insert(2, n) # number of repeats - n = 1 - elif m is nn.BatchNorm2d: - args = [ch[f]] - elif m is Concat: - c2 = sum([ch[x] for x in f]) - elif m is Chuncat: - c2 = sum([ch[x] for x in f]) - elif m is Shortcut: - c2 = ch[f[0]] - elif m is Foldcut: - c2 = ch[f] // 2 - elif m in [Detect, IDetect, IAuxDetect, IBin, IKeypoint]: - args.append([ch[x] for x in f]) - if isinstance(args[1], int): # number of anchors - args[1] = [list(range(args[1] * 2))] * len(f) - elif m is ReOrg: - c2 = ch[f] * 4 - elif m is Contract: - c2 = ch[f] * args[0] ** 2 - elif m is Expand: - c2 = ch[f] // args[0] ** 2 - else: - c2 = ch[f] - - m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args) # module - t = str(m)[8:-2].replace('__main__.', '') # module type - np = sum([x.numel() for x in m_.parameters()]) # number params - m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params - logger.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args)) # print - save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist - layers.append(m_) - if i == 0: - ch = [] - ch.append(c2) - return nn.Sequential(*layers), sorted(save) - - -if __name__ 
== '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--cfg', type=str, default='yolor-csp-c.yaml', help='model.yaml') - parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') - parser.add_argument('--profile', action='store_true', help='profile model speed') - opt = parser.parse_args() - opt.cfg = check_file(opt.cfg) # check file - set_logging() - device = select_device(opt.device) - - # Create model - model = Model(opt.cfg).to(device) - model.train() - - if opt.profile: - img = torch.rand(1, 3, 640, 640).to(device) - y = model(img, profile=True) - - # Profile - # img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 640, 640).to(device) - # y = model(img, profile=True) - - # Tensorboard - # from torch.utils.tensorboard import SummaryWriter - # tb_writer = SummaryWriter() - # print("Run 'tensorboard --logdir=models/runs' to view tensorboard at http://localhost:6006/") - # tb_writer.add_graph(model.model, img) # add model to tensorboard - # tb_writer.add_image('test', img[0], dataformats='CWH') # add model to tensorboard diff --git a/spaces/jvcanavarro/traits-prediction/src/__init__.py b/spaces/jvcanavarro/traits-prediction/src/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/kabita-choudhary/audio/README.md b/spaces/kabita-choudhary/audio/README.md deleted file mode 100644 index d1683685fd583ce1984508319cea8d154c1813d4..0000000000000000000000000000000000000000 --- a/spaces/kabita-choudhary/audio/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Audio -emoji: 🏢 -colorFrom: pink -colorTo: yellow -sdk: gradio -sdk_version: 3.13.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/kainy/rvc_okiba_TTS/lib/infer_pack/commons.py b/spaces/kainy/rvc_okiba_TTS/lib/infer_pack/commons.py deleted file mode 100644 index 54470986f37825b35d90d7efa7437d1c26b87215..0000000000000000000000000000000000000000 --- a/spaces/kainy/rvc_okiba_TTS/lib/infer_pack/commons.py +++ /dev/null @@ -1,166 +0,0 @@ -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size * dilation - dilation) / 2) - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def kl_divergence(m_p, logs_p, m_q, logs_q): - """KL(P||Q)""" - kl = (logs_q - logs_p) - 0.5 - kl += ( - 0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q) - ) - return kl - - -def rand_gumbel(shape): - """Sample from the Gumbel distribution, protect from overflows.""" - uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 - return -torch.log(-torch.log(uniform_samples)) - - -def rand_gumbel_like(x): - g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) - return g - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def slice_segments2(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :segment_size]) - for i in 
range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4): - position = torch.arange(length, dtype=torch.float) - num_timescales = channels // 2 - log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / ( - num_timescales - 1 - ) - inv_timescales = min_timescale * torch.exp( - torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment - ) - scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) - signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) - signal = F.pad(signal, [0, 0, 0, channels % 2]) - signal = signal.view(1, channels, length) - return signal - - -def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return x + signal.to(dtype=x.dtype, device=x.device) - - -def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) - - -def subsequent_mask(length): - mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) - return mask - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def shift_1d(x): - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] - return x - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def generate_path(duration, mask): - """ - duration: [b, 1, t_x] - mask: [b, 1, t_y, t_x] - """ - device = duration.device - - b, _, t_y, t_x = mask.shape - cum_duration = torch.cumsum(duration, -1) - - cum_duration_flat = cum_duration.view(b * t_x) - path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2, 3) * mask - return path - - -def clip_grad_value_(parameters, clip_value, norm_type=2): - if isinstance(parameters, torch.Tensor): - parameters = [parameters] - parameters = list(filter(lambda p: p.grad is not None, parameters)) - norm_type = float(norm_type) - if clip_value is not None: - clip_value = float(clip_value) - - total_norm = 0 - for p in parameters: - param_norm = p.grad.data.norm(norm_type) - total_norm += param_norm.item() ** norm_type - if clip_value is not None: - p.grad.data.clamp_(min=-clip_value, max=clip_value) - total_norm = total_norm ** (1.0 / 
norm_type) - return total_norm diff --git a/spaces/kanden/vits-uma-genshin-honkai/utils.py b/spaces/kanden/vits-uma-genshin-honkai/utils.py deleted file mode 100644 index ee4b01ddfbe8173965371b29f770f3e87615fe71..0000000000000000000000000000000000000000 --- a/spaces/kanden/vits-uma-genshin-honkai/utils.py +++ /dev/null @@ -1,225 +0,0 @@ -import os -import sys -import argparse -import logging -import json -import subprocess -import numpy as np -import librosa -import torch - -MATPLOTLIB_FLAG = False - -logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) -logger = logging - - -def load_checkpoint(checkpoint_path, model, optimizer=None): - assert os.path.isfile(checkpoint_path) - checkpoint_dict = torch.load(checkpoint_path, map_location='cpu') - iteration = checkpoint_dict['iteration'] - learning_rate = checkpoint_dict['learning_rate'] - if optimizer is not None: - optimizer.load_state_dict(checkpoint_dict['optimizer']) - saved_state_dict = checkpoint_dict['model'] - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - new_state_dict= {} - for k, v in state_dict.items(): - try: - new_state_dict[k] = saved_state_dict[k] - except: - logger.info("%s is not in the checkpoint" % k) - new_state_dict[k] = v - if hasattr(model, 'module'): - model.module.load_state_dict(new_state_dict) - else: - model.load_state_dict(new_state_dict) - logger.info("Loaded checkpoint '{}' (iteration {})" .format( - checkpoint_path, iteration)) - return model, optimizer, learning_rate, iteration - - -def plot_spectrogram_to_numpy(spectrogram): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(10,2)) - im = ax.imshow(spectrogram, aspect="auto", origin="lower", - interpolation='none') - plt.colorbar(im, ax=ax) - plt.xlabel("Frames") - plt.ylabel("Channels") - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def plot_alignment_to_numpy(alignment, info=None): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(6, 4)) - im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower', - interpolation='none') - fig.colorbar(im, ax=ax) - xlabel = 'Decoder timestep' - if info is not None: - xlabel += '\n\n' + info - plt.xlabel(xlabel) - plt.ylabel('Encoder timestep') - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def load_audio_to_torch(full_path, target_sampling_rate): - audio, sampling_rate = librosa.load(full_path, sr=target_sampling_rate, mono=True) - return torch.FloatTensor(audio.astype(np.float32)) - - -def load_filepaths_and_text(filename, split="|"): - with open(filename, encoding='utf-8') as f: - filepaths_and_text = [line.strip().split(split) for line in f] - return filepaths_and_text - - -def get_hparams(init=True): - parser = 
argparse.ArgumentParser() - parser.add_argument('-c', '--config', type=str, default="./configs/base.json", - help='JSON file for configuration') - parser.add_argument('-m', '--model', type=str, required=True, - help='Model name') - - args = parser.parse_args() - model_dir = os.path.join("./logs", args.model) - - if not os.path.exists(model_dir): - os.makedirs(model_dir) - - config_path = args.config - config_save_path = os.path.join(model_dir, "config.json") - if init: - with open(config_path, "r") as f: - data = f.read() - with open(config_save_path, "w") as f: - f.write(data) - else: - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_dir(model_dir): - config_save_path = os.path.join(model_dir, "config.json") - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams =HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_file(config_path): - with open(config_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams =HParams(**config) - return hparams - - -def check_git_hash(model_dir): - source_dir = os.path.dirname(os.path.realpath(__file__)) - if not os.path.exists(os.path.join(source_dir, ".git")): - logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format( - source_dir - )) - return - - cur_hash = subprocess.getoutput("git rev-parse HEAD") - - path = os.path.join(model_dir, "githash") - if os.path.exists(path): - saved_hash = open(path).read() - if saved_hash != cur_hash: - logger.warn("git hash values are different. {}(saved) != {}(current)".format( - saved_hash[:8], cur_hash[:8])) - else: - open(path, "w").write(cur_hash) - - -def get_logger(model_dir, filename="train.log"): - global logger - logger = logging.getLogger(os.path.basename(model_dir)) - logger.setLevel(logging.DEBUG) - - formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s") - if not os.path.exists(model_dir): - os.makedirs(model_dir) - h = logging.FileHandler(os.path.join(model_dir, filename)) - h.setLevel(logging.DEBUG) - h.setFormatter(formatter) - logger.addHandler(h) - return logger - - -class HParams(): - def __init__(self, **kwargs): - for k, v in kwargs.items(): - if type(v) == dict: - v = HParams(**v) - self[k] = v - - def keys(self): - return self.__dict__.keys() - - def items(self): - return self.__dict__.items() - - def values(self): - return self.__dict__.values() - - def __len__(self): - return len(self.__dict__) - - def __getitem__(self, key): - return getattr(self, key) - - def __setitem__(self, key, value): - return setattr(self, key, value) - - def __contains__(self, key): - return key in self.__dict__ - - def __repr__(self): - return self.__dict__.__repr__() diff --git a/spaces/karolmajek/YOLOR/scripts/get_pretrain.sh b/spaces/karolmajek/YOLOR/scripts/get_pretrain.sh deleted file mode 100644 index 6ce06afd9330b54e8108a83642dff2ccaffdd2df..0000000000000000000000000000000000000000 --- a/spaces/karolmajek/YOLOR/scripts/get_pretrain.sh +++ /dev/null @@ -1,7 +0,0 @@ -curl -c ./cookie -s -L "https://drive.google.com/uc?export=download&id=1Tdn3yqpZ79X7R1Ql0zNlNScB1Dv9Fp76" > /dev/null -curl -Lb ./cookie "https://drive.google.com/uc?export=download&confirm=`awk '/download/ {print $NF}' ./cookie`&id=1Tdn3yqpZ79X7R1Ql0zNlNScB1Dv9Fp76" -o yolor_p6.pt -rm ./cookie - -curl -c ./cookie -s -L 
"https://drive.google.com/uc?export=download&id=1UflcHlN5ERPdhahMivQYCbWWw7d2wY7U" > /dev/null -curl -Lb ./cookie "https://drive.google.com/uc?export=download&confirm=`awk '/download/ {print $NF}' ./cookie`&id=1UflcHlN5ERPdhahMivQYCbWWw7d2wY7U" -o yolor_w6.pt -rm ./cookie diff --git a/spaces/kevinwang676/ChatGLM2-SadTalker/src/face3d/models/arcface_torch/partial_fc.py b/spaces/kevinwang676/ChatGLM2-SadTalker/src/face3d/models/arcface_torch/partial_fc.py deleted file mode 100644 index 17e2d25715d10ba446c957e1d2528b0687ed71d5..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/ChatGLM2-SadTalker/src/face3d/models/arcface_torch/partial_fc.py +++ /dev/null @@ -1,222 +0,0 @@ -import logging -import os - -import torch -import torch.distributed as dist -from torch.nn import Module -from torch.nn.functional import normalize, linear -from torch.nn.parameter import Parameter - - -class PartialFC(Module): - """ - Author: {Xiang An, Yang Xiao, XuHan Zhu} in DeepGlint, - Partial FC: Training 10 Million Identities on a Single Machine - See the original paper: - https://arxiv.org/abs/2010.05222 - """ - - @torch.no_grad() - def __init__(self, rank, local_rank, world_size, batch_size, resume, - margin_softmax, num_classes, sample_rate=1.0, embedding_size=512, prefix="./"): - """ - rank: int - Unique process(GPU) ID from 0 to world_size - 1. - local_rank: int - Unique process(GPU) ID within the server from 0 to 7. - world_size: int - Number of GPU. - batch_size: int - Batch size on current rank(GPU). - resume: bool - Select whether to restore the weight of softmax. - margin_softmax: callable - A function of margin softmax, eg: cosface, arcface. - num_classes: int - The number of class center storage in current rank(CPU/GPU), usually is total_classes // world_size, - required. - sample_rate: float - The partial fc sampling rate, when the number of classes increases to more than 2 millions, Sampling - can greatly speed up training, and reduce a lot of GPU memory, default is 1.0. - embedding_size: int - The feature dimension, default is 512. - prefix: str - Path for save checkpoint, default is './'. 
- """ - super(PartialFC, self).__init__() - # - self.num_classes: int = num_classes - self.rank: int = rank - self.local_rank: int = local_rank - self.device: torch.device = torch.device("cuda:{}".format(self.local_rank)) - self.world_size: int = world_size - self.batch_size: int = batch_size - self.margin_softmax: callable = margin_softmax - self.sample_rate: float = sample_rate - self.embedding_size: int = embedding_size - self.prefix: str = prefix - self.num_local: int = num_classes // world_size + int(rank < num_classes % world_size) - self.class_start: int = num_classes // world_size * rank + min(rank, num_classes % world_size) - self.num_sample: int = int(self.sample_rate * self.num_local) - - self.weight_name = os.path.join(self.prefix, "rank_{}_softmax_weight.pt".format(self.rank)) - self.weight_mom_name = os.path.join(self.prefix, "rank_{}_softmax_weight_mom.pt".format(self.rank)) - - if resume: - try: - self.weight: torch.Tensor = torch.load(self.weight_name) - self.weight_mom: torch.Tensor = torch.load(self.weight_mom_name) - if self.weight.shape[0] != self.num_local or self.weight_mom.shape[0] != self.num_local: - raise IndexError - logging.info("softmax weight resume successfully!") - logging.info("softmax weight mom resume successfully!") - except (FileNotFoundError, KeyError, IndexError): - self.weight = torch.normal(0, 0.01, (self.num_local, self.embedding_size), device=self.device) - self.weight_mom: torch.Tensor = torch.zeros_like(self.weight) - logging.info("softmax weight init!") - logging.info("softmax weight mom init!") - else: - self.weight = torch.normal(0, 0.01, (self.num_local, self.embedding_size), device=self.device) - self.weight_mom: torch.Tensor = torch.zeros_like(self.weight) - logging.info("softmax weight init successfully!") - logging.info("softmax weight mom init successfully!") - self.stream: torch.cuda.Stream = torch.cuda.Stream(local_rank) - - self.index = None - if int(self.sample_rate) == 1: - self.update = lambda: 0 - self.sub_weight = Parameter(self.weight) - self.sub_weight_mom = self.weight_mom - else: - self.sub_weight = Parameter(torch.empty((0, 0)).cuda(local_rank)) - - def save_params(self): - """ Save softmax weight for each rank on prefix - """ - torch.save(self.weight.data, self.weight_name) - torch.save(self.weight_mom, self.weight_mom_name) - - @torch.no_grad() - def sample(self, total_label): - """ - Sample all positive class centers in each rank, and random select neg class centers to filling a fixed - `num_sample`. - - total_label: tensor - Label after all gather, which cross all GPUs. 
- """ - index_positive = (self.class_start <= total_label) & (total_label < self.class_start + self.num_local) - total_label[~index_positive] = -1 - total_label[index_positive] -= self.class_start - if int(self.sample_rate) != 1: - positive = torch.unique(total_label[index_positive], sorted=True) - if self.num_sample - positive.size(0) >= 0: - perm = torch.rand(size=[self.num_local], device=self.device) - perm[positive] = 2.0 - index = torch.topk(perm, k=self.num_sample)[1] - index = index.sort()[0] - else: - index = positive - self.index = index - total_label[index_positive] = torch.searchsorted(index, total_label[index_positive]) - self.sub_weight = Parameter(self.weight[index]) - self.sub_weight_mom = self.weight_mom[index] - - def forward(self, total_features, norm_weight): - """ Partial fc forward, `logits = X * sample(W)` - """ - torch.cuda.current_stream().wait_stream(self.stream) - logits = linear(total_features, norm_weight) - return logits - - @torch.no_grad() - def update(self): - """ Set updated weight and weight_mom to memory bank. - """ - self.weight_mom[self.index] = self.sub_weight_mom - self.weight[self.index] = self.sub_weight - - def prepare(self, label, optimizer): - """ - get sampled class centers for cal softmax. - - label: tensor - Label tensor on each rank. - optimizer: opt - Optimizer for partial fc, which need to get weight mom. - """ - with torch.cuda.stream(self.stream): - total_label = torch.zeros( - size=[self.batch_size * self.world_size], device=self.device, dtype=torch.long) - dist.all_gather(list(total_label.chunk(self.world_size, dim=0)), label) - self.sample(total_label) - optimizer.state.pop(optimizer.param_groups[-1]['params'][0], None) - optimizer.param_groups[-1]['params'][0] = self.sub_weight - optimizer.state[self.sub_weight]['momentum_buffer'] = self.sub_weight_mom - norm_weight = normalize(self.sub_weight) - return total_label, norm_weight - - def forward_backward(self, label, features, optimizer): - """ - Partial fc forward and backward with model parallel - - label: tensor - Label tensor on each rank(GPU) - features: tensor - Features tensor on each rank(GPU) - optimizer: optimizer - Optimizer for partial fc - - Returns: - -------- - x_grad: tensor - The gradient of features. - loss_v: tensor - Loss value for cross entropy. 
- """ - total_label, norm_weight = self.prepare(label, optimizer) - total_features = torch.zeros( - size=[self.batch_size * self.world_size, self.embedding_size], device=self.device) - dist.all_gather(list(total_features.chunk(self.world_size, dim=0)), features.data) - total_features.requires_grad = True - - logits = self.forward(total_features, norm_weight) - logits = self.margin_softmax(logits, total_label) - - with torch.no_grad(): - max_fc = torch.max(logits, dim=1, keepdim=True)[0] - dist.all_reduce(max_fc, dist.ReduceOp.MAX) - - # calculate exp(logits) and all-reduce - logits_exp = torch.exp(logits - max_fc) - logits_sum_exp = logits_exp.sum(dim=1, keepdims=True) - dist.all_reduce(logits_sum_exp, dist.ReduceOp.SUM) - - # calculate prob - logits_exp.div_(logits_sum_exp) - - # get one-hot - grad = logits_exp - index = torch.where(total_label != -1)[0] - one_hot = torch.zeros(size=[index.size()[0], grad.size()[1]], device=grad.device) - one_hot.scatter_(1, total_label[index, None], 1) - - # calculate loss - loss = torch.zeros(grad.size()[0], 1, device=grad.device) - loss[index] = grad[index].gather(1, total_label[index, None]) - dist.all_reduce(loss, dist.ReduceOp.SUM) - loss_v = loss.clamp_min_(1e-30).log_().mean() * (-1) - - # calculate grad - grad[index] -= one_hot - grad.div_(self.batch_size * self.world_size) - - logits.backward(grad) - if total_features.grad is not None: - total_features.grad.detach_() - x_grad: torch.Tensor = torch.zeros_like(features, requires_grad=True) - # feature gradient all-reduce - dist.reduce_scatter(x_grad, list(total_features.grad.chunk(self.world_size, dim=0))) - x_grad = x_grad * self.world_size - # backward backbone - return x_grad, loss_v diff --git a/spaces/kevinwang676/Personal-TTS-v3/output_training_data/__init__.py b/spaces/kevinwang676/Personal-TTS-v3/output_training_data/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/kidcoconut/spcdkr_omdenasaudi_liverhccxai/routes/api/rte_api.py b/spaces/kidcoconut/spcdkr_omdenasaudi_liverhccxai/routes/api/rte_api.py deleted file mode 100644 index c74d0041bbe645968740a8755b7d34fb142e1b30..0000000000000000000000000000000000000000 --- a/spaces/kidcoconut/spcdkr_omdenasaudi_liverhccxai/routes/api/rte_api.py +++ /dev/null @@ -1,79 +0,0 @@ -from fastapi import APIRouter, Request, Response -from fastapi.responses import JSONResponse - -import pandas as pd -import json - -#import lib.claims as libClaims -#from lib.models import mdl_utils, mdl_xgb - - -rteApi = APIRouter() - - -#--- -@rteApi.get('/') -def api_entry(): - return { - "message": "api routing - welcome to Omdena Saudi HCC api" - } - - - -''' -#--- >>> SAMPLE CODE BELOW -#--- return json for claims data (merged) -#--- note: current is kaggle, but future could include from yyyymm filter -@rteApi.get('/claims', response_class = JSONResponse) -def api_getClaims(request: Request, response: Response): - pdfClaims = libClaims.load_claims() - jsonSample = pdfClaims.head(50).to_json(orient="records", indent=4) - result = json.loads(jsonSample) - return result - - -#--- return json for featEng -@rteApi.get('/claims/doFeatEng/', response_class = JSONResponse) -def tst_claims_featEng(): - pdfClaims = libClaims.load_claims() - pdfFeatEng = libClaims.do_featEng(pdfClaims) - jsonSample = pdfClaims.head(50).to_json(orient="records", indent=4) - result = json.loads(jsonSample) - return result - - -@rteApi.get('/claims/doStdScaling/', response_class = JSONResponse) -def 
tst_claims_stdScaling(): - pdfClaims = libClaims.load_claims() - pdfFeatEng = libClaims.do_featEng(pdfClaims) - pdfScaled = mdl_utils.doClaims_stdScaler_toPdf(pdfFeatEng) - - jsonSample = pdfClaims.head(50).to_json(orient="records", indent=4) - result = json.loads(jsonSample) - return result - - -@rteApi.get('/claims/predict/superv', response_class = JSONResponse) -@rteApi.get('/claims/predict/xgb', response_class = JSONResponse) -def predict_xgb(): - #--- load test data - pdfClaims = libClaims.load_claims() - pdfFeatEng = libClaims.do_featEng(pdfClaims) - - npaScaled = mdl_utils.do_stdScaler(pdfFeatEng) - pdfScaled = mdl_utils.do_stdScaler_toPdf(npaScaled) - - ndaPredict = mdl_xgb.predict(npaScaled) - pdfPredict = pd.DataFrame(ndaPredict) - - #--- stitch the grouped data with the labels - pdfResults = pdfScaled.copy() - pdfResults.insert(0, "hasAnom?", pdfPredict[0]) - - #--- filter to only those rows that are flagged with an anomaly - pdfResults = pdfResults[pdfResults['hasAnom?'] > 0] - - jsonSample = pdfResults.head(50).to_json(orient="records", indent=4) - result = json.loads(jsonSample) - return result -''' \ No newline at end of file diff --git a/spaces/kidcoconut/spcstm_omdenasaudi_liverhccxai/uix/pages/lit_qaConfigCheck.py b/spaces/kidcoconut/spcstm_omdenasaudi_liverhccxai/uix/pages/lit_qaConfigCheck.py deleted file mode 100644 index a7bb8872241c02260a27cb7254313c1db904df00..0000000000000000000000000000000000000000 --- a/spaces/kidcoconut/spcstm_omdenasaudi_liverhccxai/uix/pages/lit_qaConfigCheck.py +++ /dev/null @@ -1,88 +0,0 @@ -#--- about page -import streamlit as st -import sys, os -import pandas as pd - -import lib.utils as libUtils - - -description = "QA: Config Check" -def run(): - - print("\nINFO (lit_config.run) loading ", description, " page ...") - - #--- - #st.experimental_memo.clear() #--- try to clear cache each time this page is hit - #st.cache_data.clear() - - st.markdown('### Configuration Check') - - #--- check that base folders exist - #--- list raw WSIs - lstWSI = os.listdir(libUtils.pth_dtaWsi + "raw/") - print("TRACE: ", lstWSI) - st.dataframe( - pd.DataFrame({"Raw WSI": lstWSI,}), - use_container_width=True - ) - - #--- list raw Tiles - lstTiles = os.listdir(libUtils.pth_dtaTiles + "raw/") - print("TRACE: ", lstTiles) - st.dataframe( - pd.DataFrame({"Raw Tiles": lstTiles,}), - use_container_width=True - ) - - #--- list raw demo Tiles - lstDemo = os.listdir(libUtils.pth_dtaDemoTiles + "raw/") - print("TRACE: ", lstDemo) - st.dataframe( - pd.DataFrame({"Raw Demo Tiles": lstDemo,}), - use_container_width=True - ) - - - st.markdown(''' - - ''', unsafe_allow_html=True) - - -# st.markdown( - # st.footer( - # """ - # Configuration Check page - # """, - # unsafe_allow_html=True, - # ) - - cssFooter=""" - - - """ - st.markdown(cssFooter, unsafe_allow_html=True) \ No newline at end of file diff --git a/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmcv/cnn/bricks/upsample.py b/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmcv/cnn/bricks/upsample.py deleted file mode 100644 index a1a353767d0ce8518f0d7289bed10dba0178ed12..0000000000000000000000000000000000000000 --- a/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmcv/cnn/bricks/upsample.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import torch.nn as nn -import torch.nn.functional as F - -from ..utils import xavier_init -from .registry import UPSAMPLE_LAYERS - -UPSAMPLE_LAYERS.register_module('nearest', module=nn.Upsample) -UPSAMPLE_LAYERS.register_module('bilinear', module=nn.Upsample) - - -@UPSAMPLE_LAYERS.register_module(name='pixel_shuffle') -class PixelShufflePack(nn.Module): - """Pixel Shuffle upsample layer. - - This module packs `F.pixel_shuffle()` and a nn.Conv2d module together to - achieve a simple upsampling with pixel shuffle. - - Args: - in_channels (int): Number of input channels. - out_channels (int): Number of output channels. - scale_factor (int): Upsample ratio. - upsample_kernel (int): Kernel size of the conv layer to expand the - channels. - """ - - def __init__(self, in_channels, out_channels, scale_factor, - upsample_kernel): - super(PixelShufflePack, self).__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.scale_factor = scale_factor - self.upsample_kernel = upsample_kernel - self.upsample_conv = nn.Conv2d( - self.in_channels, - self.out_channels * scale_factor * scale_factor, - self.upsample_kernel, - padding=(self.upsample_kernel - 1) // 2) - self.init_weights() - - def init_weights(self): - xavier_init(self.upsample_conv, distribution='uniform') - - def forward(self, x): - x = self.upsample_conv(x) - x = F.pixel_shuffle(x, self.scale_factor) - return x - - -def build_upsample_layer(cfg, *args, **kwargs): - """Build upsample layer. - - Args: - cfg (dict): The upsample layer config, which should contain: - - - type (str): Layer type. - - scale_factor (int): Upsample ratio, which is not applicable to - deconv. - - layer args: Args needed to instantiate a upsample layer. - args (argument list): Arguments passed to the ``__init__`` - method of the corresponding conv layer. - kwargs (keyword arguments): Keyword arguments passed to the - ``__init__`` method of the corresponding conv layer. - - Returns: - nn.Module: Created upsample layer. - """ - if not isinstance(cfg, dict): - raise TypeError(f'cfg must be a dict, but got {type(cfg)}') - if 'type' not in cfg: - raise KeyError( - f'the cfg dict must contain the key "type", but got {cfg}') - cfg_ = cfg.copy() - - layer_type = cfg_.pop('type') - if layer_type not in UPSAMPLE_LAYERS: - raise KeyError(f'Unrecognized upsample type {layer_type}') - else: - upsample = UPSAMPLE_LAYERS.get(layer_type) - - if upsample is nn.Upsample: - cfg_['mode'] = layer_type - layer = upsample(*args, **kwargs, **cfg_) - return layer diff --git a/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmcv/ops/gather_points.py b/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmcv/ops/gather_points.py deleted file mode 100644 index f52f1677d8ea0facafc56a3672d37adb44677ff3..0000000000000000000000000000000000000000 --- a/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmcv/ops/gather_points.py +++ /dev/null @@ -1,57 +0,0 @@ -import torch -from torch.autograd import Function - -from ..utils import ext_loader - -ext_module = ext_loader.load_ext( - '_ext', ['gather_points_forward', 'gather_points_backward']) - - -class GatherPoints(Function): - """Gather points with given index.""" - - @staticmethod - def forward(ctx, features: torch.Tensor, - indices: torch.Tensor) -> torch.Tensor: - """ - Args: - features (Tensor): (B, C, N) features to gather. - indices (Tensor): (B, M) where M is the number of points. - - Returns: - Tensor: (B, C, M) where M is the number of points. 
- """ - assert features.is_contiguous() - assert indices.is_contiguous() - - B, npoint = indices.size() - _, C, N = features.size() - output = torch.cuda.FloatTensor(B, C, npoint) - - ext_module.gather_points_forward( - features, indices, output, b=B, c=C, n=N, npoints=npoint) - - ctx.for_backwards = (indices, C, N) - if torch.__version__ != 'parrots': - ctx.mark_non_differentiable(indices) - return output - - @staticmethod - def backward(ctx, grad_out): - idx, C, N = ctx.for_backwards - B, npoint = idx.size() - - grad_features = torch.cuda.FloatTensor(B, C, N).zero_() - grad_out_data = grad_out.data.contiguous() - ext_module.gather_points_backward( - grad_out_data, - idx, - grad_features.data, - b=B, - c=C, - n=N, - npoints=npoint) - return grad_features, None - - -gather_points = GatherPoints.apply diff --git a/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmcv/video/processing.py b/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmcv/video/processing.py deleted file mode 100644 index 3d90b96e0823d5f116755e7f498d25d17017224a..0000000000000000000000000000000000000000 --- a/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmcv/video/processing.py +++ /dev/null @@ -1,160 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import os -import os.path as osp -import subprocess -import tempfile - -from annotator.uniformer.mmcv.utils import requires_executable - - -@requires_executable('ffmpeg') -def convert_video(in_file, - out_file, - print_cmd=False, - pre_options='', - **kwargs): - """Convert a video with ffmpeg. - - This provides a general api to ffmpeg, the executed command is:: - - `ffmpeg -y -i ` - - Options(kwargs) are mapped to ffmpeg commands with the following rules: - - - key=val: "-key val" - - key=True: "-key" - - key=False: "" - - Args: - in_file (str): Input video filename. - out_file (str): Output video filename. - pre_options (str): Options appears before "-i ". - print_cmd (bool): Whether to print the final ffmpeg command. - """ - options = [] - for k, v in kwargs.items(): - if isinstance(v, bool): - if v: - options.append(f'-{k}') - elif k == 'log_level': - assert v in [ - 'quiet', 'panic', 'fatal', 'error', 'warning', 'info', - 'verbose', 'debug', 'trace' - ] - options.append(f'-loglevel {v}') - else: - options.append(f'-{k} {v}') - cmd = f'ffmpeg -y {pre_options} -i {in_file} {" ".join(options)} ' \ - f'{out_file}' - if print_cmd: - print(cmd) - subprocess.call(cmd, shell=True) - - -@requires_executable('ffmpeg') -def resize_video(in_file, - out_file, - size=None, - ratio=None, - keep_ar=False, - log_level='info', - print_cmd=False): - """Resize a video. - - Args: - in_file (str): Input video filename. - out_file (str): Output video filename. - size (tuple): Expected size (w, h), eg, (320, 240) or (320, -1). - ratio (tuple or float): Expected resize ratio, (2, 0.5) means - (w*2, h*0.5). - keep_ar (bool): Whether to keep original aspect ratio. - log_level (str): Logging level of ffmpeg. - print_cmd (bool): Whether to print the final ffmpeg command. 
- """ - if size is None and ratio is None: - raise ValueError('expected size or ratio must be specified') - if size is not None and ratio is not None: - raise ValueError('size and ratio cannot be specified at the same time') - options = {'log_level': log_level} - if size: - if not keep_ar: - options['vf'] = f'scale={size[0]}:{size[1]}' - else: - options['vf'] = f'scale=w={size[0]}:h={size[1]}:' \ - 'force_original_aspect_ratio=decrease' - else: - if not isinstance(ratio, tuple): - ratio = (ratio, ratio) - options['vf'] = f'scale="trunc(iw*{ratio[0]}):trunc(ih*{ratio[1]})"' - convert_video(in_file, out_file, print_cmd, **options) - - -@requires_executable('ffmpeg') -def cut_video(in_file, - out_file, - start=None, - end=None, - vcodec=None, - acodec=None, - log_level='info', - print_cmd=False): - """Cut a clip from a video. - - Args: - in_file (str): Input video filename. - out_file (str): Output video filename. - start (None or float): Start time (in seconds). - end (None or float): End time (in seconds). - vcodec (None or str): Output video codec, None for unchanged. - acodec (None or str): Output audio codec, None for unchanged. - log_level (str): Logging level of ffmpeg. - print_cmd (bool): Whether to print the final ffmpeg command. - """ - options = {'log_level': log_level} - if vcodec is None: - options['vcodec'] = 'copy' - if acodec is None: - options['acodec'] = 'copy' - if start: - options['ss'] = start - else: - start = 0 - if end: - options['t'] = end - start - convert_video(in_file, out_file, print_cmd, **options) - - -@requires_executable('ffmpeg') -def concat_video(video_list, - out_file, - vcodec=None, - acodec=None, - log_level='info', - print_cmd=False): - """Concatenate multiple videos into a single one. - - Args: - video_list (list): A list of video filenames - out_file (str): Output video filename - vcodec (None or str): Output video codec, None for unchanged - acodec (None or str): Output audio codec, None for unchanged - log_level (str): Logging level of ffmpeg. - print_cmd (bool): Whether to print the final ffmpeg command. 
- """ - tmp_filehandler, tmp_filename = tempfile.mkstemp(suffix='.txt', text=True) - with open(tmp_filename, 'w') as f: - for filename in video_list: - f.write(f'file {osp.abspath(filename)}\n') - options = {'log_level': log_level} - if vcodec is None: - options['vcodec'] = 'copy' - if acodec is None: - options['acodec'] = 'copy' - convert_video( - tmp_filename, - out_file, - print_cmd, - pre_options='-f concat -safe 0', - **options) - os.close(tmp_filehandler) - os.remove(tmp_filename) diff --git a/spaces/knotmesh/deepset-roberta-base-squad2/README.md b/spaces/knotmesh/deepset-roberta-base-squad2/README.md deleted file mode 100644 index 23eba54373b8a9465ff2e540706060d6b84a1290..0000000000000000000000000000000000000000 --- a/spaces/knotmesh/deepset-roberta-base-squad2/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Deepset Roberta Base Squad2 -emoji: 🏢 -colorFrom: gray -colorTo: yellow -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/koajoel/PolyFormer/fairseq/examples/multilingual/data_scripts/utils/fasttext_multi_filter.py b/spaces/koajoel/PolyFormer/fairseq/examples/multilingual/data_scripts/utils/fasttext_multi_filter.py deleted file mode 100644 index 41b38ba5bef20cb043921ac61820db8689189a5a..0000000000000000000000000000000000000000 --- a/spaces/koajoel/PolyFormer/fairseq/examples/multilingual/data_scripts/utils/fasttext_multi_filter.py +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - - -#!/bin/python - -import fasttext -from multiprocessing import Pool -import contextlib -import sys -import argparse -from functools import partial -import io - -model = None -def init(model_path): - global model - model = fasttext.load_model(model_path) - -def pred(lines): - return lines, [model.predict(line.strip())[0][0][9:] for line in lines] - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument("--model", type=str, required=True, - help="model to load") - parser.add_argument("--inputs", nargs="+", default=['-'], - help="input files to filter") - parser.add_argument("--langs", nargs="+", required=True, - help="lang ids of each input file") - parser.add_argument("--outputs", nargs="+", default=['-'], - help="path to save lid filtered outputs") - parser.add_argument("--num-workers", type=int, metavar="N", default=10, - help="number of processes in parallel") - args = parser.parse_args() - - assert len(args.inputs) == len(args.langs) and len(args.inputs) == len(args.outputs) - - with contextlib.ExitStack() as stack: - inputs = [ - stack.enter_context(open(input, "r", encoding="utf-8", newline="\n", errors="replace")) - if input != "-" else io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8', errors="replace") - for input in args.inputs - ] - outputs = [ - stack.enter_context(open(output, "w", encoding="utf-8", newline="\n")) - if output != "-" else sys.stdout - for output in args.outputs - ] - with Pool(args.num_workers, initializer=partial(init, args.model)) as p: - skip_cnt = 0 - for lines, preds in p.imap(pred, list(zip(*inputs)), chunksize=500): - if not all(a == b for a, b in zip(preds, args.langs)): - skip_cnt += 1 - continue - for line, output_h in zip(lines, outputs): - print(line.strip(), file=output_h) - print(f"Skipped {skip_cnt} lines.") - -if __name__ == 
"__main__": - main() diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fastapi/staticfiles.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fastapi/staticfiles.py deleted file mode 100644 index 299015d4fef268cde91273790251f35192e1c8a6..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fastapi/staticfiles.py +++ /dev/null @@ -1 +0,0 @@ -from starlette.staticfiles import StaticFiles as StaticFiles # noqa diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/cu2qu/__main__.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/cu2qu/__main__.py deleted file mode 100644 index 084bf8f960db3d4ded95921ee9d7cbd2a7fb9f4a..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/cu2qu/__main__.py +++ /dev/null @@ -1,6 +0,0 @@ -import sys -from .cli import main - - -if __name__ == "__main__": - sys.exit(main()) diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/_c_v_a_r.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/_c_v_a_r.py deleted file mode 100644 index 6ea44dbab3b0a4b0da1e5327d077873867f0b520..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/_c_v_a_r.py +++ /dev/null @@ -1,86 +0,0 @@ -from . import DefaultTable -from fontTools.misc import sstruct -from fontTools.misc.textTools import bytesjoin -from fontTools.ttLib.tables.TupleVariation import ( - compileTupleVariationStore, - decompileTupleVariationStore, - TupleVariation, -) - - -# https://www.microsoft.com/typography/otspec/cvar.htm -# https://www.microsoft.com/typography/otspec/otvarcommonformats.htm -# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6cvar.html - -CVAR_HEADER_FORMAT = """ - > # big endian - majorVersion: H - minorVersion: H - tupleVariationCount: H - offsetToData: H -""" - -CVAR_HEADER_SIZE = sstruct.calcsize(CVAR_HEADER_FORMAT) - - -class table__c_v_a_r(DefaultTable.DefaultTable): - dependencies = ["cvt ", "fvar"] - - def __init__(self, tag=None): - DefaultTable.DefaultTable.__init__(self, tag) - self.majorVersion, self.minorVersion = 1, 0 - self.variations = [] - - def compile(self, ttFont, useSharedPoints=False): - tupleVariationCount, tuples, data = compileTupleVariationStore( - variations=[v for v in self.variations if v.hasImpact()], - pointCount=len(ttFont["cvt "].values), - axisTags=[axis.axisTag for axis in ttFont["fvar"].axes], - sharedTupleIndices={}, - useSharedPoints=useSharedPoints, - ) - header = { - "majorVersion": self.majorVersion, - "minorVersion": self.minorVersion, - "tupleVariationCount": tupleVariationCount, - "offsetToData": CVAR_HEADER_SIZE + len(tuples), - } - return b"".join([sstruct.pack(CVAR_HEADER_FORMAT, header), tuples, data]) - - def decompile(self, data, ttFont): - axisTags = [axis.axisTag for axis in ttFont["fvar"].axes] - header = {} - sstruct.unpack(CVAR_HEADER_FORMAT, data[0:CVAR_HEADER_SIZE], header) - self.majorVersion = header["majorVersion"] - self.minorVersion = header["minorVersion"] - assert self.majorVersion == 1, self.majorVersion - self.variations = decompileTupleVariationStore( - tableTag=self.tableTag, - axisTags=axisTags, - tupleVariationCount=header["tupleVariationCount"], - 
pointCount=len(ttFont["cvt "].values), - sharedTuples=None, - data=data, - pos=CVAR_HEADER_SIZE, - dataPos=header["offsetToData"], - ) - - def fromXML(self, name, attrs, content, ttFont): - if name == "version": - self.majorVersion = int(attrs.get("major", "1")) - self.minorVersion = int(attrs.get("minor", "0")) - elif name == "tuple": - valueCount = len(ttFont["cvt "].values) - var = TupleVariation({}, [None] * valueCount) - self.variations.append(var) - for tupleElement in content: - if isinstance(tupleElement, tuple): - tupleName, tupleAttrs, tupleContent = tupleElement - var.fromXML(tupleName, tupleAttrs, tupleContent) - - def toXML(self, writer, ttFont): - axisTags = [axis.axisTag for axis in ttFont["fvar"].axes] - writer.simpletag("version", major=self.majorVersion, minor=self.minorVersion) - writer.newline() - for var in self.variations: - var.toXML(writer, axisTags) diff --git a/spaces/leafShen/CodeFormer/CodeFormer/basicsr/ops/dcn/src/deform_conv_cuda.cpp b/spaces/leafShen/CodeFormer/CodeFormer/basicsr/ops/dcn/src/deform_conv_cuda.cpp deleted file mode 100644 index 5d9424908ed2dbd4ac3cdb98d13e09287a4d2f2d..0000000000000000000000000000000000000000 --- a/spaces/leafShen/CodeFormer/CodeFormer/basicsr/ops/dcn/src/deform_conv_cuda.cpp +++ /dev/null @@ -1,685 +0,0 @@ -// modify from -// https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/deform_conv_cuda.c - -#include -#include - -#include -#include - -void deformable_im2col(const at::Tensor data_im, const at::Tensor data_offset, - const int channels, const int height, const int width, - const int ksize_h, const int ksize_w, const int pad_h, - const int pad_w, const int stride_h, const int stride_w, - const int dilation_h, const int dilation_w, - const int parallel_imgs, const int deformable_group, - at::Tensor data_col); - -void deformable_col2im(const at::Tensor data_col, const at::Tensor data_offset, - const int channels, const int height, const int width, - const int ksize_h, const int ksize_w, const int pad_h, - const int pad_w, const int stride_h, const int stride_w, - const int dilation_h, const int dilation_w, - const int parallel_imgs, const int deformable_group, - at::Tensor grad_im); - -void deformable_col2im_coord( - const at::Tensor data_col, const at::Tensor data_im, - const at::Tensor data_offset, const int channels, const int height, - const int width, const int ksize_h, const int ksize_w, const int pad_h, - const int pad_w, const int stride_h, const int stride_w, - const int dilation_h, const int dilation_w, const int parallel_imgs, - const int deformable_group, at::Tensor grad_offset); - -void modulated_deformable_im2col_cuda( - const at::Tensor data_im, const at::Tensor data_offset, - const at::Tensor data_mask, const int batch_size, const int channels, - const int height_im, const int width_im, const int height_col, - const int width_col, const int kernel_h, const int kenerl_w, - const int pad_h, const int pad_w, const int stride_h, const int stride_w, - const int dilation_h, const int dilation_w, const int deformable_group, - at::Tensor data_col); - -void modulated_deformable_col2im_cuda( - const at::Tensor data_col, const at::Tensor data_offset, - const at::Tensor data_mask, const int batch_size, const int channels, - const int height_im, const int width_im, const int height_col, - const int width_col, const int kernel_h, const int kenerl_w, - const int pad_h, const int pad_w, const int stride_h, const int stride_w, - const int dilation_h, const int dilation_w, const 
int deformable_group, - at::Tensor grad_im); - -void modulated_deformable_col2im_coord_cuda( - const at::Tensor data_col, const at::Tensor data_im, - const at::Tensor data_offset, const at::Tensor data_mask, - const int batch_size, const int channels, const int height_im, - const int width_im, const int height_col, const int width_col, - const int kernel_h, const int kenerl_w, const int pad_h, const int pad_w, - const int stride_h, const int stride_w, const int dilation_h, - const int dilation_w, const int deformable_group, at::Tensor grad_offset, - at::Tensor grad_mask); - -void shape_check(at::Tensor input, at::Tensor offset, at::Tensor *gradOutput, - at::Tensor weight, int kH, int kW, int dH, int dW, int padH, - int padW, int dilationH, int dilationW, int group, - int deformable_group) { - TORCH_CHECK(weight.ndimension() == 4, - "4D weight tensor (nOutputPlane,nInputPlane,kH,kW) expected, " - "but got: %s", - weight.ndimension()); - - TORCH_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous"); - - TORCH_CHECK(kW > 0 && kH > 0, - "kernel size should be greater than zero, but got kH: %d kW: %d", kH, - kW); - - TORCH_CHECK((weight.size(2) == kH && weight.size(3) == kW), - "kernel size should be consistent with weight, ", - "but got kH: %d kW: %d weight.size(2): %d, weight.size(3): %d", kH, - kW, weight.size(2), weight.size(3)); - - TORCH_CHECK(dW > 0 && dH > 0, - "stride should be greater than zero, but got dH: %d dW: %d", dH, dW); - - TORCH_CHECK( - dilationW > 0 && dilationH > 0, - "dilation should be greater than 0, but got dilationH: %d dilationW: %d", - dilationH, dilationW); - - int ndim = input.ndimension(); - int dimf = 0; - int dimh = 1; - int dimw = 2; - - if (ndim == 4) { - dimf++; - dimh++; - dimw++; - } - - TORCH_CHECK(ndim == 3 || ndim == 4, "3D or 4D input tensor expected but got: %s", - ndim); - - long nInputPlane = weight.size(1) * group; - long inputHeight = input.size(dimh); - long inputWidth = input.size(dimw); - long nOutputPlane = weight.size(0); - long outputHeight = - (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1; - long outputWidth = - (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1; - - TORCH_CHECK(nInputPlane % deformable_group == 0, - "input channels must divide deformable group size"); - - if (outputWidth < 1 || outputHeight < 1) - AT_ERROR( - "Given input size: (%ld x %ld x %ld). " - "Calculated output size: (%ld x %ld x %ld). 
Output size is too small", - nInputPlane, inputHeight, inputWidth, nOutputPlane, outputHeight, - outputWidth); - - TORCH_CHECK(input.size(1) == nInputPlane, - "invalid number of input planes, expected: %d, but got: %d", - nInputPlane, input.size(1)); - - TORCH_CHECK((inputHeight >= kH && inputWidth >= kW), - "input image is smaller than kernel"); - - TORCH_CHECK((offset.size(2) == outputHeight && offset.size(3) == outputWidth), - "invalid spatial size of offset, expected height: %d width: %d, but " - "got height: %d width: %d", - outputHeight, outputWidth, offset.size(2), offset.size(3)); - - TORCH_CHECK((offset.size(1) == deformable_group * 2 * kH * kW), - "invalid number of channels of offset"); - - if (gradOutput != NULL) { - TORCH_CHECK(gradOutput->size(dimf) == nOutputPlane, - "invalid number of gradOutput planes, expected: %d, but got: %d", - nOutputPlane, gradOutput->size(dimf)); - - TORCH_CHECK((gradOutput->size(dimh) == outputHeight && - gradOutput->size(dimw) == outputWidth), - "invalid size of gradOutput, expected height: %d width: %d , but " - "got height: %d width: %d", - outputHeight, outputWidth, gradOutput->size(dimh), - gradOutput->size(dimw)); - } -} - -int deform_conv_forward_cuda(at::Tensor input, at::Tensor weight, - at::Tensor offset, at::Tensor output, - at::Tensor columns, at::Tensor ones, int kW, - int kH, int dW, int dH, int padW, int padH, - int dilationW, int dilationH, int group, - int deformable_group, int im2col_step) { - // todo: resize columns to include im2col: done - // todo: add im2col_step as input - // todo: add new output buffer and transpose it to output (or directly - // transpose output) todo: possibly change data indexing because of - // parallel_imgs - - shape_check(input, offset, NULL, weight, kH, kW, dH, dW, padH, padW, - dilationH, dilationW, group, deformable_group); - at::DeviceGuard guard(input.device()); - - input = input.contiguous(); - offset = offset.contiguous(); - weight = weight.contiguous(); - - int batch = 1; - if (input.ndimension() == 3) { - // Force batch - batch = 0; - input.unsqueeze_(0); - offset.unsqueeze_(0); - } - - // todo: assert batchsize dividable by im2col_step - - long batchSize = input.size(0); - long nInputPlane = input.size(1); - long inputHeight = input.size(2); - long inputWidth = input.size(3); - - long nOutputPlane = weight.size(0); - - long outputWidth = - (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1; - long outputHeight = - (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1; - - TORCH_CHECK((offset.size(0) == batchSize), "invalid batch size of offset"); - - output = output.view({batchSize / im2col_step, im2col_step, nOutputPlane, - outputHeight, outputWidth}); - columns = at::zeros( - {nInputPlane * kW * kH, im2col_step * outputHeight * outputWidth}, - input.options()); - - if (ones.ndimension() != 2 || - ones.size(0) * ones.size(1) < outputHeight * outputWidth) { - ones = at::ones({outputHeight, outputWidth}, input.options()); - } - - input = input.view({batchSize / im2col_step, im2col_step, nInputPlane, - inputHeight, inputWidth}); - offset = - offset.view({batchSize / im2col_step, im2col_step, - deformable_group * 2 * kH * kW, outputHeight, outputWidth}); - - at::Tensor output_buffer = - at::zeros({batchSize / im2col_step, nOutputPlane, - im2col_step * outputHeight, outputWidth}, - output.options()); - - output_buffer = output_buffer.view( - {output_buffer.size(0), group, output_buffer.size(1) / group, - output_buffer.size(2), output_buffer.size(3)}); - - for (int elt = 0; elt 
< batchSize / im2col_step; elt++) { - deformable_im2col(input[elt], offset[elt], nInputPlane, inputHeight, - inputWidth, kH, kW, padH, padW, dH, dW, dilationH, - dilationW, im2col_step, deformable_group, columns); - - columns = columns.view({group, columns.size(0) / group, columns.size(1)}); - weight = weight.view({group, weight.size(0) / group, weight.size(1), - weight.size(2), weight.size(3)}); - - for (int g = 0; g < group; g++) { - output_buffer[elt][g] = output_buffer[elt][g] - .flatten(1) - .addmm_(weight[g].flatten(1), columns[g]) - .view_as(output_buffer[elt][g]); - } - } - - output_buffer = output_buffer.view( - {output_buffer.size(0), output_buffer.size(1) * output_buffer.size(2), - output_buffer.size(3), output_buffer.size(4)}); - - output_buffer = output_buffer.view({batchSize / im2col_step, nOutputPlane, - im2col_step, outputHeight, outputWidth}); - output_buffer.transpose_(1, 2); - output.copy_(output_buffer); - output = output.view({batchSize, nOutputPlane, outputHeight, outputWidth}); - - input = input.view({batchSize, nInputPlane, inputHeight, inputWidth}); - offset = offset.view( - {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth}); - - if (batch == 0) { - output = output.view({nOutputPlane, outputHeight, outputWidth}); - input = input.view({nInputPlane, inputHeight, inputWidth}); - offset = offset.view({offset.size(1), offset.size(2), offset.size(3)}); - } - - return 1; -} - -int deform_conv_backward_input_cuda(at::Tensor input, at::Tensor offset, - at::Tensor gradOutput, at::Tensor gradInput, - at::Tensor gradOffset, at::Tensor weight, - at::Tensor columns, int kW, int kH, int dW, - int dH, int padW, int padH, int dilationW, - int dilationH, int group, - int deformable_group, int im2col_step) { - shape_check(input, offset, &gradOutput, weight, kH, kW, dH, dW, padH, padW, - dilationH, dilationW, group, deformable_group); - at::DeviceGuard guard(input.device()); - - input = input.contiguous(); - offset = offset.contiguous(); - gradOutput = gradOutput.contiguous(); - weight = weight.contiguous(); - - int batch = 1; - - if (input.ndimension() == 3) { - // Force batch - batch = 0; - input = input.view({1, input.size(0), input.size(1), input.size(2)}); - offset = offset.view({1, offset.size(0), offset.size(1), offset.size(2)}); - gradOutput = gradOutput.view( - {1, gradOutput.size(0), gradOutput.size(1), gradOutput.size(2)}); - } - - long batchSize = input.size(0); - long nInputPlane = input.size(1); - long inputHeight = input.size(2); - long inputWidth = input.size(3); - - long nOutputPlane = weight.size(0); - - long outputWidth = - (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1; - long outputHeight = - (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1; - - TORCH_CHECK((offset.size(0) == batchSize), 3, "invalid batch size of offset"); - gradInput = gradInput.view({batchSize, nInputPlane, inputHeight, inputWidth}); - columns = at::zeros( - {nInputPlane * kW * kH, im2col_step * outputHeight * outputWidth}, - input.options()); - - // change order of grad output - gradOutput = gradOutput.view({batchSize / im2col_step, im2col_step, - nOutputPlane, outputHeight, outputWidth}); - gradOutput.transpose_(1, 2); - - gradInput = gradInput.view({batchSize / im2col_step, im2col_step, nInputPlane, - inputHeight, inputWidth}); - input = input.view({batchSize / im2col_step, im2col_step, nInputPlane, - inputHeight, inputWidth}); - gradOffset = gradOffset.view({batchSize / im2col_step, im2col_step, - deformable_group * 2 * kH * kW, outputHeight, 
- outputWidth}); - offset = - offset.view({batchSize / im2col_step, im2col_step, - deformable_group * 2 * kH * kW, outputHeight, outputWidth}); - - for (int elt = 0; elt < batchSize / im2col_step; elt++) { - // divide into groups - columns = columns.view({group, columns.size(0) / group, columns.size(1)}); - weight = weight.view({group, weight.size(0) / group, weight.size(1), - weight.size(2), weight.size(3)}); - gradOutput = gradOutput.view( - {gradOutput.size(0), group, gradOutput.size(1) / group, - gradOutput.size(2), gradOutput.size(3), gradOutput.size(4)}); - - for (int g = 0; g < group; g++) { - columns[g] = columns[g].addmm_(weight[g].flatten(1).transpose(0, 1), - gradOutput[elt][g].flatten(1), 0.0f, 1.0f); - } - - columns = - columns.view({columns.size(0) * columns.size(1), columns.size(2)}); - gradOutput = gradOutput.view( - {gradOutput.size(0), gradOutput.size(1) * gradOutput.size(2), - gradOutput.size(3), gradOutput.size(4), gradOutput.size(5)}); - - deformable_col2im_coord(columns, input[elt], offset[elt], nInputPlane, - inputHeight, inputWidth, kH, kW, padH, padW, dH, dW, - dilationH, dilationW, im2col_step, deformable_group, - gradOffset[elt]); - - deformable_col2im(columns, offset[elt], nInputPlane, inputHeight, - inputWidth, kH, kW, padH, padW, dH, dW, dilationH, - dilationW, im2col_step, deformable_group, gradInput[elt]); - } - - gradOutput.transpose_(1, 2); - gradOutput = - gradOutput.view({batchSize, nOutputPlane, outputHeight, outputWidth}); - - gradInput = gradInput.view({batchSize, nInputPlane, inputHeight, inputWidth}); - input = input.view({batchSize, nInputPlane, inputHeight, inputWidth}); - gradOffset = gradOffset.view( - {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth}); - offset = offset.view( - {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth}); - - if (batch == 0) { - gradOutput = gradOutput.view({nOutputPlane, outputHeight, outputWidth}); - input = input.view({nInputPlane, inputHeight, inputWidth}); - gradInput = gradInput.view({nInputPlane, inputHeight, inputWidth}); - offset = offset.view({offset.size(1), offset.size(2), offset.size(3)}); - gradOffset = - gradOffset.view({offset.size(1), offset.size(2), offset.size(3)}); - } - - return 1; -} - -int deform_conv_backward_parameters_cuda( - at::Tensor input, at::Tensor offset, at::Tensor gradOutput, - at::Tensor gradWeight, // at::Tensor gradBias, - at::Tensor columns, at::Tensor ones, int kW, int kH, int dW, int dH, - int padW, int padH, int dilationW, int dilationH, int group, - int deformable_group, float scale, int im2col_step) { - // todo: transpose and reshape outGrad - // todo: reshape columns - // todo: add im2col_step as input - - shape_check(input, offset, &gradOutput, gradWeight, kH, kW, dH, dW, padH, - padW, dilationH, dilationW, group, deformable_group); - at::DeviceGuard guard(input.device()); - - input = input.contiguous(); - offset = offset.contiguous(); - gradOutput = gradOutput.contiguous(); - - int batch = 1; - - if (input.ndimension() == 3) { - // Force batch - batch = 0; - input = input.view( - at::IntList({1, input.size(0), input.size(1), input.size(2)})); - gradOutput = gradOutput.view( - {1, gradOutput.size(0), gradOutput.size(1), gradOutput.size(2)}); - } - - long batchSize = input.size(0); - long nInputPlane = input.size(1); - long inputHeight = input.size(2); - long inputWidth = input.size(3); - - long nOutputPlane = gradWeight.size(0); - - long outputWidth = - (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1; - long outputHeight 
= - (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1; - - TORCH_CHECK((offset.size(0) == batchSize), "invalid batch size of offset"); - - columns = at::zeros( - {nInputPlane * kW * kH, im2col_step * outputHeight * outputWidth}, - input.options()); - - gradOutput = gradOutput.view({batchSize / im2col_step, im2col_step, - nOutputPlane, outputHeight, outputWidth}); - gradOutput.transpose_(1, 2); - - at::Tensor gradOutputBuffer = at::zeros_like(gradOutput); - gradOutputBuffer = - gradOutputBuffer.view({batchSize / im2col_step, nOutputPlane, im2col_step, - outputHeight, outputWidth}); - gradOutputBuffer.copy_(gradOutput); - gradOutputBuffer = - gradOutputBuffer.view({batchSize / im2col_step, nOutputPlane, - im2col_step * outputHeight, outputWidth}); - - gradOutput.transpose_(1, 2); - gradOutput = - gradOutput.view({batchSize, nOutputPlane, outputHeight, outputWidth}); - - input = input.view({batchSize / im2col_step, im2col_step, nInputPlane, - inputHeight, inputWidth}); - offset = - offset.view({batchSize / im2col_step, im2col_step, - deformable_group * 2 * kH * kW, outputHeight, outputWidth}); - - for (int elt = 0; elt < batchSize / im2col_step; elt++) { - deformable_im2col(input[elt], offset[elt], nInputPlane, inputHeight, - inputWidth, kH, kW, padH, padW, dH, dW, dilationH, - dilationW, im2col_step, deformable_group, columns); - - // divide into group - gradOutputBuffer = gradOutputBuffer.view( - {gradOutputBuffer.size(0), group, gradOutputBuffer.size(1) / group, - gradOutputBuffer.size(2), gradOutputBuffer.size(3)}); - columns = columns.view({group, columns.size(0) / group, columns.size(1)}); - gradWeight = - gradWeight.view({group, gradWeight.size(0) / group, gradWeight.size(1), - gradWeight.size(2), gradWeight.size(3)}); - - for (int g = 0; g < group; g++) { - gradWeight[g] = gradWeight[g] - .flatten(1) - .addmm_(gradOutputBuffer[elt][g].flatten(1), - columns[g].transpose(1, 0), 1.0, scale) - .view_as(gradWeight[g]); - } - gradOutputBuffer = gradOutputBuffer.view( - {gradOutputBuffer.size(0), - gradOutputBuffer.size(1) * gradOutputBuffer.size(2), - gradOutputBuffer.size(3), gradOutputBuffer.size(4)}); - columns = - columns.view({columns.size(0) * columns.size(1), columns.size(2)}); - gradWeight = gradWeight.view({gradWeight.size(0) * gradWeight.size(1), - gradWeight.size(2), gradWeight.size(3), - gradWeight.size(4)}); - } - - input = input.view({batchSize, nInputPlane, inputHeight, inputWidth}); - offset = offset.view( - {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth}); - - if (batch == 0) { - gradOutput = gradOutput.view({nOutputPlane, outputHeight, outputWidth}); - input = input.view({nInputPlane, inputHeight, inputWidth}); - } - - return 1; -} - -void modulated_deform_conv_cuda_forward( - at::Tensor input, at::Tensor weight, at::Tensor bias, at::Tensor ones, - at::Tensor offset, at::Tensor mask, at::Tensor output, at::Tensor columns, - int kernel_h, int kernel_w, const int stride_h, const int stride_w, - const int pad_h, const int pad_w, const int dilation_h, - const int dilation_w, const int group, const int deformable_group, - const bool with_bias) { - TORCH_CHECK(input.is_contiguous(), "input tensor has to be contiguous"); - TORCH_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous"); - at::DeviceGuard guard(input.device()); - - const int batch = input.size(0); - const int channels = input.size(1); - const int height = input.size(2); - const int width = input.size(3); - - const int channels_out = weight.size(0); - const int 
channels_kernel = weight.size(1); - const int kernel_h_ = weight.size(2); - const int kernel_w_ = weight.size(3); - - if (kernel_h_ != kernel_h || kernel_w_ != kernel_w) - AT_ERROR("Input shape and kernel shape wont match: (%d x %d vs %d x %d).", - kernel_h_, kernel_w, kernel_h_, kernel_w_); - if (channels != channels_kernel * group) - AT_ERROR("Input shape and kernel channels wont match: (%d vs %d).", - channels, channels_kernel * group); - - const int height_out = - (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; - const int width_out = - (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; - - if (ones.ndimension() != 2 || - ones.size(0) * ones.size(1) < height_out * width_out) { - // Resize plane and fill with ones... - ones = at::ones({height_out, width_out}, input.options()); - } - - // resize output - output = output.view({batch, channels_out, height_out, width_out}).zero_(); - // resize temporary columns - columns = - at::zeros({channels * kernel_h * kernel_w, 1 * height_out * width_out}, - input.options()); - - output = output.view({output.size(0), group, output.size(1) / group, - output.size(2), output.size(3)}); - - for (int b = 0; b < batch; b++) { - modulated_deformable_im2col_cuda( - input[b], offset[b], mask[b], 1, channels, height, width, height_out, - width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, - dilation_h, dilation_w, deformable_group, columns); - - // divide into group - weight = weight.view({group, weight.size(0) / group, weight.size(1), - weight.size(2), weight.size(3)}); - columns = columns.view({group, columns.size(0) / group, columns.size(1)}); - - for (int g = 0; g < group; g++) { - output[b][g] = output[b][g] - .flatten(1) - .addmm_(weight[g].flatten(1), columns[g]) - .view_as(output[b][g]); - } - - weight = weight.view({weight.size(0) * weight.size(1), weight.size(2), - weight.size(3), weight.size(4)}); - columns = - columns.view({columns.size(0) * columns.size(1), columns.size(2)}); - } - - output = output.view({output.size(0), output.size(1) * output.size(2), - output.size(3), output.size(4)}); - - if (with_bias) { - output += bias.view({1, bias.size(0), 1, 1}); - } -} - -void modulated_deform_conv_cuda_backward( - at::Tensor input, at::Tensor weight, at::Tensor bias, at::Tensor ones, - at::Tensor offset, at::Tensor mask, at::Tensor columns, - at::Tensor grad_input, at::Tensor grad_weight, at::Tensor grad_bias, - at::Tensor grad_offset, at::Tensor grad_mask, at::Tensor grad_output, - int kernel_h, int kernel_w, int stride_h, int stride_w, int pad_h, - int pad_w, int dilation_h, int dilation_w, int group, int deformable_group, - const bool with_bias) { - TORCH_CHECK(input.is_contiguous(), "input tensor has to be contiguous"); - TORCH_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous"); - at::DeviceGuard guard(input.device()); - - const int batch = input.size(0); - const int channels = input.size(1); - const int height = input.size(2); - const int width = input.size(3); - - const int channels_kernel = weight.size(1); - const int kernel_h_ = weight.size(2); - const int kernel_w_ = weight.size(3); - if (kernel_h_ != kernel_h || kernel_w_ != kernel_w) - AT_ERROR("Input shape and kernel shape wont match: (%d x %d vs %d x %d).", - kernel_h_, kernel_w, kernel_h_, kernel_w_); - if (channels != channels_kernel * group) - AT_ERROR("Input shape and kernel channels wont match: (%d vs %d).", - channels, channels_kernel * group); - - const int height_out = - (height + 2 * pad_h - (dilation_h * 
(kernel_h - 1) + 1)) / stride_h + 1; - const int width_out = - (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; - - if (ones.ndimension() != 2 || - ones.size(0) * ones.size(1) < height_out * width_out) { - // Resize plane and fill with ones... - ones = at::ones({height_out, width_out}, input.options()); - } - - grad_input = grad_input.view({batch, channels, height, width}); - columns = at::zeros({channels * kernel_h * kernel_w, height_out * width_out}, - input.options()); - - grad_output = - grad_output.view({grad_output.size(0), group, grad_output.size(1) / group, - grad_output.size(2), grad_output.size(3)}); - - for (int b = 0; b < batch; b++) { - // divide int group - columns = columns.view({group, columns.size(0) / group, columns.size(1)}); - weight = weight.view({group, weight.size(0) / group, weight.size(1), - weight.size(2), weight.size(3)}); - - for (int g = 0; g < group; g++) { - columns[g].addmm_(weight[g].flatten(1).transpose(0, 1), - grad_output[b][g].flatten(1), 0.0f, 1.0f); - } - - columns = - columns.view({columns.size(0) * columns.size(1), columns.size(2)}); - weight = weight.view({weight.size(0) * weight.size(1), weight.size(2), - weight.size(3), weight.size(4)}); - - // gradient w.r.t. input coordinate data - modulated_deformable_col2im_coord_cuda( - columns, input[b], offset[b], mask[b], 1, channels, height, width, - height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, - stride_w, dilation_h, dilation_w, deformable_group, grad_offset[b], - grad_mask[b]); - // gradient w.r.t. input data - modulated_deformable_col2im_cuda( - columns, offset[b], mask[b], 1, channels, height, width, height_out, - width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, - dilation_h, dilation_w, deformable_group, grad_input[b]); - - // gradient w.r.t. 
weight, dWeight should accumulate across the batch and - // group - modulated_deformable_im2col_cuda( - input[b], offset[b], mask[b], 1, channels, height, width, height_out, - width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, - dilation_h, dilation_w, deformable_group, columns); - - columns = columns.view({group, columns.size(0) / group, columns.size(1)}); - grad_weight = grad_weight.view({group, grad_weight.size(0) / group, - grad_weight.size(1), grad_weight.size(2), - grad_weight.size(3)}); - if (with_bias) - grad_bias = grad_bias.view({group, grad_bias.size(0) / group}); - - for (int g = 0; g < group; g++) { - grad_weight[g] = - grad_weight[g] - .flatten(1) - .addmm_(grad_output[b][g].flatten(1), columns[g].transpose(0, 1)) - .view_as(grad_weight[g]); - if (with_bias) { - grad_bias[g] = - grad_bias[g] - .view({-1, 1}) - .addmm_(grad_output[b][g].flatten(1), ones.view({-1, 1})) - .view(-1); - } - } - - columns = - columns.view({columns.size(0) * columns.size(1), columns.size(2)}); - grad_weight = grad_weight.view({grad_weight.size(0) * grad_weight.size(1), - grad_weight.size(2), grad_weight.size(3), - grad_weight.size(4)}); - if (with_bias) - grad_bias = grad_bias.view({grad_bias.size(0) * grad_bias.size(1)}); - } - grad_output = grad_output.view({grad_output.size(0) * grad_output.size(1), - grad_output.size(2), grad_output.size(3), - grad_output.size(4)}); -} diff --git a/spaces/lewisrxliu/3.3/Config.py b/spaces/lewisrxliu/3.3/Config.py deleted file mode 100644 index 1351583e89383ddd503ccfa27bc5ab8a57548250..0000000000000000000000000000000000000000 --- a/spaces/lewisrxliu/3.3/Config.py +++ /dev/null @@ -1,3 +0,0 @@ -#config API key - -API_KEY = "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" diff --git a/spaces/lightli/bingo-newbing/tests/kblob.ts b/spaces/lightli/bingo-newbing/tests/kblob.ts deleted file mode 100644 index 9e15b41c1c94a690beb61b23cdb42fc78767ccd2..0000000000000000000000000000000000000000 --- a/spaces/lightli/bingo-newbing/tests/kblob.ts +++ /dev/null @@ -1,27 +0,0 @@ -import FormData from 'form-data' - -import { fetch } from '@/lib/isomorphic' - -const formData = new FormData() - -const knowledgeRequest = {"imageInfo":{"url":"https://www.baidu.com/img/PCfb_5bf082d29588c07f842ccde3f97243ea.png"},"knowledgeRequest":{"invokedSkills":["ImageById"],"subscriptionId":"Bing.Chat.Multimodal","invokedSkillsRequestData":{"enableFaceBlur":true},"convoData":{"convoid":"51D|BingProdUnAuthenticatedUsers|E3DCA904FF236C67C3450163BCEC64CFF3F618CC8A4AFD75FD518F5ED0ADA080","convotone":"Creative"}}} - -formData.append('knowledgeRequest', JSON.stringify(knowledgeRequest)) - - -fetch('https://bing.vcanbb.top/images/kblob', - { - method: 'POST', - body: formData.getBuffer(), - headers: { - "sec-ch-ua": "\"Not/A)Brand\";v=\"99\", \"Google Chrome\";v=\"115\", \"Chromium\";v=\"115\"", - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-platform": "\"Windows\"", - "Referer": "https://bing.vcanbb.top/web/index.html", - "Referrer-Policy": "origin-when-cross-origin", - ...formData.getHeaders() - } - - } -).then(res => res.text()) -.then(res => console.log('res', res)) diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Bldc Motor Design Software _VERIFIED_ Download Free.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Bldc Motor Design Software _VERIFIED_ Download Free.md deleted file mode 100644 index 7d92bb8dc702f504c3674cfcaaa5b6112d8d8d20..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Bldc Motor Design Software _VERIFIED_ Download Free.md +++ /dev/null @@ 
-1,6 +0,0 @@ -

        Bldc Motor Design Software Download Free


        Download Zip ►►► https://bytlly.com/2uGx1o



        - -I upgraded my cheap Chinese CNC with end switches following this design: https://www.thingiverse.com/thing:2796202 But… | Download free 3D printable STL ... 1fdad05405
        -
        -
        -

        diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Dragon Ball Z Budokai Tenkaichi 3 Psp Iso Torrent.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Dragon Ball Z Budokai Tenkaichi 3 Psp Iso Torrent.md deleted file mode 100644 index 8ed77b5b85e0389a30ca31493682752fc0409f2e..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Dragon Ball Z Budokai Tenkaichi 3 Psp Iso Torrent.md +++ /dev/null @@ -1,54 +0,0 @@ -

        Dragon Ball Z Budokai Tenkaichi 3 Psp Iso Torrent


        DOWNLOAD ––– https://bytlly.com/2uGx5h



        - -Italian: - - e se si vede quel cuore di ratto cosa dici dell'esempio del vento di ultima serata che fatto gliccio di banana e di quello di galagos e il tsukisani è questa l'astratta versione e - - Quindi tu sai che c'è un che c'è una cosa che è - - Devo fare la scommessa che non - - ci sono nessuno che vuole dirmelo a nessuno - - E' più difficile per me - - Semplicemente non puoi fare questo - - Forma che è necessario - - Tutti in cerco di farlo giusto - - Dovevi farlo perché l'odore di questi programmi non rilascia l'odore finché non li scorrono tutti sopra o tutti abbiamo il più - - venti e - - Questo è un ragazzo che mi ha detto questo e - - Ha fatto questo con solo un gesto - - e così il mio inglese non lo parla cosa mi dicono così cose così - -Spanish: - - - -French: - - et si vous voyez ce cœur de rat qu'en dit-il de l'exemple du vent d'hier soir qui a fait glicère de banane et de celui des galagos et de tsukisani c'est cette version extraterrestre et - - Alors tu sais que c'est un qu'il y a un truc qui est - - Dois-je faire le pari qu'il n'y a personne qui veut me dire ça à personne - - C'est plus dur pour moi - - Vous ne pouvez simplement pas faire ça - - Forme qui est nécessaire - - Tous en quête de cela tout le monde - - Tu devais le faire parce que le goût de ces programmes ne libère pas le 4fefd39f24
        -
        -
        -

        diff --git a/spaces/lint/sdpipe_webui/README.md b/spaces/lint/sdpipe_webui/README.md deleted file mode 100644 index 4ec5b2f2831bc843394f2c948a2c272f6c3ba898..0000000000000000000000000000000000000000 --- a/spaces/lint/sdpipe_webui/README.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: Sdpipe Webui -emoji: 🍌 -colorFrom: gray -colorTo: green -sdk: gradio -sdk_version: 3.16.2 -app_file: app.py -pinned: false -license: openrail ---- - -# **Stable Diffusion Pipeline Web UI** - -Stable Diffusion WebUI with first class support for HuggingFace Diffusers Pipelines and Diffusion Schedulers, made in the style of Automatic1111's WebUI and Evel_Space. - -Supports Huggingface `Text-to-Image`, `Image to Image`, and `Inpainting` pipelines, with fast switching between pipeline modes by reusing loaded model weights already in memory. - -Install requirements with `pip install -r requirements.txt` - -Run with `python app.py` diff --git a/spaces/lllqqq/so-vits-svc-models-pcr/vencoder/whisper/model.py b/spaces/lllqqq/so-vits-svc-models-pcr/vencoder/whisper/model.py deleted file mode 100644 index cb3781c17a1e78a33bf62246e5134e8512206d0d..0000000000000000000000000000000000000000 --- a/spaces/lllqqq/so-vits-svc-models-pcr/vencoder/whisper/model.py +++ /dev/null @@ -1,269 +0,0 @@ -from dataclasses import dataclass -from typing import Dict -from typing import Iterable, Optional - -import numpy as np -import torch -import torch.nn.functional as F -from torch import Tensor -from torch import nn - -from .decoding import detect_language as detect_language_function, decode as decode_function - - -@dataclass -class ModelDimensions: - n_mels: int - n_audio_ctx: int - n_audio_state: int - n_audio_head: int - n_audio_layer: int - n_vocab: int - n_text_ctx: int - n_text_state: int - n_text_head: int - n_text_layer: int - - -class LayerNorm(nn.LayerNorm): - def forward(self, x: Tensor) -> Tensor: - return super().forward(x.float()).type(x.dtype) - - -class Linear(nn.Linear): - def forward(self, x: Tensor) -> Tensor: - return F.linear( - x, self.weight.to(x.dtype), None if self.bias is None else self.bias.to(x.dtype) - ) - - -class Conv1d(nn.Conv1d): - def _conv_forward(self, x: Tensor, weight: Tensor, bias: Optional[Tensor]) -> Tensor: - return super()._conv_forward( - x, weight.to(x.dtype), None if bias is None else bias.to(x.dtype) - ) - - -def sinusoids(length, channels, max_timescale=10000): - """Returns sinusoids for positional embedding""" - assert channels % 2 == 0 - log_timescale_increment = np.log(max_timescale) / (channels // 2 - 1) - inv_timescales = torch.exp(-log_timescale_increment * torch.arange(channels // 2)) - scaled_time = torch.arange(length)[:, np.newaxis] * inv_timescales[np.newaxis, :] - return torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=1) - - -class MultiHeadAttention(nn.Module): - def __init__(self, n_state: int, n_head: int): - super().__init__() - self.n_head = n_head - self.query = Linear(n_state, n_state) - self.key = Linear(n_state, n_state, bias=False) - self.value = Linear(n_state, n_state) - self.out = Linear(n_state, n_state) - - def forward( - self, - x: Tensor, - xa: Optional[Tensor] = None, - mask: Optional[Tensor] = None, - kv_cache: Optional[dict] = None, - ): - q = self.query(x) - - if kv_cache is None or xa is None or self.key not in kv_cache: - # hooks, if installed (i.e. kv_cache is not None), will prepend the cached kv tensors; - # otherwise, perform key/value projections for self- or cross-attention as usual. 
- k = self.key(x if xa is None else xa) - v = self.value(x if xa is None else xa) - else: - # for cross-attention, calculate keys and values once and reuse in subsequent calls. - k = kv_cache[self.key] - v = kv_cache[self.value] - - wv, qk = self.qkv_attention(q, k, v, mask) - return self.out(wv), qk - - def qkv_attention(self, q: Tensor, k: Tensor, v: Tensor, mask: Optional[Tensor] = None): - n_batch, n_ctx, n_state = q.shape - scale = (n_state // self.n_head) ** -0.25 - q = q.view(*q.shape[:2], self.n_head, -1).permute(0, 2, 1, 3) * scale - k = k.view(*k.shape[:2], self.n_head, -1).permute(0, 2, 3, 1) * scale - v = v.view(*v.shape[:2], self.n_head, -1).permute(0, 2, 1, 3) - - qk = q @ k - if mask is not None: - qk = qk + mask[:n_ctx, :n_ctx] - qk = qk.float() - - w = F.softmax(qk, dim=-1).to(q.dtype) - return (w @ v).permute(0, 2, 1, 3).flatten(start_dim=2), qk.detach() - - -class ResidualAttentionBlock(nn.Module): - def __init__(self, n_state: int, n_head: int, cross_attention: bool = False): - super().__init__() - - self.attn = MultiHeadAttention(n_state, n_head) - self.attn_ln = LayerNorm(n_state) - - self.cross_attn = MultiHeadAttention(n_state, n_head) if cross_attention else None - self.cross_attn_ln = LayerNorm(n_state) if cross_attention else None - - n_mlp = n_state * 4 - self.mlp = nn.Sequential(Linear(n_state, n_mlp), nn.GELU(), Linear(n_mlp, n_state)) - self.mlp_ln = LayerNorm(n_state) - - def forward( - self, - x: Tensor, - xa: Optional[Tensor] = None, - mask: Optional[Tensor] = None, - kv_cache: Optional[dict] = None, - ): - x = x + self.attn(self.attn_ln(x), mask=mask, kv_cache=kv_cache)[0] - if self.cross_attn: - x = x + self.cross_attn(self.cross_attn_ln(x), xa, kv_cache=kv_cache)[0] - x = x + self.mlp(self.mlp_ln(x)) - return x - - -class AudioEncoder(nn.Module): - def __init__(self, n_mels: int, n_ctx: int, n_state: int, n_head: int, n_layer: int): - super().__init__() - self.conv1 = Conv1d(n_mels, n_state, kernel_size=3, padding=1) - self.conv2 = Conv1d(n_state, n_state, kernel_size=3, stride=2, padding=1) - self.register_buffer("positional_embedding", sinusoids(n_ctx, n_state)) - - self.blocks: Iterable[ResidualAttentionBlock] = nn.ModuleList( - [ResidualAttentionBlock(n_state, n_head) for _ in range(n_layer)] - ) - self.ln_post = LayerNorm(n_state) - - def forward(self, x: Tensor): - """ - x : torch.Tensor, shape = (batch_size, n_mels, n_ctx) - the mel spectrogram of the audio - """ - x = F.gelu(self.conv1(x)) - x = F.gelu(self.conv2(x)) - x = x.permute(0, 2, 1) - - len_x = x.shape[1] - len_e = self.positional_embedding.shape[0] - assert len_x <= len_e, "incorrect audio shape" - pos_e = self.positional_embedding[:len_x, :] - x = (x + pos_e).to(x.dtype) - - for block in self.blocks: - x = block(x) - - x = self.ln_post(x) - return x - - -class TextDecoder(nn.Module): - def __init__(self, n_vocab: int, n_ctx: int, n_state: int, n_head: int, n_layer: int): - super().__init__() - - self.token_embedding = nn.Embedding(n_vocab, n_state) - self.positional_embedding = nn.Parameter(torch.empty(n_ctx, n_state)) - - self.blocks: Iterable[ResidualAttentionBlock] = nn.ModuleList( - [ResidualAttentionBlock(n_state, n_head, cross_attention=True) for _ in range(n_layer)] - ) - self.ln = LayerNorm(n_state) - - mask = torch.empty(n_ctx, n_ctx).fill_(-np.inf).triu_(1) - self.register_buffer("mask", mask, persistent=False) - - def forward(self, x: Tensor, xa: Tensor, kv_cache: Optional[dict] = None): - """ - x : torch.LongTensor, shape = (batch_size, <= n_ctx) - the text tokens - xa : 
torch.Tensor, shape = (batch_size, n_mels, n_audio_ctx) - the encoded audio features to be attended on - """ - offset = next(iter(kv_cache.values())).shape[1] if kv_cache else 0 - x = self.token_embedding(x) + self.positional_embedding[offset : offset + x.shape[-1]] - x = x.to(xa.dtype) - - for block in self.blocks: - x = block(x, xa, mask=self.mask, kv_cache=kv_cache) - - x = self.ln(x) - logits = (x @ torch.transpose(self.token_embedding.weight.to(x.dtype), 0, 1)).float() - - return logits - - -class Whisper(nn.Module): - def __init__(self, dims: ModelDimensions): - super().__init__() - self.dims = dims - self.encoder = AudioEncoder( - self.dims.n_mels, - self.dims.n_audio_ctx, - self.dims.n_audio_state, - self.dims.n_audio_head, - self.dims.n_audio_layer, - ) - self.decoder = TextDecoder( - self.dims.n_vocab, - self.dims.n_text_ctx, - self.dims.n_text_state, - self.dims.n_text_head, - self.dims.n_text_layer, - ) - - def embed_audio(self, mel: torch.Tensor): - return self.encoder(mel) - - def logits(self, tokens: torch.Tensor, audio_features: torch.Tensor): - return self.decoder(tokens, audio_features) - - def forward(self, mel: torch.Tensor, tokens: torch.Tensor) -> Dict[str, torch.Tensor]: - return self.decoder(tokens, self.encoder(mel)) - - @property - def device(self): - return next(self.parameters()).device - - @property - def is_multilingual(self): - return self.dims.n_vocab == 51865 - - def install_kv_cache_hooks(self, cache: Optional[dict] = None): - """ - The `MultiHeadAttention` module optionally accepts `kv_cache` which stores the key and value - tensors calculated for the previous positions. This method returns a dictionary that stores - all caches, and the necessary hooks for the key and value projection modules that save the - intermediate tensors to be reused during later calculations. 
- - Returns - ------- - cache : Dict[nn.Module, torch.Tensor] - A dictionary object mapping the key/value projection modules to its cache - hooks : List[RemovableHandle] - List of PyTorch RemovableHandle objects to stop the hooks to be called - """ - cache = {**cache} if cache is not None else {} - hooks = [] - - def save_to_cache(module, _, output): - if module not in cache or output.shape[1] > self.decoder.positional_embedding.shape[0]: - cache[module] = output # save as-is, for the first token or cross attention - else: - cache[module] = torch.cat([cache[module], output], dim=1).detach() - return cache[module] - - def install_hooks(layer: nn.Module): - if isinstance(layer, MultiHeadAttention): - hooks.append(layer.key.register_forward_hook(save_to_cache)) - hooks.append(layer.value.register_forward_hook(save_to_cache)) - - self.decoder.apply(install_hooks) - return cache, hooks - - detect_language = detect_language_function - decode = decode_function diff --git a/spaces/ludusc/latent-space-theories/frontend/footer.py b/spaces/ludusc/latent-space-theories/frontend/footer.py deleted file mode 100644 index 31197e22dcec3e3f0d59b46f4733b7aa8a6980eb..0000000000000000000000000000000000000000 --- a/spaces/ludusc/latent-space-theories/frontend/footer.py +++ /dev/null @@ -1,32 +0,0 @@ -import streamlit as st - -footer=""" - - -""" - -def add_footer(text, linked_text, link): - custom_footer = footer.replace('USER_DEFINED_TEXT', text).replace('LINKED_TEXT', linked_text).replace('LINK', link) - st.markdown(custom_footer, unsafe_allow_html=True) \ No newline at end of file diff --git a/spaces/luisoala/raw2logit/train.py b/spaces/luisoala/raw2logit/train.py deleted file mode 100644 index b088063cc01a9d18f2d74bd47f60c8a1f2f610f4..0000000000000000000000000000000000000000 --- a/spaces/luisoala/raw2logit/train.py +++ /dev/null @@ -1,386 +0,0 @@ -import os -import sys -import copy -import argparse - -import torch -from torch import optim -import torch.nn as nn - -import mlflow.pytorch -from torch.utils.data import DataLoader -from torchvision.models import resnet18 -import torchvision.transforms as T -from pytorch_lightning.metrics.functional import accuracy -import pytorch_lightning as pl -from pytorch_lightning.callbacks import ModelCheckpoint - -from utils.base import AuxLoss, WeightedLoss, display_mlflow_run_info, l2_regularization, str2bool, fetch_from_mlflow, get_name, data_loader_mean_and_std -from utils.dataset_utils import k_fold -from utils.augmentation import get_augmentation -from dataset import Subset, get_dataset - -from processing.pipeline_numpy import RawProcessingPipeline -from processing.pipeline_torch import append_additive_layer, raw2rgb, RawToRGB, ParametrizedProcessing, NNProcessing - -from model import log_tensor, resnet_model, LitModel, TrackImagesCallback - -import segmentation_models_pytorch as smp - -from utils.ssim import SSIM - -# args to set up task -parser = argparse.ArgumentParser(description='classification_task') -parser.add_argument('--tracking_uri', type=str, - default='http://deplo-mlflo-1ssxo94f973sj-890390d809901dbf.elb.eu-central-1.amazonaws.com', help='URI of the mlflow server on AWS') -parser.add_argument('--processor_uri', type=str, default=None, - help='URI of the processing model (e.g. s3://mlflow-artifacts-821771080529/1/5fa754c566e3466690b1d309a476340f/artifacts/processing-model)') -parser.add_argument('--classifier_uri', type=str, default=None, - help='URI of the net (e.g. 
s3://mlflow-artifacts-821771080529/1/5fa754c566e3466690b1d309a476340f/artifacts/prediction-model)') -parser.add_argument('--state_dict_uri', type=str, - default=None, help='URI of the indices you want to load (e.g. s3://mlflow-artifacts-601883093460/7/4326da05aca54107be8c554de0674a14/artifacts/training') - -parser.add_argument('--experiment_name', type=str, - default='classification learnable pipeline', help='Specify the experiment you are running, e.g. end2end segmentation') -parser.add_argument('--run_name', type=str, - default='test run', help='Specify the name of your run') - -parser.add_argument('--log_model', type=str2bool, default=True, help='Enables model logging') -parser.add_argument('--save_locally', action='store_true', - help='Model will be saved locally if action is taken') # TODO: bypass mlflow - -parser.add_argument('--track_processing', action='store_true', - help='Save images after each trasformation of the pipeline for the test set') -parser.add_argument('--track_processing_gradients', action='store_true', - help='Save images of gradients after each trasformation of the pipeline for the test set') -parser.add_argument('--track_save_tensors', action='store_true', - help='Save the torch tensors after each trasformation of the pipeline for the test set') -parser.add_argument('--track_predictions', action='store_true', - help='Save images after each trasformation of the pipeline for the test set + input gradient') -parser.add_argument('--track_n_images', default=5, - help='Track the n first elements of dataset. Only used for args.track_processing=True') -parser.add_argument('--track_every_epoch', action='store_true', help='Track images every epoch or once after training') - -# args to create dataset -parser.add_argument('--seed', type=int, default=1, help='Global seed') -parser.add_argument('--dataset', type=str, default='Microscopy', - choices=['Drone', 'DroneSegmentation', 'Microscopy'], help='Select dataset') - -parser.add_argument('--n_splits', type=int, default=1, help='Number of splits used for training') -parser.add_argument('--train_size', type=float, default=0.8, help='Fraction of training points in dataset') - -# args for training -parser.add_argument('--lr', type=float, default=1e-5, help='learning rate used for training') -parser.add_argument('--epochs', type=int, default=3, help='numper of epochs') -parser.add_argument('--batch_size', type=int, default=32, help='Training batch size') -parser.add_argument('--augmentation', type=str, default='none', - choices=['none', 'weak', 'strong'], help='Applies augmentation to training') -parser.add_argument('--check_val_every_n_epoch', type=int, default=1) - -# args to specify the processing -parser.add_argument('--processing_mode', type=str, default='parametrized', - choices=['parametrized', 'static', 'neural_network', 'none'], - help='Which type of raw to rgb processing should be used') - -# args to specify model -parser.add_argument('--classifier_network', type=str, default='ResNet18', choices=['ResNet18', 'ResNet34', 'Resnet50'], - help='Type of pretrained network') -parser.add_argument('--classifier_pretrained', action='store_true', - help='Whether to use a pre-trained model or not') -parser.add_argument('--smp_encoder', type=str, default='resnet34', help='segmentation models pytorch encoder') - -parser.add_argument('--freeze_processor', action='store_true', help='Freeze raw to rgb processing model weights') -parser.add_argument('--freeze_classifier', action='store_true', help='Freeze classification model weights') - 
-# args to specify static pipeline transformations -parser.add_argument('--sp_debayer', type=str, default='bilinear', - choices=['bilinear', 'malvar2004', 'menon2007'], help='Specify algorithm used as debayer') -parser.add_argument('--sp_sharpening', type=str, default='sharpening_filter', - choices=['sharpening_filter', 'unsharp_masking'], help='Specify algorithm used for sharpening') -parser.add_argument('--sp_denoising', type=str, default='gaussian_denoising', - choices=['gaussian_denoising', 'median_denoising', 'fft_denoising'], help='Specify algorithm used for denoising') - -# args to choose training mode -parser.add_argument('--adv_training', action='store_true', help='Enable adversarial training') -parser.add_argument('--adv_aux_weight', type=float, default=1, help='Weighting of the adversarial auxilliary loss') -parser.add_argument('--adv_aux_loss', type=str, default='ssim', choices=['l2', 'ssim'], - help='Type of adversarial auxilliary regularization loss') -parser.add_argument('--adv_noise_layer', action='store_true', help='Adds an additive layer to Parametrized Processing') -parser.add_argument('--adv_track_differences', action='store_true', help='Save difference to default pipeline') -parser.add_argument('--adv_parameters', choices=['all', 'black_level', 'white_balance', - 'colour_correction', 'gamma_correct', 'sharpening_filter', 'gaussian_blur', 'additive_layer'], - help='Target individual parameters for adversarial training.') - -parser.add_argument('--cache_downloaded_models', type=str2bool, default=True) - -parser.add_argument('--test_run', action='store_true') - - -args = parser.parse_args() - -os.makedirs('results', exist_ok=True) - - -def run_train(args): - - print(args) - - DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu' - training_mode = 'adversarial' if args.adv_training else 'default' - - # set tracking uri, this is the address of the mlflow server where light experimental data will be stored - mlflow.set_tracking_uri(args.tracking_uri) - mlflow.set_experiment(args.experiment_name) - os.environ['AWS_ACCESS_KEY_ID'] = '#TODO: fill in your aws access key id for mlflow server here' - os.environ['AWS_SECRET_ACCESS_KEY'] = '#TODO: fill in your aws secret access key for mlflow server here' - - dataset = get_dataset(args.dataset) - - print(f'dataset: {type(dataset).__name__}[{len(dataset)}]') - print(f'task: {dataset.task}') - print(f'mode: {training_mode} training') - print(f'# cross-validation subsets: {args.n_splits}') - pl.seed_everything(args.seed) - idxs_kfold = k_fold(dataset, n_splits=args.n_splits, seed=args.seed, train_size=args.train_size) - - # start mlflow parent run for k-fold validation (optional) - with mlflow.start_run(run_name=args.run_name) as parent_run: - - # start mlflow child run - for k_iter, (train_indices, valid_indices) in enumerate(idxs_kfold): - - print(f'K_fold subset: {k_iter+1}/{args.n_splits}') - - if args.processing_mode == 'static': - # only needed if processor outputs should be normalized (might help for classifier training / testing against torch pipeline) - if args.dataset == 'Drone' or args.dataset == 'DroneSegmentation': - mean = torch.tensor([0.35, 0.36, 0.35]) - std = torch.tensor([0.12, 0.11, 0.12]) - elif args.dataset == 'Microscopy': - mean = torch.tensor([0.91, 0.84, 0.94]) - std = torch.tensor([0.08, 0.12, 0.05]) - - # numpy pipeline doesn't use torch batched transformations. 
Transformations are applied individually to dataloader - dataset.transform = T.Compose([RawProcessingPipeline( - camera_parameters=dataset.camera_parameters, - debayer=args.sp_debayer, - sharpening=args.sp_sharpening, - denoising=args.sp_denoising, - ), - T.Normalize(mean, std) - ]) - - processor = nn.Identity() - - # fetch processor from mlflow - if args.processor_uri is not None and args.processing_mode != 'none': - print('Fetching processor: ', end='') - processor = fetch_from_mlflow(args.processor_uri, type='processor', - use_cache=args.cache_downloaded_models) - else: - print(f'processing_mode: {args.processing_mode}') - normalize_mosaic = None # normalize after raw has been transformed to rgb image via raw2rgb - # not strictly necessary, but for processing_mode=='none' this will ensure normalized outputs for the classifier - # and for processing_mode=='neural_network', the processing segmentation model receives normalized inputs - # could be evaded via an additional batchnorm! - # XXX - if args.dataset == 'Microscopy': - mosaic_mean = [0.5663, 0.1401, 0.0731] - mosaic_std = [0.097, 0.0423, 0.008] - normalize_mosaic = T.Normalize(mosaic_mean, mosaic_std) - - # track individual processing steps for visualization - track_stages = args.track_processing or args.track_processing_gradients - if args.processing_mode == 'parametrized': - processor = ParametrizedProcessing( - camera_parameters=dataset.camera_parameters, track_stages=track_stages, batch_norm_output=True) - - elif args.processing_mode == 'neural_network': - processor = NNProcessing(track_stages=track_stages, - normalize_mosaic=normalize_mosaic, batch_norm_output=True) - elif args.processing_mode == 'none': - processor = RawToRGB(reduce_size=True, out_channels=3, track_stages=track_stages, - normalize_mosaic=normalize_mosaic) - - if args.classifier_uri: # fetch classifier from mlflow - print('Fetching classifier: ', end='') - classifier = fetch_from_mlflow(args.classifier_uri, type='classifier', - use_cache=args.cache_downloaded_models) - else: - if dataset.task == 'classification': - classifier = resnet_model( - model=args.classifier_network, - pretrained=args.classifier_pretrained, - in_channels=3, - fc_out_features=len(dataset.classes) - ) - else: - classifier = smp.UnetPlusPlus( - encoder_name=args.smp_encoder, - encoder_depth=5, - encoder_weights='imagenet', - in_channels=3, - classes=1, - activation=None, - ) - - if args.freeze_processor and len(list(iter(processor.parameters()))) == 0: - print('Note: freezing processor without parameters.') - assert not (args.freeze_processor and args.freeze_classifier), 'Likely no parameters to train.' 
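# Editor's note (not part of the original train.py diff): the adversarial branch below
# (args.adv_training) wraps the task loss in WeightedLoss(loss, weight=-1), so gradient descent
# on the processor maximises the frozen classifier's loss, and adds AuxLoss, a weighted auxiliary
# term comparing the adversarial processor's RGB output with the frozen default processor's
# output (l2 or SSIM). WeightedLoss and AuxLoss live in utils.base and are not shown in this
# diff; the classes below are only an illustrative sketch of the assumed composition, not the
# project's implementation.
import torch
import torch.nn as nn

class WeightedLossSketch(nn.Module):
    """Scale a wrapped loss; weight=-1 turns minimisation into maximisation of the task loss."""
    def __init__(self, loss: nn.Module, weight: float):
        super().__init__()
        self.loss = loss
        self.weight = weight

    def forward(self, prediction, target):
        return self.weight * self.loss(prediction, target)

class AuxLossSketch(nn.Module):
    """Weighted auxiliary term comparing adversarial and frozen default processor outputs."""
    def __init__(self, loss_aux: nn.Module, processor_adv: nn.Module,
                 processor_default: nn.Module, weight: float):
        super().__init__()
        self.loss_aux = loss_aux
        self.processor_adv = processor_adv
        self.processor_default = processor_default
        self.weight = weight

    def forward(self, raw_batch):
        rgb_adv = self.processor_adv(raw_batch)
        with torch.no_grad():  # default pipeline is frozen; no gradients flow through it
            rgb_default = self.processor_default(raw_batch)
        return self.weight * self.loss_aux(rgb_adv, rgb_default)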
- - if dataset.task == 'classification': - loss = nn.CrossEntropyLoss() - metrics = [accuracy] - else: - # loss = utils.base.smp_get_loss(args.smp_loss) # XXX: add other losses to args.smp_loss - loss = smp.losses.DiceLoss(mode='binary', from_logits=True) - metrics = [smp.utils.metrics.IoU()] - - loss_aux = None - - if args.adv_training: # setup for failure mode search - - assert args.processing_mode == 'parametrized', f"Processing mode ({args.processing_mode}) should be set to 'parametrized' for adversarial training" - assert args.freeze_classifier, 'Classifier should be frozen for adversarial training' - assert not args.freeze_processor, 'Processor should not be frozen for adversarial training' - - # copy, so that regularization in rgb space between adversarial and original processor can be computed - processor_default = copy.deepcopy(processor) - processor_default.track_stages = args.track_processing - processor_default.eval() - processor_default.to(DEVICE) - - for p in processor_default.parameters(): - p.requires_grad = False - - if args.adv_noise_layer: # optional additional "noise" layer in processor - append_additive_layer(processor) - - if args.adv_aux_loss == 'l2': - regularization = l2_regularization - elif args.adv_aux_loss == 'ssim': - regularization = SSIM(window_size=11) - else: - NotImplementedError(args.adv_aux_loss) - - loss = WeightedLoss(loss=loss, weight=-1) - - loss_aux = AuxLoss( - loss_aux=regularization, - processor_adv=processor, - processor_default=processor_default, - weight=args.adv_aux_weight, - ) - - augmentation = get_augmentation(args.augmentation) - - model = LitModel( - classifier=classifier, - processor=processor, - loss=loss, - lr=args.lr, - loss_aux=loss_aux, - adv_training=args.adv_training, - adv_parameters=args.adv_parameters, - metrics=metrics, - augmentation=augmentation, - is_segmentation_task=dataset.task == 'segmentation', - freeze_classifier=args.freeze_classifier, - freeze_processor=args.freeze_processor, - ) - - state_dict = vars(args).copy() - - # get train_set_dict - if args.state_dict_uri: - state_dict = mlflow.pytorch.load_state_dict(args.state_dict_uri) - train_indices = state_dict['train_indices'] - valid_indices = state_dict['valid_indices'] - - track_indices = list(range(args.track_n_images)) - - if dataset.task == 'classification': - state_dict['classes'] = dataset.classes - state_dict['device'] = DEVICE - state_dict['train_indices'] = train_indices - state_dict['valid_indices'] = valid_indices - state_dict['elements in train set'] = len(train_indices) - state_dict['elements in test set'] = len(valid_indices) - - if args.test_run: - train_indices = train_indices[:args.batch_size] - valid_indices = valid_indices[:args.batch_size] - - train_set = Subset(dataset, indices=train_indices) - valid_set = Subset(dataset, indices=valid_indices) - track_set = Subset(dataset, indices=track_indices) - - train_loader = DataLoader(train_set, batch_size=args.batch_size, num_workers=16, shuffle=True) - valid_loader = DataLoader(valid_set, batch_size=args.batch_size, num_workers=16, shuffle=False) - track_loader = DataLoader(track_set, batch_size=args.batch_size, num_workers=16, shuffle=False) - - with mlflow.start_run(run_name=f"{args.run_name}_{k_iter}", nested=True) as child_run: - - if k_iter == 0: - display_mlflow_run_info(child_run) - - mlflow.pytorch.log_state_dict(state_dict, artifact_path=None) - - hparams = { - 'dataset': args.dataset, - 'processing_mode': args.processing_mode, - 'training_mode': training_mode, - } - if training_mode == 
'adversarial': - hparams['adv_aux_weight'] = args.adv_aux_weight - hparams['adv_aux_loss'] = args.adv_aux_loss - - mlflow.log_params(hparams) - - with open('results/state_dict.txt', 'w') as f: - f.write('python ' + ' '.join(sys.argv) + '\n') - f.write('\n'.join([f'{k}={v}' for k, v in state_dict.items()])) - mlflow.log_artifact('results/state_dict.txt', artifact_path=None) - - mlf_logger = pl.loggers.MLFlowLogger(experiment_name=args.experiment_name, - tracking_uri=args.tracking_uri,) - mlf_logger._run_id = child_run.info.run_id - - reference_processor = processor_default if args.adv_training and args.adv_track_differences else None - - callbacks = [] - if args.track_processing: - callbacks += [TrackImagesCallback(track_loader, - reference_processor, - track_every_epoch=args.track_every_epoch, - track_processing=args.track_processing, - track_gradients=args.track_processing_gradients, - track_predictions=args.track_predictions, - save_tensors=args.track_save_tensors)] - - trainer = pl.Trainer( - gpus=1 if DEVICE == 'cuda' else 0, - min_epochs=args.epochs, - max_epochs=args.epochs, - logger=mlf_logger, - callbacks=callbacks, - check_val_every_n_epoch=args.check_val_every_n_epoch, - ) - - if args.log_model: - mlflow.pytorch.autolog(log_every_n_epoch=10) - print(f'model_uri="{mlflow.get_artifact_uri()}/model"') - - t = trainer.fit( - model, - train_dataloader=train_loader, - val_dataloaders=valid_loader, - ) - - globals().update(locals()) # for convenient access - - return model - - -if __name__ == '__main__': - model = run_train(args) diff --git a/spaces/ma-xu/LIVE/pybind11/tests/test_custom_type_casters.py b/spaces/ma-xu/LIVE/pybind11/tests/test_custom_type_casters.py deleted file mode 100644 index 9475c4516845632da6c6c5b918ae05401d8f3f01..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/pybind11/tests/test_custom_type_casters.py +++ /dev/null @@ -1,90 +0,0 @@ -# -*- coding: utf-8 -*- -import pytest -from pybind11_tests import custom_type_casters as m - - -def test_noconvert_args(msg): - a = m.ArgInspector() - assert msg(a.f("hi")) == """ - loading ArgInspector1 argument WITH conversion allowed. Argument value = hi - """ - assert msg(a.g("this is a", "this is b")) == """ - loading ArgInspector1 argument WITHOUT conversion allowed. Argument value = this is a - loading ArgInspector1 argument WITH conversion allowed. Argument value = this is b - 13 - loading ArgInspector2 argument WITH conversion allowed. Argument value = (default arg inspector 2) - """ # noqa: E501 line too long - assert msg(a.g("this is a", "this is b", 42)) == """ - loading ArgInspector1 argument WITHOUT conversion allowed. Argument value = this is a - loading ArgInspector1 argument WITH conversion allowed. Argument value = this is b - 42 - loading ArgInspector2 argument WITH conversion allowed. Argument value = (default arg inspector 2) - """ # noqa: E501 line too long - assert msg(a.g("this is a", "this is b", 42, "this is d")) == """ - loading ArgInspector1 argument WITHOUT conversion allowed. Argument value = this is a - loading ArgInspector1 argument WITH conversion allowed. Argument value = this is b - 42 - loading ArgInspector2 argument WITH conversion allowed. Argument value = this is d - """ - assert (a.h("arg 1") == - "loading ArgInspector2 argument WITHOUT conversion allowed. Argument value = arg 1") - assert msg(m.arg_inspect_func("A1", "A2")) == """ - loading ArgInspector2 argument WITH conversion allowed. Argument value = A1 - loading ArgInspector1 argument WITHOUT conversion allowed. 
Argument value = A2 - """ - - assert m.floats_preferred(4) == 2.0 - assert m.floats_only(4.0) == 2.0 - with pytest.raises(TypeError) as excinfo: - m.floats_only(4) - assert msg(excinfo.value) == """ - floats_only(): incompatible function arguments. The following argument types are supported: - 1. (f: float) -> float - - Invoked with: 4 - """ - - assert m.ints_preferred(4) == 2 - assert m.ints_preferred(True) == 0 - with pytest.raises(TypeError) as excinfo: - m.ints_preferred(4.0) - assert msg(excinfo.value) == """ - ints_preferred(): incompatible function arguments. The following argument types are supported: - 1. (i: int) -> int - - Invoked with: 4.0 - """ # noqa: E501 line too long - - assert m.ints_only(4) == 2 - with pytest.raises(TypeError) as excinfo: - m.ints_only(4.0) - assert msg(excinfo.value) == """ - ints_only(): incompatible function arguments. The following argument types are supported: - 1. (i: int) -> int - - Invoked with: 4.0 - """ - - -def test_custom_caster_destruction(): - """Tests that returning a pointer to a type that gets converted with a custom type caster gets - destroyed when the function has py::return_value_policy::take_ownership policy applied.""" - - cstats = m.destruction_tester_cstats() - # This one *doesn't* have take_ownership: the pointer should be used but not destroyed: - z = m.custom_caster_no_destroy() - assert cstats.alive() == 1 and cstats.default_constructions == 1 - assert z - - # take_ownership applied: this constructs a new object, casts it, then destroys it: - z = m.custom_caster_destroy() - assert z - assert cstats.default_constructions == 2 - - # Same, but with a const pointer return (which should *not* inhibit destruction): - z = m.custom_caster_destroy_const() - assert z - assert cstats.default_constructions == 3 - - # Make sure we still only have the original object (from ..._no_destroy()) alive: - assert cstats.alive() == 1 diff --git a/spaces/ma-xu/LIVE/thrust/thrust/system/detail/adl/reduce.h b/spaces/ma-xu/LIVE/thrust/thrust/system/detail/adl/reduce.h deleted file mode 100644 index 8a9673b3f957e590c60d7667fc57d4f50069c409..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/thrust/thrust/system/detail/adl/reduce.h +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once - -#include - -// the purpose of this header is to #include the reduce.h header -// of the sequential, host, and device systems. It should be #included in any -// code which uses adl to dispatch reduce - -#include - -// SCons can't see through the #defines below to figure out what this header -// includes, so we fake it out by specifying all possible files we might end up -// including inside an #if 0. 
-#if 0 -#include -#include -#include -#include -#endif - -#define __THRUST_HOST_SYSTEM_REDUCE_HEADER <__THRUST_HOST_SYSTEM_ROOT/detail/reduce.h> -#include __THRUST_HOST_SYSTEM_REDUCE_HEADER -#undef __THRUST_HOST_SYSTEM_REDUCE_HEADER - -#define __THRUST_DEVICE_SYSTEM_REDUCE_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/detail/reduce.h> -#include __THRUST_DEVICE_SYSTEM_REDUCE_HEADER -#undef __THRUST_DEVICE_SYSTEM_REDUCE_HEADER - diff --git a/spaces/magicr/BuboGPT/bubogpt/models/Qformer.py b/spaces/magicr/BuboGPT/bubogpt/models/Qformer.py deleted file mode 100644 index e71b12375e10511858a9c505dc795181e6ce5603..0000000000000000000000000000000000000000 --- a/spaces/magicr/BuboGPT/bubogpt/models/Qformer.py +++ /dev/null @@ -1,1216 +0,0 @@ -""" - * Copyright (c) 2023, salesforce.com, inc. - * All rights reserved. - * SPDX-License-Identifier: BSD-3-Clause - * For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause - * By Junnan Li - * Based on huggingface code base - * https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert -""" - -import math -import os -import warnings -from dataclasses import dataclass -from typing import Optional, Tuple, Dict, Any - -import torch -from torch import Tensor, device, dtype, nn -import torch.utils.checkpoint -from torch import nn -from torch.nn import CrossEntropyLoss -import torch.nn.functional as F - -from transformers.activations import ACT2FN -from transformers.file_utils import ( - ModelOutput, -) -from transformers.modeling_outputs import ( - BaseModelOutputWithPastAndCrossAttentions, - BaseModelOutputWithPoolingAndCrossAttentions, - CausalLMOutputWithCrossAttentions, - MaskedLMOutput, - MultipleChoiceModelOutput, - NextSentencePredictorOutput, - QuestionAnsweringModelOutput, - SequenceClassifierOutput, - TokenClassifierOutput, -) -from transformers.modeling_utils import ( - PreTrainedModel, - apply_chunking_to_forward, - find_pruneable_heads_and_indices, - prune_linear_layer, -) -from transformers.utils import logging -from transformers.models.bert.configuration_bert import BertConfig - -logger = logging.get_logger(__name__) - - -class BertEmbeddings(nn.Module): - """Construct the embeddings from word and position embeddings.""" - - def __init__(self, config): - super().__init__() - self.word_embeddings = nn.Embedding( - config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id - ) - self.position_embeddings = nn.Embedding( - config.max_position_embeddings, config.hidden_size - ) - - # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load - # any TensorFlow checkpoint file - self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - - # position_ids (1, len position emb) is contiguous in memory and exported when serialized - self.register_buffer( - "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)) - ) - self.position_embedding_type = getattr( - config, "position_embedding_type", "absolute" - ) - - self.config = config - - def forward( - self, - input_ids=None, - position_ids=None, - query_embeds=None, - past_key_values_length=0, - ): - if input_ids is not None: - seq_length = input_ids.size()[1] - else: - seq_length = 0 - - if position_ids is None: - position_ids = self.position_ids[ - :, past_key_values_length : seq_length + past_key_values_length - ].clone() - - if input_ids is not None: - embeddings = 
self.word_embeddings(input_ids) - if self.position_embedding_type == "absolute": - position_embeddings = self.position_embeddings(position_ids) - embeddings = embeddings + position_embeddings - - if query_embeds is not None: - embeddings = torch.cat((query_embeds, embeddings), dim=1) - else: - embeddings = query_embeds - - embeddings = self.LayerNorm(embeddings) - embeddings = self.dropout(embeddings) - return embeddings - - -class BertSelfAttention(nn.Module): - def __init__(self, config, is_cross_attention): - super().__init__() - self.config = config - if config.hidden_size % config.num_attention_heads != 0 and not hasattr( - config, "embedding_size" - ): - raise ValueError( - "The hidden size (%d) is not a multiple of the number of attention " - "heads (%d)" % (config.hidden_size, config.num_attention_heads) - ) - - self.num_attention_heads = config.num_attention_heads - self.attention_head_size = int(config.hidden_size / config.num_attention_heads) - self.all_head_size = self.num_attention_heads * self.attention_head_size - - self.query = nn.Linear(config.hidden_size, self.all_head_size) - if is_cross_attention: - self.key = nn.Linear(config.encoder_width, self.all_head_size) - self.value = nn.Linear(config.encoder_width, self.all_head_size) - else: - self.key = nn.Linear(config.hidden_size, self.all_head_size) - self.value = nn.Linear(config.hidden_size, self.all_head_size) - - self.dropout = nn.Dropout(config.attention_probs_dropout_prob) - self.position_embedding_type = getattr( - config, "position_embedding_type", "absolute" - ) - if ( - self.position_embedding_type == "relative_key" - or self.position_embedding_type == "relative_key_query" - ): - self.max_position_embeddings = config.max_position_embeddings - self.distance_embedding = nn.Embedding( - 2 * config.max_position_embeddings - 1, self.attention_head_size - ) - self.save_attention = False - - def save_attn_gradients(self, attn_gradients): - self.attn_gradients = attn_gradients - - def get_attn_gradients(self): - return self.attn_gradients - - def save_attention_map(self, attention_map): - self.attention_map = attention_map - - def get_attention_map(self): - return self.attention_map - - def transpose_for_scores(self, x): - new_x_shape = x.size()[:-1] + ( - self.num_attention_heads, - self.attention_head_size, - ) - x = x.view(*new_x_shape) - return x.permute(0, 2, 1, 3) - - def forward( - self, - hidden_states, - attention_mask=None, - head_mask=None, - encoder_hidden_states=None, - encoder_attention_mask=None, - past_key_value=None, - output_attentions=False, - ): - - # If this is instantiated as a cross-attention module, the keys - # and values come from an encoder; the attention mask needs to be - # such that the encoder's padding tokens are not attended to. 
- is_cross_attention = encoder_hidden_states is not None - - if is_cross_attention: - key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) - value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) - attention_mask = encoder_attention_mask - elif past_key_value is not None: - key_layer = self.transpose_for_scores(self.key(hidden_states)) - value_layer = self.transpose_for_scores(self.value(hidden_states)) - key_layer = torch.cat([past_key_value[0], key_layer], dim=2) - value_layer = torch.cat([past_key_value[1], value_layer], dim=2) - else: - key_layer = self.transpose_for_scores(self.key(hidden_states)) - value_layer = self.transpose_for_scores(self.value(hidden_states)) - - mixed_query_layer = self.query(hidden_states) - - query_layer = self.transpose_for_scores(mixed_query_layer) - - past_key_value = (key_layer, value_layer) - - # Take the dot product between "query" and "key" to get the raw attention scores. - attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) - - if ( - self.position_embedding_type == "relative_key" - or self.position_embedding_type == "relative_key_query" - ): - seq_length = hidden_states.size()[1] - position_ids_l = torch.arange( - seq_length, dtype=torch.long, device=hidden_states.device - ).view(-1, 1) - position_ids_r = torch.arange( - seq_length, dtype=torch.long, device=hidden_states.device - ).view(1, -1) - distance = position_ids_l - position_ids_r - positional_embedding = self.distance_embedding( - distance + self.max_position_embeddings - 1 - ) - positional_embedding = positional_embedding.to( - dtype=query_layer.dtype - ) # fp16 compatibility - - if self.position_embedding_type == "relative_key": - relative_position_scores = torch.einsum( - "bhld,lrd->bhlr", query_layer, positional_embedding - ) - attention_scores = attention_scores + relative_position_scores - elif self.position_embedding_type == "relative_key_query": - relative_position_scores_query = torch.einsum( - "bhld,lrd->bhlr", query_layer, positional_embedding - ) - relative_position_scores_key = torch.einsum( - "bhrd,lrd->bhlr", key_layer, positional_embedding - ) - attention_scores = ( - attention_scores - + relative_position_scores_query - + relative_position_scores_key - ) - - attention_scores = attention_scores / math.sqrt(self.attention_head_size) - if attention_mask is not None: - # Apply the attention mask is (precomputed for all layers in BertModel forward() function) - attention_scores = attention_scores + attention_mask - - # Normalize the attention scores to probabilities. - attention_probs = nn.Softmax(dim=-1)(attention_scores) - - if is_cross_attention and self.save_attention: - self.save_attention_map(attention_probs) - attention_probs.register_hook(self.save_attn_gradients) - - # This is actually dropping out entire tokens to attend to, which might - # seem a bit unusual, but is taken from the original Transformer paper. 
- attention_probs_dropped = self.dropout(attention_probs) - - # Mask heads if we want to - if head_mask is not None: - attention_probs_dropped = attention_probs_dropped * head_mask - - context_layer = torch.matmul(attention_probs_dropped, value_layer) - - context_layer = context_layer.permute(0, 2, 1, 3).contiguous() - new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) - context_layer = context_layer.view(*new_context_layer_shape) - - outputs = ( - (context_layer, attention_probs) if output_attentions else (context_layer,) - ) - - outputs = outputs + (past_key_value,) - return outputs - - -class BertSelfOutput(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.hidden_size, config.hidden_size) - self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - - def forward(self, hidden_states, input_tensor): - hidden_states = self.dense(hidden_states) - hidden_states = self.dropout(hidden_states) - hidden_states = self.LayerNorm(hidden_states + input_tensor) - return hidden_states - - -class BertAttention(nn.Module): - def __init__(self, config, is_cross_attention=False): - super().__init__() - self.self = BertSelfAttention(config, is_cross_attention) - self.output = BertSelfOutput(config) - self.pruned_heads = set() - - def prune_heads(self, heads): - if len(heads) == 0: - return - heads, index = find_pruneable_heads_and_indices( - heads, - self.self.num_attention_heads, - self.self.attention_head_size, - self.pruned_heads, - ) - - # Prune linear layers - self.self.query = prune_linear_layer(self.self.query, index) - self.self.key = prune_linear_layer(self.self.key, index) - self.self.value = prune_linear_layer(self.self.value, index) - self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) - - # Update hyper params and store pruned heads - self.self.num_attention_heads = self.self.num_attention_heads - len(heads) - self.self.all_head_size = ( - self.self.attention_head_size * self.self.num_attention_heads - ) - self.pruned_heads = self.pruned_heads.union(heads) - - def forward( - self, - hidden_states, - attention_mask=None, - head_mask=None, - encoder_hidden_states=None, - encoder_attention_mask=None, - past_key_value=None, - output_attentions=False, - ): - self_outputs = self.self( - hidden_states, - attention_mask, - head_mask, - encoder_hidden_states, - encoder_attention_mask, - past_key_value, - output_attentions, - ) - attention_output = self.output(self_outputs[0], hidden_states) - - outputs = (attention_output,) + self_outputs[ - 1: - ] # add attentions if we output them - return outputs - - -class BertIntermediate(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.hidden_size, config.intermediate_size) - if isinstance(config.hidden_act, str): - self.intermediate_act_fn = ACT2FN[config.hidden_act] - else: - self.intermediate_act_fn = config.hidden_act - - def forward(self, hidden_states): - hidden_states = self.dense(hidden_states) - hidden_states = self.intermediate_act_fn(hidden_states) - return hidden_states - - -class BertOutput(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.intermediate_size, config.hidden_size) - self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - - def forward(self, hidden_states, input_tensor): - hidden_states = 
self.dense(hidden_states) - hidden_states = self.dropout(hidden_states) - hidden_states = self.LayerNorm(hidden_states + input_tensor) - return hidden_states - - -class BertLayer(nn.Module): - def __init__(self, config, layer_num): - super().__init__() - self.config = config - self.chunk_size_feed_forward = config.chunk_size_feed_forward - self.seq_len_dim = 1 - self.attention = BertAttention(config) - self.layer_num = layer_num - if ( - self.config.add_cross_attention - and layer_num % self.config.cross_attention_freq == 0 - ): - self.crossattention = BertAttention( - config, is_cross_attention=self.config.add_cross_attention - ) - self.has_cross_attention = True - else: - self.has_cross_attention = False - self.intermediate = BertIntermediate(config) - self.output = BertOutput(config) - - self.intermediate_query = BertIntermediate(config) - self.output_query = BertOutput(config) - - def forward( - self, - hidden_states, - attention_mask=None, - head_mask=None, - encoder_hidden_states=None, - encoder_attention_mask=None, - past_key_value=None, - output_attentions=False, - query_length=0, - ): - # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 - self_attn_past_key_value = ( - past_key_value[:2] if past_key_value is not None else None - ) - self_attention_outputs = self.attention( - hidden_states, - attention_mask, - head_mask, - output_attentions=output_attentions, - past_key_value=self_attn_past_key_value, - ) - attention_output = self_attention_outputs[0] - outputs = self_attention_outputs[1:-1] - - present_key_value = self_attention_outputs[-1] - - if query_length > 0: - query_attention_output = attention_output[:, :query_length, :] - - if self.has_cross_attention: - assert ( - encoder_hidden_states is not None - ), "encoder_hidden_states must be given for cross-attention layers" - cross_attention_outputs = self.crossattention( - query_attention_output, - attention_mask, - head_mask, - encoder_hidden_states, - encoder_attention_mask, - output_attentions=output_attentions, - ) - query_attention_output = cross_attention_outputs[0] - outputs = ( - outputs + cross_attention_outputs[1:-1] - ) # add cross attentions if we output attention weights - - layer_output = apply_chunking_to_forward( - self.feed_forward_chunk_query, - self.chunk_size_feed_forward, - self.seq_len_dim, - query_attention_output, - ) - if attention_output.shape[1] > query_length: - layer_output_text = apply_chunking_to_forward( - self.feed_forward_chunk, - self.chunk_size_feed_forward, - self.seq_len_dim, - attention_output[:, query_length:, :], - ) - layer_output = torch.cat([layer_output, layer_output_text], dim=1) - else: - layer_output = apply_chunking_to_forward( - self.feed_forward_chunk, - self.chunk_size_feed_forward, - self.seq_len_dim, - attention_output, - ) - outputs = (layer_output,) + outputs - - outputs = outputs + (present_key_value,) - - return outputs - - def feed_forward_chunk(self, attention_output): - intermediate_output = self.intermediate(attention_output) - layer_output = self.output(intermediate_output, attention_output) - return layer_output - - def feed_forward_chunk_query(self, attention_output): - intermediate_output = self.intermediate_query(attention_output) - layer_output = self.output_query(intermediate_output, attention_output) - return layer_output - - -class BertEncoder(nn.Module): - def __init__(self, config): - super().__init__() - self.config = config - self.layer = nn.ModuleList( - [BertLayer(config, i) for i in range(config.num_hidden_layers)] - 
) - - def forward( - self, - hidden_states, - attention_mask=None, - head_mask=None, - encoder_hidden_states=None, - encoder_attention_mask=None, - past_key_values=None, - use_cache=None, - output_attentions=False, - output_hidden_states=False, - return_dict=True, - query_length=0, - ): - all_hidden_states = () if output_hidden_states else None - all_self_attentions = () if output_attentions else None - all_cross_attentions = ( - () if output_attentions and self.config.add_cross_attention else None - ) - - next_decoder_cache = () if use_cache else None - - for i in range(self.config.num_hidden_layers): - layer_module = self.layer[i] - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states,) - - layer_head_mask = head_mask[i] if head_mask is not None else None - past_key_value = past_key_values[i] if past_key_values is not None else None - - if getattr(self.config, "gradient_checkpointing", False) and self.training: - - if use_cache: - logger.warn( - "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." - ) - use_cache = False - - def create_custom_forward(module): - def custom_forward(*inputs): - return module( - *inputs, past_key_value, output_attentions, query_length - ) - - return custom_forward - - layer_outputs = torch.utils.checkpoint.checkpoint( - create_custom_forward(layer_module), - hidden_states, - attention_mask, - layer_head_mask, - encoder_hidden_states, - encoder_attention_mask, - ) - else: - layer_outputs = layer_module( - hidden_states, - attention_mask, - layer_head_mask, - encoder_hidden_states, - encoder_attention_mask, - past_key_value, - output_attentions, - query_length, - ) - - hidden_states = layer_outputs[0] - if use_cache: - next_decoder_cache += (layer_outputs[-1],) - if output_attentions: - all_self_attentions = all_self_attentions + (layer_outputs[1],) - all_cross_attentions = all_cross_attentions + (layer_outputs[2],) - - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states,) - - if not return_dict: - return tuple( - v - for v in [ - hidden_states, - next_decoder_cache, - all_hidden_states, - all_self_attentions, - all_cross_attentions, - ] - if v is not None - ) - return BaseModelOutputWithPastAndCrossAttentions( - last_hidden_state=hidden_states, - past_key_values=next_decoder_cache, - hidden_states=all_hidden_states, - attentions=all_self_attentions, - cross_attentions=all_cross_attentions, - ) - - -class BertPooler(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.hidden_size, config.hidden_size) - self.activation = nn.Tanh() - - def forward(self, hidden_states): - # We "pool" the model by simply taking the hidden state corresponding - # to the first token. 
- first_token_tensor = hidden_states[:, 0] - pooled_output = self.dense(first_token_tensor) - pooled_output = self.activation(pooled_output) - return pooled_output - - -class BertPredictionHeadTransform(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.hidden_size, config.hidden_size) - if isinstance(config.hidden_act, str): - self.transform_act_fn = ACT2FN[config.hidden_act] - else: - self.transform_act_fn = config.hidden_act - self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - - def forward(self, hidden_states): - hidden_states = self.dense(hidden_states) - hidden_states = self.transform_act_fn(hidden_states) - hidden_states = self.LayerNorm(hidden_states) - return hidden_states - - -class BertLMPredictionHead(nn.Module): - def __init__(self, config): - super().__init__() - self.transform = BertPredictionHeadTransform(config) - - # The output weights are the same as the input embeddings, but there is - # an output-only bias for each token. - self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) - - self.bias = nn.Parameter(torch.zeros(config.vocab_size)) - - # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` - self.decoder.bias = self.bias - - def forward(self, hidden_states): - hidden_states = self.transform(hidden_states) - hidden_states = self.decoder(hidden_states) - return hidden_states - - -class BertOnlyMLMHead(nn.Module): - def __init__(self, config): - super().__init__() - self.predictions = BertLMPredictionHead(config) - - def forward(self, sequence_output): - prediction_scores = self.predictions(sequence_output) - return prediction_scores - - -class BertPreTrainedModel(PreTrainedModel): - """ - An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained - models. - """ - - config_class = BertConfig - base_model_prefix = "bert" - _keys_to_ignore_on_load_missing = [r"position_ids"] - - def _init_weights(self, module): - """Initialize the weights""" - if isinstance(module, (nn.Linear, nn.Embedding)): - # Slightly different from the TF version which uses truncated_normal for initialization - # cf https://github.com/pytorch/pytorch/pull/5617 - module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) - elif isinstance(module, nn.LayerNorm): - module.bias.data.zero_() - module.weight.data.fill_(1.0) - if isinstance(module, nn.Linear) and module.bias is not None: - module.bias.data.zero_() - - -class BertModel(BertPreTrainedModel): - """ - The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of - cross-attention is added between the self-attention layers, following the architecture described in `Attention is - all you need `__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, - Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. - argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an - input to the forward pass. 
- """ - - def __init__(self, config, add_pooling_layer=False): - super().__init__(config) - self.config = config - - self.embeddings = BertEmbeddings(config) - - self.encoder = BertEncoder(config) - - self.pooler = BertPooler(config) if add_pooling_layer else None - - self.init_weights() - - def get_input_embeddings(self): - return self.embeddings.word_embeddings - - def set_input_embeddings(self, value): - self.embeddings.word_embeddings = value - - def _prune_heads(self, heads_to_prune): - """ - Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base - class PreTrainedModel - """ - for layer, heads in heads_to_prune.items(): - self.encoder.layer[layer].attention.prune_heads(heads) - - def get_extended_attention_mask( - self, - attention_mask: Tensor, - input_shape: Tuple[int], - device: device, - is_decoder: bool, - has_query: bool = False, - ) -> Tensor: - """ - Makes broadcastable attention and causal masks so that future and masked tokens are ignored. - - Arguments: - attention_mask (:obj:`torch.Tensor`): - Mask with ones indicating tokens to attend to, zeros for tokens to ignore. - input_shape (:obj:`Tuple[int]`): - The shape of the input to the model. - device: (:obj:`torch.device`): - The device of the input to the model. - - Returns: - :obj:`torch.Tensor` The extended attention mask, with a the same dtype as :obj:`attention_mask.dtype`. - """ - # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] - # ourselves in which case we just need to make it broadcastable to all heads. - if attention_mask.dim() == 3: - extended_attention_mask = attention_mask[:, None, :, :] - elif attention_mask.dim() == 2: - # Provided a padding mask of dimensions [batch_size, seq_length] - # - if the model is a decoder, apply a causal mask in addition to the padding mask - # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length] - if is_decoder: - batch_size, seq_length = input_shape - - seq_ids = torch.arange(seq_length, device=device) - causal_mask = ( - seq_ids[None, None, :].repeat(batch_size, seq_length, 1) - <= seq_ids[None, :, None] - ) - - # add a prefix ones mask to the causal mask - # causal and attention masks must have same type with pytorch version < 1.3 - causal_mask = causal_mask.to(attention_mask.dtype) - - if causal_mask.shape[1] < attention_mask.shape[1]: - prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1] - if has_query: # UniLM style attention mask - causal_mask = torch.cat( - [ - torch.zeros( - (batch_size, prefix_seq_len, seq_length), - device=device, - dtype=causal_mask.dtype, - ), - causal_mask, - ], - axis=1, - ) - causal_mask = torch.cat( - [ - torch.ones( - (batch_size, causal_mask.shape[1], prefix_seq_len), - device=device, - dtype=causal_mask.dtype, - ), - causal_mask, - ], - axis=-1, - ) - extended_attention_mask = ( - causal_mask[:, None, :, :] * attention_mask[:, None, None, :] - ) - else: - extended_attention_mask = attention_mask[:, None, None, :] - else: - raise ValueError( - "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format( - input_shape, attention_mask.shape - ) - ) - - # Since attention_mask is 1.0 for positions we want to attend and 0.0 for - # masked positions, this operation will create a tensor which is 0.0 for - # positions we want to attend and -10000.0 for masked positions. 
- # Since we are adding it to the raw scores before the softmax, this is - # effectively the same as removing these entirely. - extended_attention_mask = extended_attention_mask.to( - dtype=self.dtype - ) # fp16 compatibility - extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 - return extended_attention_mask - - def forward( - self, - input_ids=None, - attention_mask=None, - position_ids=None, - head_mask=None, - query_embeds=None, - encoder_hidden_states=None, - encoder_attention_mask=None, - past_key_values=None, - use_cache=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, - is_decoder=False, - ): - r""" - encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): - Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if - the model is configured as a decoder. - encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): - Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in - the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): - Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. - If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` - (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` - instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`. - use_cache (:obj:`bool`, `optional`): - If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up - decoding (see :obj:`past_key_values`). - """ - output_attentions = ( - output_attentions - if output_attentions is not None - else self.config.output_attentions - ) - output_hidden_states = ( - output_hidden_states - if output_hidden_states is not None - else self.config.output_hidden_states - ) - return_dict = ( - return_dict if return_dict is not None else self.config.use_return_dict - ) - - # use_cache = use_cache if use_cache is not None else self.config.use_cache - - if input_ids is None: - assert ( - query_embeds is not None - ), "You have to specify query_embeds when input_ids is None" - - # past_key_values_length - past_key_values_length = ( - past_key_values[0][0].shape[2] - self.config.query_length - if past_key_values is not None - else 0 - ) - - query_length = query_embeds.shape[1] if query_embeds is not None else 0 - - embedding_output = self.embeddings( - input_ids=input_ids, - position_ids=position_ids, - query_embeds=query_embeds, - past_key_values_length=past_key_values_length, - ) - - input_shape = embedding_output.size()[:-1] - batch_size, seq_length = input_shape - device = embedding_output.device - - if attention_mask is None: - attention_mask = torch.ones( - ((batch_size, seq_length + past_key_values_length)), device=device - ) - - # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] - # ourselves in which case we just need to make it broadcastable to all heads. 
- if is_decoder: - extended_attention_mask = self.get_extended_attention_mask( - attention_mask, - input_ids.shape, - device, - is_decoder, - has_query=(query_embeds is not None), - ) - else: - extended_attention_mask = self.get_extended_attention_mask( - attention_mask, input_shape, device, is_decoder - ) - - # If a 2D or 3D attention mask is provided for the cross-attention - # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] - if encoder_hidden_states is not None: - if type(encoder_hidden_states) == list: - encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[ - 0 - ].size() - else: - ( - encoder_batch_size, - encoder_sequence_length, - _, - ) = encoder_hidden_states.size() - encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) - - if type(encoder_attention_mask) == list: - encoder_extended_attention_mask = [ - self.invert_attention_mask(mask) for mask in encoder_attention_mask - ] - elif encoder_attention_mask is None: - encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) - encoder_extended_attention_mask = self.invert_attention_mask( - encoder_attention_mask - ) - else: - encoder_extended_attention_mask = self.invert_attention_mask( - encoder_attention_mask - ) - else: - encoder_extended_attention_mask = None - - # Prepare head mask if needed - # 1.0 in head_mask indicate we keep the head - # attention_probs has shape bsz x n_heads x N x N - # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] - # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] - head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) - - encoder_outputs = self.encoder( - embedding_output, - attention_mask=extended_attention_mask, - head_mask=head_mask, - encoder_hidden_states=encoder_hidden_states, - encoder_attention_mask=encoder_extended_attention_mask, - past_key_values=past_key_values, - use_cache=use_cache, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - query_length=query_length, - ) - sequence_output = encoder_outputs[0] - pooled_output = ( - self.pooler(sequence_output) if self.pooler is not None else None - ) - - if not return_dict: - return (sequence_output, pooled_output) + encoder_outputs[1:] - - return BaseModelOutputWithPoolingAndCrossAttentions( - last_hidden_state=sequence_output, - pooler_output=pooled_output, - past_key_values=encoder_outputs.past_key_values, - hidden_states=encoder_outputs.hidden_states, - attentions=encoder_outputs.attentions, - cross_attentions=encoder_outputs.cross_attentions, - ) - - -class BertLMHeadModel(BertPreTrainedModel): - - _keys_to_ignore_on_load_unexpected = [r"pooler"] - _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"] - - def __init__(self, config): - super().__init__(config) - - self.bert = BertModel(config, add_pooling_layer=False) - self.cls = BertOnlyMLMHead(config) - - self.init_weights() - - def get_output_embeddings(self): - return self.cls.predictions.decoder - - def set_output_embeddings(self, new_embeddings): - self.cls.predictions.decoder = new_embeddings - - def forward( - self, - input_ids=None, - attention_mask=None, - position_ids=None, - head_mask=None, - query_embeds=None, - encoder_hidden_states=None, - encoder_attention_mask=None, - labels=None, - past_key_values=None, - use_cache=True, - output_attentions=None, - output_hidden_states=None, - return_dict=None, - 
return_logits=False, - is_decoder=True, - reduction="mean", - ): - r""" - encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): - Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if - the model is configured as a decoder. - encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): - Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in - the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): - Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in - ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are - ignored (masked), the loss is only computed for the tokens with labels n ``[0, ..., config.vocab_size]`` - past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): - Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. - If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` - (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` - instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`. - use_cache (:obj:`bool`, `optional`): - If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up - decoding (see :obj:`past_key_values`). 
- Returns: - Example:: - >>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig - >>> import torch - >>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased') - >>> config = BertConfig.from_pretrained("bert-base-cased") - >>> model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config) - >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") - >>> outputs = model(**inputs) - >>> prediction_logits = outputs.logits - """ - return_dict = ( - return_dict if return_dict is not None else self.config.use_return_dict - ) - if labels is not None: - use_cache = False - if past_key_values is not None: - query_embeds = None - - outputs = self.bert( - input_ids, - attention_mask=attention_mask, - position_ids=position_ids, - head_mask=head_mask, - query_embeds=query_embeds, - encoder_hidden_states=encoder_hidden_states, - encoder_attention_mask=encoder_attention_mask, - past_key_values=past_key_values, - use_cache=use_cache, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - is_decoder=is_decoder, - ) - - sequence_output = outputs[0] - if query_embeds is not None: - sequence_output = outputs[0][:, query_embeds.shape[1] :, :] - - prediction_scores = self.cls(sequence_output) - - if return_logits: - return prediction_scores[:, :-1, :].contiguous() - - lm_loss = None - if labels is not None: - # we are doing next-token prediction; shift prediction scores and input ids by one - shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() - labels = labels[:, 1:].contiguous() - loss_fct = CrossEntropyLoss(reduction=reduction, label_smoothing=0.1) - lm_loss = loss_fct( - shifted_prediction_scores.view(-1, self.config.vocab_size), - labels.view(-1), - ) - if reduction == "none": - lm_loss = lm_loss.view(prediction_scores.size(0), -1).sum(1) - - if not return_dict: - output = (prediction_scores,) + outputs[2:] - return ((lm_loss,) + output) if lm_loss is not None else output - - return CausalLMOutputWithCrossAttentions( - loss=lm_loss, - logits=prediction_scores, - past_key_values=outputs.past_key_values, - hidden_states=outputs.hidden_states, - attentions=outputs.attentions, - cross_attentions=outputs.cross_attentions, - ) - - def prepare_inputs_for_generation( - self, input_ids, query_embeds, past=None, attention_mask=None, **model_kwargs - ): - # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly - if attention_mask is None: - attention_mask = input_ids.new_ones(input_ids.shape) - query_mask = input_ids.new_ones(query_embeds.shape[:-1]) - attention_mask = torch.cat([query_mask, attention_mask], dim=-1) - - # cut decoder_input_ids if past is used - if past is not None: - input_ids = input_ids[:, -1:] - - return { - "input_ids": input_ids, - "query_embeds": query_embeds, - "attention_mask": attention_mask, - "past_key_values": past, - "encoder_hidden_states": model_kwargs.get("encoder_hidden_states", None), - "encoder_attention_mask": model_kwargs.get("encoder_attention_mask", None), - "is_decoder": True, - } - - def _reorder_cache(self, past, beam_idx): - reordered_past = () - for layer_past in past: - reordered_past += ( - tuple( - past_state.index_select(0, beam_idx) for past_state in layer_past - ), - ) - return reordered_past - - -class BertForMaskedLM(BertPreTrainedModel): - - _keys_to_ignore_on_load_unexpected = [r"pooler"] - _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"] - - def __init__(self, 
config): - super().__init__(config) - - self.bert = BertModel(config, add_pooling_layer=False) - self.cls = BertOnlyMLMHead(config) - - self.init_weights() - - def get_output_embeddings(self): - return self.cls.predictions.decoder - - def set_output_embeddings(self, new_embeddings): - self.cls.predictions.decoder = new_embeddings - - def forward( - self, - input_ids=None, - attention_mask=None, - position_ids=None, - head_mask=None, - query_embeds=None, - encoder_hidden_states=None, - encoder_attention_mask=None, - labels=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, - return_logits=False, - is_decoder=False, - ): - r""" - labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): - Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ..., - config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored - (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]`` - """ - - return_dict = ( - return_dict if return_dict is not None else self.config.use_return_dict - ) - - outputs = self.bert( - input_ids, - attention_mask=attention_mask, - position_ids=position_ids, - head_mask=head_mask, - query_embeds=query_embeds, - encoder_hidden_states=encoder_hidden_states, - encoder_attention_mask=encoder_attention_mask, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - is_decoder=is_decoder, - ) - - if query_embeds is not None: - sequence_output = outputs[0][:, query_embeds.shape[1] :, :] - prediction_scores = self.cls(sequence_output) - - if return_logits: - return prediction_scores - - masked_lm_loss = None - if labels is not None: - loss_fct = CrossEntropyLoss() # -100 index = padding token - masked_lm_loss = loss_fct( - prediction_scores.view(-1, self.config.vocab_size), labels.view(-1) - ) - - if not return_dict: - output = (prediction_scores,) + outputs[2:] - return ( - ((masked_lm_loss,) + output) if masked_lm_loss is not None else output - ) - - return MaskedLMOutput( - loss=masked_lm_loss, - logits=prediction_scores, - hidden_states=outputs.hidden_states, - attentions=outputs.attentions, - ) diff --git a/spaces/mbazaNLP/Finetuned-NLLB-TOURISM-EN-KIN/README.md b/spaces/mbazaNLP/Finetuned-NLLB-TOURISM-EN-KIN/README.md deleted file mode 100644 index a6ce6ee83053afd48f575fc780e0c44bbf3b70db..0000000000000000000000000000000000000000 --- a/spaces/mbazaNLP/Finetuned-NLLB-TOURISM-EN-KIN/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Finetuned NLLB TOURISM EN KIN -emoji: 📊 -colorFrom: blue -colorTo: pink -sdk: gradio -sdk_version: 3.42.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/mehdidc/text_to_image_ddgan/scripts/run_juwelsbooster_conda.sh b/spaces/mehdidc/text_to_image_ddgan/scripts/run_juwelsbooster_conda.sh deleted file mode 100644 index d3d6a4f5752b149bb9cd0094328c1f9ecc2c70f5..0000000000000000000000000000000000000000 --- a/spaces/mehdidc/text_to_image_ddgan/scripts/run_juwelsbooster_conda.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash -x -#SBATCH --account=laionize -#SBATCH --nodes=1 -#SBATCH --ntasks-per-node=4 -#SBATCH --cpus-per-task=24 -#SBATCH --time=06:00:00 -#SBATCH --gres=gpu:4 -#SBATCH --partition=booster -ml CUDA -source /p/project/laionize/miniconda/bin/activate -conda activate ddgan -export CUDA_VISIBLE_DEVICES=0,1,2,3 -echo 
"Job id: $SLURM_JOB_ID" -export TOKENIZERS_PARALLELISM=false -#export NCCL_ASYNC_ERROR_HANDLING=1 -export NCCL_IB_TIMEOUT=50 -export UCX_RC_TIMEOUT=4s -export NCCL_IB_RETRY_CNT=10 -srun python -u $* diff --git a/spaces/merve/measuring-fairness/public/third_party/weepeople.css b/spaces/merve/measuring-fairness/public/third_party/weepeople.css deleted file mode 100644 index 33ed7472967ade6cddc630b1a2ad62597c1cd2b2..0000000000000000000000000000000000000000 --- a/spaces/merve/measuring-fairness/public/third_party/weepeople.css +++ /dev/null @@ -1,14 +0,0 @@ -/* https://github.com/propublica/weepeople This work is licensed under the Creative Commons Attribution-NonCommercial-NoDerivs 3.0 United States License */ - -@font-face { - font-family: 'WeePeople'; - src: url(data:application/font-woff2;charset=utf-8;base64,d09GMgABAAAAAGlAAA8AAAAA4KwAAGjcAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAP0ZGVE0cGh4GYACCeggEEQgKg644grdwATYCJAOCHAuBEAAEIAWFbAeCNj93ZWJmBhvNoxNuTDxsHIAID7ZzNqKCjRMoBrCLIFmsRdl/fWAbSx+vtlRiwYRgHiehmaIe1S1xW9y/toIZegmaX6AImBEUXWQKwMwpfrH/PueHJEX5EKmupu3squ9sUbFcpFWzu6S1LNtybEuWWxI7kW25ptlOnE7iyInTiEkllSMVAoGeAKFdCCHHhVYOjiu00J6rcK38HccdV/yTTfuqSrvTB1VdAnssWbb1CUAz3t0Dyu/iWyXdqZwWNEky0XxglOQDnn9/d+7zbVIRiiw0sWtakTKtSQwBAFUO2WPBJtCFrMo3ZxcL9pb50Lqy+P3b0q87HaXdrwWGD4YFhtRfWoj2bBJiVfo6vVX3wcxIlgcENsufOTRkwfr9r/X/VtnTdtfeFz6BSlhJABIuY7rtjK1Tp+HOfRQgWD4+z8iY3/L1i96nd1qnV9pwAKwM/qES1c44t26FBeUFMfvgmPHiluV1C8GNRjOOvGV/dWiJPWBEEz7QE9D/7y3PAuWbBxSdVHgx7EXHiWGzDWwByNQXrdEvssgDxf5PU7NlOqfTc+V0SudS6Tv+/4e2Zj6o5WAgPwFD7TMA+gBAeQUMtE8k6Bx3ma5MKXDoS9xLx15yjqvogoVu9itPSDncEhCA1hRfYewiG8iQ6zQ2oQOn6BJzkerQHmDF1v/9EBf5Jr6dVWJ4CO2LAAAQAODDP+ErAcD1M9Gv1+nDV22fYwaAHQAIBLWByNFLACCtC94KOKTXyQ8AcAc8F50magIAADjYHnpTdhnoBi8Bz/gfOvG/CcDdDt0nwKueAwB4hCjWo/l+aQqGIRpLDAJAIqLnIB7DtrvXY/RUeZYG/oNo9vddTILRBQf8yewvZ1+dfX729p/V/Uz96a8+nZseP94FaUKzEFE519GbnMXjHxCO8oLBaDJbrDaRSbKi2h1OV547vwD+BxUWebyazx8IhopLSsvKKyqrwpGoXh2riQPg+FpwXJpjAAI4OwtsgNV+wy0AgIcBmF8FQHcFAD1mAEAlf8K4fPhV91EUlZn10LkbrSZEhPQoOXPv4xB63Rj2WSpQG2ch/kZmZyKls59fhrN3zz44u2R2bPYZXZj90+yDltlt4uz2Wd/sIf/sB7Ovzz7xRsA7u3s2Ypn1m2aruNljsw0VRt9saPZtP5TsszuD3v+5b5gdEspnuw3FketyiWt20+zEe4ezhnBg1vcvV2v2w78c6d/N8rMVsyZjAW/mDQt7zmQxGhlvJJjQf8+r4Ynf36X3E9MO27Yxi8G8YwN8B9AG+eA1sGBzWqEDLTn/gu0HTFUSYG9pWlz0o5LGgcD1MAu4H41ZNwxH9adWifuifrGzcnmR3DCjvhpOxAyl6sUrwGX9xFdJgkpLqOfgCwOMbXMqtwKgDcvTArs0sTgM5kfX/ikzUIM0Y/AwRClybsGauAQwlIcVg8vEHIeibbmp1VLwfYmHwUi66jf5F7Q6MDvnRmaQIqWmxb4gjoCDXg4Xscet8d+zmJUi+UmWASiGhgHfPVxiI2W064fvPxbEiaZgiyGKRkNxwShgEqzltG1oKww9+TG9/SupJF6Wk9W7AxCVSJppfkjb1V/FcZxh6lLkuCmGr59KRomaDjT+BWLRAa2ODAIQEaDF2ebeKa6hDqGYthAFR8fSUz/EIqrjZz1sJrgJSU0Bov1EFrkbm8ujpDHFQFAf1tPDoEtKxZku+VavyGw4S7of3hRH1iBKQLCEeEVFQbFIIulmTzqr1LTXAyzqmSAHhNFq2/eTMOPIkKKroZj60Rji0SRSVh4lSiEeEtpk6msOX2Kh+kVmuYhGabMQZI5Z50G61orMumtNSdeOfuKihL4GauGdMpHxqPJvdBLDfSXvVThEScOKrQSx7ZAuzu06ypI6YwsGuMWZetbMAIESpjVESf89484AFKZM3pBUrCCS0px8l89ZvIsVD7BUjStclmGh+3RdWLJc54me0jd8jhp/qJEs2BzYkIdiLOOzD07qFaWoEvJD4y63nIlAU0FxptgzbAQhj0IbQRJVh7VW0Mw9LjQNssPE4um+dXmG2ESDvYl5DmirktI6LTXScu5ApZVaG4RM2zhcbAcMXeni3czDvu8uP6zfK5+wMCt6HboKqoNPSA1DOcLQqTx2cTSYSNH0TJcbW5TSzT2aNDgS687l1/7L1RU56eyYvdoPGMSU2e6iCmcyyMkePdhOubuh5bIuyxW4d2fQrT7lu+qICD3UkrLqh+T2OV8sq9G2RMxaL0lAVT9ULXVMTYqXWgxPe6fdJS6bGe0vNnNrTBkuW/QVfHAsd+ye4kD0tgquWA/MRH8qfTKHta7vH0gDuYEzEDUVrcVBJkBKuDhbW7xDn6gm7rXDFVZunJTeG7pfHBNf6VsJ0JgqCAGipMf5arrE1ohVpaRZ3c4hd7ycOGf4jBJqgilL7peqcIRZFU6dixBfe0Jt01eRcw1lCzteUJvKYULPZRqFrQMzOjNqCWAxuZIgMEyeDXC9wclP/04P4tvvXjZt70fPurwnuIKDQuZZTMxhdaRJnRkfyUMYs/cZGiW8NArykRsBnmF7qLsheRIC9e/IF4expS5ObtiTtsQ9Fi7xi6PrkevaWDfomi1D9SOF7h
cdEifrFUUWBHdU//GN9FsvSHvFZW5x1bAUyZOCf7tQATqLmA2d81aou66rI6ll2VZ4gxKnMKrS6pR4Gny6d9DlK7w5TcSbCzbqF932dUc2CoCpT4DhwW8Hlp7MluyRA9r20bgSmwxRYwsKXZvayjRWFDW1EeBE0QhNQH7ZrhMCni+A6yJiKMsYZfLJtZ571sqCPGrcIs6qqQwv/iOhFMjO46Yv4mVN3vutW5epUk8A9y+y4qDZA8Coreu2yyJnw+RjjMttdyoiIeQ7e7+vwangGkMj1Z4P28wJkqLbtcXV3GkYQ/LefBnsW6ItxwC1IjT+Ow6J35iTWY/upXbl6mLlFsc5myyyv8rq3cOngHJQUU1VIIvPqdd/0rhEHAmZ0aW/10W9uXtUNO1o4y6o7f97/vsqaE9txSLw0WTHhU1OEDqzXMrdZHpvldKPrGFatCVi5Ja6Czvt4nX9i1zhKJRjJexNNziTTAnyMc7p6mysD3PvHX/yX5h+4v8BZ5f1wUOdKRYhANEUhJylyJ/6J+DbJ/5NENgyHRAZw6GEy1bvb9OAlceRJnv8gSl7NKQNDgof5oWf5MNrpPtWe0pP7OCZwd3gAH3mDzNtF4ESOkbz+B/Ve3+4uAj8l6c2GYYY0FPJ7VhdYrktYNIi6uFEHVW7YAohSR6R0O9zcX0tPI89UcGYn88OlFCZDf1Owc3iQxDTHQCYohPkN0OlxhoI0QWzD4yO59lt6BHIgCt52F9KJraf64Y7V9Ot7zg4jMRJwlkPeX9wyaBn+Lw7clDpyYWRUfWYo6QQrXEW/NHXNOjR5v86DPBGOIi/OMt9Uca70zIzSNVDInHBNObmC/1Wg+k3wfUb7csVlSP2UsAqZ8UcL5WwUeHfWsbG8/djOzY0dRSRb+O7ILWe4tgwF2IcJ0Ko4nlg6AThyOpPpDPOsCnxeCZ/TpO2Qbylw0hx+FzitpeUI/UMa0omYhW0Dx/7yuG6F4sgYdB0CswP4EJ+CYGWQodVNPOLk1HXZh4otFwWzr+qgIsZbOD204TVLAyLOc9kPl79U39kWYf9yHG2R2dU690v2F0cQMZCuuuuZ9z31RTG8sMV6dvqLw0dl3pgimh8bFn8pZF++rMJVW81bTt9eY/ejVWA353rz7GG9e/biql2Ubz7/Uj5chLfPXXhbzn6qD9+O8UVR9SrH6XBywgxgF4vl5uobq93X69F+NdrXz1xYHlFq96OL+ms/oj9Wvo5H7uv1C/YVm+P+bAXWpWtVZDZ1X8qg98pXvciUjxUilQLEpvCwdb9x7iQ8hQfKz+HLXl0+HWIa1ahz6nnDEVskjGgLSsdBpjNftQEMJ9frdQ7iJcNcY3oEGIfYRj3N/WKbie5N6XMZHWT1l36wOJdHfyVpGvMpqtKLgSN3ADZBngEW4WuLgurCKM/pQMQX4BaTduebX/wt/Tfcter53IRDB+YbtZhm+cJ2t2kCMAGE0zXTIwPAAUCQtgfJ/piap0VoM2cKhOTrVA3y8IVUAlkk1epIKZxP4h5GMoJForu5v5MhxSwHpubl2w9qwrn0HBgZUDXNM5EMcKo5PHCWQrLi9KCm3eNDQZOg7TD8gCHLB5Xw7PBemWz6sE/fah6I/iOKg6+J/bMq2rMiLuC2huNsTzWMNNWmZ5QhRmgpc1AVohje7T7KiEXbMGfAJ8xJYpC3qeC8YMrY4PDL1sQ0+c01+XzOEXjy6B6Sa5YrvMzaYQqBYld5loY/9dXTmnpQ+J611KY5x9FIAZ16V1YBqRPnLADELTrIA1RkyeBI4O96yzI+wuqf5SMQYzm6RczKurSfVCTqpig/Wbi+aILrQys9QpTtUKSrPSL6gAW0yZ9lqxO961dGLxdP23eRAtcXbsU8hjzf8ww8K3MXKqNmgLMX9s31+99iuyWOqa3BTMx8A5OoTf4DNv8sZK/1Sbluu/K4nge1OvYLO4ydrzjS1Lg6e3tx4uoFOEFZFvGo6IfCFSXBfWWqE1eC1dz6vjxmGwynIhhTqUrVdbY9ucrO+Cw/yRdRyfb0yNu2018eFi/jpb7wz+szhi+xyVZ8Pu6Tv//Fb5jf476m/mX176u/yur542Mx6BXMnA/ETU/Inebyp+bmQaYDD9O43uupzPb4+A5h9NsrfSvl7e+Z/jEJtaWNZRri+45dyGk53qYU4PDd+cIp+J/7x0k/HESyS/P2D9MHGLanOgA+PT21ceq5ud9x5setPNTj9lAx5G/DdsogpF0e6h2Hyf23nDtwc39o3bu5lloCh4I8dvpNjzW7kyefyMC31MUsHYopdhJ8OFU3J/ZahiJJ7+Jj++U1S/NkPjuphcgTSfwuVWbAFVLMtMX2Lu9TTHWR5ngmVe314TBwfc1PZ3xyjDHrbCi+FaxE6qsiuiBxBJtnJO9HYTWHsvJRHjrjqAO23pn5SSw9R3LDFRdORpvK8BG2zrOeTlk1aHvmpf+34FNslZfpvRxpw7ADNoywGOyPpve8GJwku71M9jW1jzIqWJ5iQitPk0q0KEptejQbE402lSgzKrzb6pubj+nkDdVrJs+zvjHrvJBODnCAogPSqKVppDOMyTUZkY4T1kVVfiVb3Qx8qi1chtQ7alVVNnJEdt0wxwKQaOB6tgE7zvrvjIQ5rWdKCiqRz01jFJDJWGPq3qL6m5IQqXXHooqecyi+t7n7VHJjuAy/Ui4zPhSucTk4gA5l1uhF+9nS4jNZ6cs5blCdD6v7JQDOleDdnrp3ePxuyHZNs9J15vPZvnm64nyoggLcWQPm4mufs0iMmEYP3hbl0h+vtWiMSzizKlbdR3zCLUKWM/1p7c+q4mlRXpY33jTOPm/u7Lyt+mw5X7vzogHhiJdhsLYL2RJ3JfBhJu7Z7CvPjtOzd0PEwrALV0c6VlMXHOfMUcIA8pA/hYO1iB4HAKU+9pmLTVsB8U5dXxld4+bIU1bKRsOXM4wiO5JZ0LnUVaT0qxvOFM05qiJ/1pP3lKt3lsKQnkOzomVrVg46wSmdLjVoqbxxs0Xtiv5jlsqwcr8S5FlU0swFBkXnL5JFmWQt5IEJclI3+IRuw6l/01MB06yH3/7FXzf/m/ttetBfJ+pNOp1DU94o3tUfnikwvT7IjLH0XKQxiRSGRJ7nafJa+v5YXR8aXgQj0lM/pmeq3YGQ7+VBt1Oa8vCocD6IYBqpwpcnKY8IfyUxKvtLcjsZpsvEC+IEyfRMT2LaifJox2fFPlxPUc7tBcur6dEjUw/id59PcnfQ/kqVlAj2OC3pcbzk3fTchi01ijsvHdkCTi/VpMr4nt6ZqRlxcQ3Uu2em5VhdsCn9UVE/dVwLjeInLB+Pci9PiXivDuh6u99OD45LT1dQOIrbq8OcORzmTYLahYzYGFP6aDHcXB9r+IJjvd3fbo/pFqZO8bvADYXD9Di8sB8RxO1TUYtZ3qkCBsb1QEn2rciuZGLmdhJgq5QeS8+0kBwVnIX5+yy4tOfuNuRLLsEwaYJFP1spzo1RhaxgyZmTt+gjwotPomQGyuZpXjsO8GFqwwR51Crl+hnHkbdWckU2yXBl/IK0u1zo
c89Cspdx1vJoBqnDcPKcjH0Byi6OQhefrl44P1xGNoKAZVjgJraS8WF4QBc+XWUONseONm6i5gdl1LdNbUzcsDwrijWqivVqzmXDsVa2mR8zBMz5+Ml5e3phR83haYNtOLDXc0QOcYlZF1dyGjAiKMpW4jKG1obSSnujyeGSsk16OpOdEfGjUrlPaluJBmTSsDP1GPKw9bXtVOHllBlgWhEtHx7XK+UjSQ/JJbZ2PdZtPivNUTzVJ8YOX+PbOGWCOSjLyb+kkhlbDUW6a/xsXJqKI7qi+bMRF0yVw7nfx4wdpkZvJEz1+XLb+qwMG38O15PH6oz9BQizWM8DZj/b2E2RFZ3v5Sk4pXOxA5iW7bDY40ZhCbpt1oXQ2x5XwrMxxuYzH/PjD1SimnwJzC+XQBr2aLCXaslnRdA9mjf6fhmrUiMkD8z3sdEymj7zq7pcDKprCyzBE3/E3qtFU1SUKzs/9ktZcJv82JdZ/P/b+7Yfy7Lzrlp73de+nr33udW5VNU5XefUpburuy6nemb6Mh6Px2N7PBPG41HiKHZigp2LYhGcECELghQbFFkRIEcGv/CAEE/hNQIJ4RcShYcQIjkiCFCEAggkECBekC1P8/2+tU/18AfwgkrT3dOXU1Wn9t7ru/4u+kDoA2/3s1BYO/N2lA0m5oHI8lHmPVAF4MwWaF0rV9pyTM2cqvG0Fn73HiRSWwgWJvHaw1VcM3uFcoW+WwzOTF/WqlKZ5bUkJQpJH9OxT+nFbNmcc7S2izwHEw3PgmIGKSMRITdGNTNLPKfY+pU2LZNC2gyofpQgeDpa9v2x4HT1vPXO9CRgwlnBJtC4YTDXQdkB4zHDlAAJ9aQSdXtRhtQagIDLinJXqYf1Sq2sHkyRU0KPVQccrwKTXe0mIGQ5duYrZprqB8wEUUOLLz7/V/JKf0t8VvwEY3k4NK95gsPqNRjOrM7Ein3Tt4ZUDJmJmy2KSSdiySJ9sajmDRdr/UaAC4c9DMejCye7EBdJVxszxrqTEe4qZcy8ltG2pssCW6/Pyy6uXnfRdWu3+SwG881Vx36iDgCN5sBCFIQqdEvJZ9DnfdhgaLcQGozE1tHcZlsXb/2tNsyhZIT7stPow9cfbgYcko3t1DktpIGQt6K4/Y1p8lacpBCABZViyD1rdPDB5zVbsTHGREWu2J64xpeRp3Fbriy2Y4oiEfMUMXiwGDN5zU0mxRSfYsGWUIHK0Aw6wuV4kUObhx5dKjpgnYMJD5gWgX0w2QuT3TNYdaOCoYXgoH2Y9mfZauqzVE/NktUuFNPXE5gqMWcdAxXmXwQKGbLPT2Ue/Y1h6QcdIGzjTEBQx35e4A2GkIiSmkJh6bjn4WwBzF8eGJJHv1bGesBTqb5ahTlqQSrhqbjWUAFMmSDC2oRUho2cH8tyHzNKatqvmetuM3xHDjWyxbgLvI3oPoHaGyeLv20zq6k7t0WVQT1ZRaoITrHdNesDVd6hnvTIuEN39+hBlb4Ea20FsT4KqPfaWiZja0vZ9loZMn+afm74uMrLpspH2r4aZlSGJ7I3VHao29zsp5dSUXP8YJWZ0h/DwDepzK5whUr7njLUQE7HBgRagx6CgvCmOhEn9fBg98HjI+enxcNBtgsgAqWheVbOCnPP21lIMScqM7MHOKzymAdBZAAEN0UpqMSFc/IYg6rRxIUnvf4d1X9YXlTrpuppXVfYc6QJEwd0SaEuG+eDnqrOFm0fwwIqxxNw/R1nPpkwJzYKJiEhq8+/n79s93f7v+fyOlN981WdlqZ+6O4qM6U8pcbuIDSDMDT2Dt1wqyv6mm40AsAioGZV4pPPf09+R/+q+KT4jPia+K0bRz0bcXOYGg/bLVommn53Gjnbxr1lFU3Wv+szNbOMQyoMlxZnnefm/WiBt0JMYFg611zryMyMLsJt52cuwc+eR5R8xP/0je16VC6SnokYy24ASPATZuR9t6LvFNOfim19TBVksoo7/G5NR5Hk6vpyeRWdj7fwwrihi4IYXSP/1Aw4vkRf0xd66xebbnfacdkBqtTX9xOqnofR1muzvASmN+4XTMdSpdb8nF4cl4OHGzg/LLZlKRr0q9UpPthSbdxZhQ0RIS+iMaj8Fp3hAqPw4+wOJdXgOt6sjSoVgA9iYQUofY7tOBO68AumORQNKH06u4FYlIDQjLRUASQ2T1M6/u7YDRkFBE8W64AmZpaVpGJB00EdF7kPfrr3Cr2iyEeFt6bnox4QPZrO8qHG8BsQe/Y4p7dD7TKDf1hgnf6lUTAApYh07iyLnYpKiJkw+7oeNoDzxGzad5izag/Zh/t+l8rQMsEUjN6YLbImEWtp+hRHUolUqz54g3XNBXVemC+gLqViNMkFMF9hqGShge9JWXntZ0EvkApze5HBnxNqHBoOfzpAMbFOp77x8p/k1LNSZZi83G4yjBsodxtIwoFM1zPwJ0DJGtTETNrhVbErB/dX5ZkyoK/QyywXC7nMof9x6PZClo5e8bodyqGiRrYx49p+LOtXg/pReFMtvzh+2aZVgoG2TJXo9Xd3fT6lu7CrgjYut6JX6xnde0qm00ROMrrQZjiRYVYy4QKWmgmLiyR1In1mVufFmkv/QlJF3c6C+ejLITsyWdBBFYmuZOlUIRojqXQD2cJ9pi2PTFIqPZDmKDH3shmeq/GsqJaJ+RguJhyDKQal9WbXnIt5TpEeyxKrC52ZO4An1EL5FOAkI/+n4x2Her342ewuUgKcAzHeoXtpfXa39WPnXQ756uSXogpmoHthWAw74awmx6PiZOFHVAzLs+ywyjlm7fzu838q/7b+szt/tPOfRU+ccMxCERS1w7pJ14OIqk/mengDU+Ao1bVl9He8F1tEaQgWuOmAAcwgvezKEXG1iuggjg9gu7FSKMctiO8sbsQWtwxxBhCsbZxHUpBaPZWY9W0GW5nC9QA/PyRTPheHl+vtyo9XhPTyhmXGEK3MVsUHYRGf4DKWR7qJ4CO82TMImbF244Cp46ptoNtPNcD+hEoDkydwzMtkpIQb41WWo41KykTvu4mEmsJRanepTrjcb+7Nshqk/XCdpYnVMxjHCl/bQSb0hrk9dKo9+4Oy8oHPNdYrrXWpdxaO5VqnDVX9oq5PqsNxeIkqCszSVGMLgx4WUseUPG2i87S5KvI7VhWUnIxbTrRzRa9N94Nd5a37YEJhJD2QflC4bKSpyTC/7+hzzeregpmllj5zDzAFUFIli4dRvPo2VgeY1NGTHkymsrQvf215b7Fn2PK6SCsMo9wwdQwzUtYVxSDdf+eRnDbZfrF4q/3zgW0P5YHXWZZREAltktOFq4e5zMrczDJb9HuuvdM+fr9+pnf/fnvYNsjOdIQVYkBO0cRw2KPTdeigDVEPfRaqkZ9Woh7NTZiWU54tJdU6U22RjnwIwDwBYKCsBoby1WF2kIZyCE80o6UpzHE/vxjsjX9sl4J49iibTQczR0Xj+8X8hz+fNraV5Ruz3Xd7vSJ763j+CXv9YPfPFWmf3lJh9i8rs1dMqlY8p/6zf8/sUxDX2l61/pm728JqfUeIv/P8e/L
v6V8QX2EOwVbWOxbxy6Ijww3ZSeoFvNd09no3K2SqwZfwQ1k/BHgGQJ4NKKPA3Reik1J5GhFuUSCw7eTGtxv8tpDUwixgSIBfI8rnhik0ZfEsthjoYHZMv9v6Ry3KrvIYtnvd76Lj5hqQnIvr9TW7h+Iw3/htd/Or1VXUJk2u19G/k9uijutgIhx3cYMsXFKlRG9wvbhacFOx6TjrzKtjK8EOVRThQOednW9HZ48jruTXzaQ3Smt0BDYRrY58OeDk6ITVTTl0yS7GyCHXlEGpXUR1jVgqIgUdFCuwuEXUG0nhCe/3RSubxA+oVEyzAJVJeihV7lvXF5WWcpelIkKVU7PvMYChjzzo+8DK3hY0b/DT6HH09rjXo46mcIENkPxeel6uK+0ziIvnrjWZZY5BiSFJEbLaUMJVrKIKzyk6XXKiqoIJ4pCWgj9KNwwCVkQyOZTXaxjZs3AFVK2Vs9Q0TdIgCpubSYBwFLADFDUGDQtiQABSf5FizsCfOb8fnascd0mptNjfJ8HCOfPQBleJ+b3kSfno3v2xrdSJ3qcD3gSIuygxFLtJ7jTYfPQma1NOPaXG+asUE9/L3OywvqJCoz48PH1aHDwpT9jw2PrGTdNs065eMyOdFpC7UdZWvkk1lNqTognB4JqX8/xTbf6RFIj7JKmHFwejj1LuzPpZP1VLUw9CUQKcmPVgkxvalcs+MmhW3itb6GfjX8iPps9c2B/YM099uDym63sweOpPjqQbaTOkYoSuo/Qlrin1ehO6+mKvD+BXha4mgyf30jlzYN8N2V0xHFVvN0WVtCETNkARB3cibQM9J3u+lxrxIA0UB37l+T+UC/1Z8UX4dyb9OKqOnG4T0SRFZ3bZRghtv0P38ZqPzyp7Awy4vu93A292KEq4l+h37fiQnbvjFDyK/MZgg0yYMJ8Ikv/ry3iIrrZmbjjwvHx7JqLEHifHNkYgHnwMPkQv2hqNMBiH8TdXETWDviC54uXffbG1iWNETTQXYARwFOjcNhUXSPcf0nDadjRYiF9GTHDk3G4hmCwvzlPn1fVWm2Jlks+N910i2xEEj6geZjMILaOtGyU3XlUY2d0f7as89dkAuzek3UqwICbUoCZ09JKo3Ca77QaPLs1BFqjGPy0KJ0dUCMM0O2F1fCUh3BtVOzt7bV6wBZEDb8UmSQ70Quha0pmS98sasUlEqHbCUm6aZ6aONWJ4rqvZWZcHFZKpszzqdYE+iQvQqKOwwDvCAjgZnhLQi+vMsEKVkY0H3Mun9M5zI6+oINVvqvunbXVqRu2soioen1e3UFbXEFfXoBfhmwcOEX0Oaw3pni5nFJ4c5Giw/jnwEJF7RzWt1x6Y6NBPf1H4tUx309f9sFY9FrCIrksgVh44u++Gmcz6ggJIRX2AKlwt5UKluemtj8ayYPAt1f5McQLVKUC54lApCnV0+cpRz+4v5CCbU8RKSrOXnBULKyberENDcYKxUErxiKoI1006SDNMfUSBAULCVmiw4GX0D3vbKROfBwuLNpaRB8tK2VEvrX3RZMNCezcLfV1V0AqVbBjhLMWWNvUDuztMR4AvKJmawh7Wd06mr1jtdhLx2ef/WD7T7+/84c5/Y98gZNUzBoHccF06U5/trgWy5JSeMfXB2Y3nqCPhXEZNpRer8U5MaevveLWdDW577mfCxqQZCYk3W38GJkdBpWRbftzszZarWCqbjozYoZt5D4SFuBzcaORGMRi8uUEEJMtrN24HLk9RIOeazlXZA7p65idptsxS5+RQCozpKK1pav2wAz4eFtQbAZkRCuoTbYSpUkNZseQSHZfcY2WcY3qGozSga+89lbmTfuWrbPKsHlJCrSAijwylnGycWJnJ1FtnqZPehYm1KwBTMaYXsWxSQ9R0IY+L0joRU3va+2t0B6kfDD7JbINs6POiKfF4Bpm1vihCrdRJ1fv4+M1eWevTLN8P4V7l/owz/d3q+lHvJ2wZMo03W5xpsV+5Ner5QOnsjbPXH+zr1h4E3+rxYdnshXBa+X2jetRrL8tgz9/6SDOHEWHqUmenWTqgLrgYZqv2LMhBSPb6d/PTMBpyr40nWCztas+nZdgduks6+Y56jFpgJAZDIuodZG9kCnluP/nweDz5enp1rMyCvpVe8u9Favp60ivUmB554VLMvCZHPju8ZzEj/4vP/6X8B/pzFP+uxOfixvVG4k+cimHkp9rO7YkJ5AYW7DwNL+Tivr7avFgAdqH8Kr44QlVmXJdy7Vno1c1ruc5EHnkSiR6sNPbCDevqmh1gLhfbY9Npo1yeiavNjZjY+ZZNij6Pu8eo3rCdaMdWMXZ9Fy9srSLBbjDsd3quehOR6WsWQUTr22f1FPz2uhQL86JG55ETlpZLs1r07Ypn8p0HDlAwEYqykd+uqJ04p4ZByNJQCKUTQJ16gXBHqabFDt9IEJOgIojIh7VXAjUeZpZAkuuxUWaCXr4qc5gu6SyhYgZg7p5MMpR5CJvjHO4qbK4OkBTlFwVB6AIYaGS44+y+TPFJgV+wmGjTccp00kq5r9AOKfbYYsP5XMrTKllRxqATmbWoxGQvGVNJ6XFYjcwhLUj5iXVEP/gWJQGYToUE0ogRKgIaDCrIHCm0qYoGVDg6k+JjicJalL5axTqyhWetPywYx3JAf/k7rqfXaXhqLNRexlldPVmNTpYZJYlHhTr3+z/1JpRh5CN/lvF0Cip/tT8vIMive2fVY7c3yvb67Z66DCHoI3skxXw2kyOXj2Azu/JYkwKQuK7ckR2HypvH7fVhBfZrcq+aLpAwvZMZ/cjpbowzsI417xWwE1gX97O+BbzBJcUA+qsiFAl1fLNETjFuoQuUpV7X1mc6M0/SdpkZ1Vwsh+em30wcXT0bgjXULiiRQRUAABsAf0Rqv+PoDhdBV6qnITOg2LsHkD9Mw2E9D7oBFf1fpRvM4x2MqgJ4DacmumZbJQ7ksZ5Eb+Pv0Yu/yB4zm5t2kFc8qCdRuw3nolvtxHaz3VpZsEUw81S2u58p1a4LGBd3djMoT9toQjXgMrU713F60xFKr9jSt+v5uu4RtKtn2/6uy4pobYGuUM9EBGR04+nlIvab8Se1v1eXdrmKYHJr2BaYh82RY76J7NKOVxtdjxfMTWdCzTMI8L8wqhkkn6yDhJuCtREqHIxXUdQcyppZmBmeo9GxKsGjELlkuBSdBuCYqczfs5B6xGsGdYvCjO6ozkJ7YnRFDz22k4WEwxv9CM7TYUph1U2nV490r9+MwMpMGkMZcDfNlE4T70o5p2xnMfFoMP2QnlUzI5WDvnyGWhUwfvQqRR39dlOIZIG/SgEgYA9fM/0LyqBwhYE9uhOpluwOn0q9qPtL/W5KT3ibV7NqVU7L0pQgwLPhXkyao+BPTiq5mDvbL6kVlVPvEzl3002x5/2wcF9G6GLjKgotQEhq4fKwufvpvdm63JW1g4eCDxO7i7ecTPZmI7N3nO3tVvkyobOwn13kr7bFxbhWa2dtbl3d84Uc+PSdvR8tx3N6ps0hL/d18k01CXf8vZkfItZpDx8Iw1BW6tSYTUFJUB
mu6Hj6LZPCzYq69G/4k1SlHrse2I9TTdRIpEDl+ub0QLvd2qVFU9Ur6wzvjD/6/N/IpX5r53+LnjiLlVyUHFltbgQKkCq2HMWuokviNqcb48S9Jx57oIci6bmzVHthq8AOkmAmdhDJ7VADhyfyKT8k4N39tx2xIL/x5KjTE2JWJzeRN+UeJqLIuxYDTd6DbJ12X5jdmIvBNI5TO8YCfwJ5zd9pVDkUtm9Z3+jifNNs0H6tGXjFxNE+ABJ7AuXhpXST9e5YqRx1G5XciaAChQqOjB5sZBAIbrP1MN8ielpslKY1cuNSDnZwacReJoPUH7oo1oLF0lS04EbhlNJjEdp0SeHYFLXHMSsam/AKUdUUS/3C216dskI025g5l0HQtpQPzaDEAfRQzNKyDYIJFQYND887uS+jTs5lAUAg+TwEg6VrFnJquITrZS2WsNlvR+VC6BGJzmQ7OFHfM2o3b5QvqcmRqljtVp/W5T2RDIo75bPsU2Uxc/ZqGT72QDlLSSazj3z/pfKZkpUI0l2ce+FDL1BhxSMl2Zhe4V4b3XnJ7WXN3ezMJx/V0wGEeTPfhr69Sl9qPlIqqgAx6sJZeDjqD4s7fT+krrSh+4P0EtKaei8bkryokoMCwjDSjJC4QMCk8FNK1ARZ+un+u3QibKuz2ePgJpDJsNXo/ZN9W0nqyGD/S8eO6o/RnfvDd43cd0FQwf8WNvZZoDoTSXKZMhXpgz+xRZ30gFHsSZfaQQLuM53ZphnluoxeKf/8+e/KH+jfFH8YNVZkBNRvDVI6S5RlZ1LPNOVznkGWLFq/tDymGFhW7efZJ2o5Vsi0C/6TbRdRFfZy2Y/inAZ/urFuxVYVGW+4xRia/s3MBO1Z/wZxYSLsuVNZgG8incGo13d92c0pGPC/XoGHBHD/JWMHV9ZsV7xFlychugRAiY3wRXRasafjbURMXEhWm8HF5oVWytME2Y6/xuXT7fo1Tl1YTmHNuJHry6iiQqluabocbBcGPGx848s1hY9FR02F5MSAidmUPiEleHUZoc9iu2CBDuL1i0r7aXIO+QVeQHdyvvwj+e3dpyGHYrLph+DpnhcJiHVSZz3Fgh+VGGWyojauwF4RsiUZFMZAuocGgYP1odEQZWEJBPQzgpX/KFeCR4R5wiTZw2xWAF1kwWKAkp2IJ9amkAfxnUc90E6hSFdRrwhHWLPFuUniKpaFB0CKoMRSV6loeNgLWBslX6dKdlkVo5SyYFWkgbK+MNha0iuZUapY9JvSJDuhcNgABA1SBlAupVhkOzVmz2W54lkPKNV0kuYz+2yVKdG3dzIZKCoC4qeMq/ELfOrp4jno71Um7Y0U5OiDmJtkZZ8mIrpZZ6KwwudW5VnQYGMkrFyO6QawDVP91YNDM2numeMvGQpfrhhSpv4y1Ku0EuV8L3vVybTWpXO1SukbgP9clfSTEVU0VIyLHKsWA6Klrug6lzxuhsUqBjLoEyBfRnVqUL1gZ1p8yv1opayhOFD1D+tJNknMnrx7L8sXzswhsyNHx2t9abVn5F+pqsT08uloPMzpomrr+sWpoYOuGhf0OnubIrfgB4EOSzNMxL5bpapoqUDWn3lzzzS69pL1SKuhqk59xqM1JYblKB95XxqhU1mIAsayJojGwasE7C+0C/TVM9MYu7R6NmTDXrpPucMT4FIKhVR7Jx8vq9eT0aQXJpjcjxiqb9TwGL6/IVXTBjdVB4drBLNQEGOiCRnQcspzwRZUK0WZUEUGI1IXzLlZTpPdKZg5Ai7BKp3LqjJ2QwlL+Zbqjq88/3fyu/rtnf9Od/gQvKfOaalTiokKg6icub9ko6ZOYy1W3FiSXJ9HQAjzp3new+ykrbFdx//gkl3Mk/7DBxxPeQMLNMbNP3/Y/Jnb9i4OLldLHkJvcbftoEhY//sJB+V1G/sCNoAy8eNM5yWAqh/Mkg6JtrraPAGs+vqwo3Ju2eidswB9aT1gf7uko152s+zhQHy/orTVeDNpFQBKdBP0fKTXpiroqCaF6iuIJKEYBnSWXepxNFmQQYRZtS50mdPdBqUYmqWgQQEIxpKKcSSILpaPsWAYGtsAK0BB66MgKQ7saeA+jHwQXO1dXivT8+rQ5HCUD4CS9Ip942dpWxwBeMrYVpeu9VNjDlaaPZfqEs5oSsShpJjthuRukqIAtw+dc5QnAn/1gt77B/+WIoSElQgevUJfwKcaIhG/I9Mka4K/ewCMVhJ6Jp/v21PV1ww+Domui4QOqwdG1aNwLsHLUElhdI8ibpPoyuYhSSlLh2hcSTHEwvcC5Rg1QFqz4j96VYRPxTxypnTSo10f9Qw96HfcfqLGTn0sTxvresnPf/b4e4Nf0ipdqItCTkLPngx+3MMRLrENtWEOKmw9JXMKWXVSDZrdWWVm7HkwTvc+XXyyb/InpqKLmoTip47fKUv9BhUUqpbiQcja7NjeU7pHZ5GuwoTDrV+06i2XH1IHdSlkRY2NR43xW89/X/60frbzX1BjUAa0jHi0vINgnDsvOnm7wi3qOesQnG9LkMEWf94f9CM+lLUM1/Sqy+g5seHSox8VF1fx/xHFFR9WtL5Y2jLkSVGxsnVJXa1fyJyciUhvjNj+uC+9mRFjWzS4YMmleA7wRtZR+ZA3OdvmgOo9L0wYVDA1SPlp9wAQJWWejpL87j7UrsdAOlIbl7WFHIpatmWG1pYymGUqUckmxxSlAF10mJb2oBmaFRnjjAYllYMDi/O0oDrX7PPePUCbO+1lTU2Hp8/1fZ7s4imzGYVVdJRUONLfGSYgl0Or3/ZJj0rChDV0MiOdzDRl9UzR3Tx0YoEufCTXQe1aX+mjRn0tQbEOvQ+bwlsR3Z+e5xdVIUSoe+88AQGRQvQyfU8/mLZ3V5/R9UD5JqnlAaP9WCQM2QioBTVKzEgehjzUiaYg3djhR88/W74/3/vpLH3kKSS3aZ7jHC9KkXs6eAU1wD3jHqbiUf7LVe/X/GuH/ff3DyA0oM006Fk6evld56r+YjX4MZ00VSLbnIrf4CKRqqLzUylMaU5cstJq6E78y7vhgQ13B29Dokd8/fmfyAv9KSER+02krW/6D2JlOtj+tovGsf86x7qu0/fsb8vmIlZvU1SWw/kN573PhghcvF0yc4OLyYjtSy6u2Ua8QwfEYek5lnSrx1ydsnvU5QtsDoMBBbjsYL1zgYy5KBS6opVwJPptOj1QptBvLq5XNw4wXQ37NCKn6Y97Md+tks8PqaZIG+/oekPfFtUdRSsDSG1fT1O6rFJkGJTK3MPzniomBT5aJeKm3EhK2gChzkceSB1APyvmLBhmSZ+ykCAyARU2WVZ72wyrCmYOqZ6BzqQjhFZROSa85VU7JqrMQQOaFGfKM+MDnkBRg84Wrcu8Ouv5MqG47SrFrn30hGMJNaPvhMJrqV/zg8KulnCPSE9yYI9hQO4KU9LRpUMwXBZn+8zA+3rdfn5viN2axiK/VA8OLHS20v5Q5+9Mxl/Jsr2E2t7KpRnUb0NAFUox9uHZ68XJ/lSoUNFznWqVzUPzlxaTV1ygLq/o5WnmTLRZNao9c
HdfG41fWiZy1MK1qja/DFETSi1e1SqzUbs7AdYeWh5ZQlHmSE2Ox69bNbXZ2gS/l/WPmiGFGUqiSTlCNqazWGgLjoFzstz1Izzf333+p3KmvyCOEYsHQyZNwRIJrGyWvkSJAiDYOrZjS6jbcPSdCfN/OWWeYhmwYcQJBfVSYHvONK0rXiyvV8uWEfcoVKbM+uZA2rLM1XLBNg+nYtV5NlAJc8IANIaAMbqGSUbxWb+6WU7j0QUAB/Oa68grRd1zuZ3MxMrrakA1ksEbAnyOmtM1xGpWV4sVIHKAxWKusrri310NoGeHnd/11XouOhErjGXPk1/JptHlj6dZW+F/KIcXVL/L3thmFT8zOsfoTmZ1ZlmYdsw0O8eoK3RARdcmJZ29Y54JWDGaOaNJ+4lIBT1aQRU6E9Nli823pqA3UrwKBvYdGv7YQmvecFv2XnMlEB+F86uiVyOwRzOgqLngOIcIj92UiKwl+jB2K2MVS6q7sT+GAPUYGxGn36czkPSCouTUYydMXn2zjK2wolSyHCZVv4ZUlTrJf6a+sNVq83jc+rpwOfy52SCINQcKuDNZSb/VlDp6i+SoLj9e5PN1kmW73v947t94Itczdxg8wHysNp/sZRpKnrnXckDtkJ5AEwdwcq/M2eR61PS+9FqRzuvXUAw5mdMB6Vfj7L55uem996R+pWxz08sURFs67zp8A3gxeFvMlWK/eirJYHTh2OMIOCDqFjW8wxMBzvcfP/8evfGv0LfcE/d3du7MNad9LhViPF136qybD++SDYs+MqJkabpXdbPGhP3EEIh5JCA679Vk+wM9RRKR3nSaVku7iLpNm23ZvgVlYMG2HXTgX+LGgOI+dxPdMPDGASyiy/bEopDRqxHAl/g/21LPwLTAm9VA/JhIlaS/kisIN1BCuTwc3BAPo4jb4kUbkPyHJFUpHrrZbmqPgo+EJ0GlLjV8qbC7qsph/mBq3EuqfMz+sC9DMlJ+SvfBO4wYQq8SszR7mAw8u6mWVQK9Mr0ydQkIOFBcGhZ7QGmwyDr8/tivzwABCuwEUEMml7uuOPUzuNnQvc07HVnU0ZFjmJTo9sUEJCYNTgvYLKCxWDTunrpFoMAzyCo61mGULLyaUI0EvrQI4oPv5LtQpcxgheILyDcovaGERw+8uKSHKthcBewoqN0NVPf35MiYQuefvTzy9VgVnxq+92QY/qrcf9S7finx7xaDYzHYK5bz8KiYvlz8lUHz3uvavmb9pYEmbSX2yuITcuploE61gV5Lrt09VcJjUcboguuP/sjIXGqW0QKTGN8TMG5QXDB5FuTAVVrkWlPCsrZMLCUjytnUKjtvMw8XQnknmiKlMwz11cn88oEcz4v6J6lmHbSyrnH06YpOy/Ju7fdNWum+KSv35cxU3k+drpPGigysQZP9QQ2353Qsxf5Rsz9kq6VrIPeKhvqCwc7PPf8D+b/U5Y7ZqXfGO4ud453znX+x8693/tPO/9j5PgXEWkzFvliKtbgnNlQsvS7e3Nk53Lwi0EBQhZNAPWC9OaTi/GpzIZ/xAmA5vB5aqtxYsvzqkqrcgWFBN8uWBBT/gYXsmwUywH25vrJzNqy/n1xfra6oowfRFltpOpEwJU6iNbG96qZ6QEXdmBeLZ6LTW+emZZ60dLLQxgy7w8m46jPGghq77A/psM/FIT7uyi5A22LIiLEDFHoNqx3g1c31koeVpWhaiBlw+gXOxAyv15Y+yeraRiGKa9OnazC0g/615SV7Q6+4T1em2TCSmr9AiTrPJmto/uwd/BxV+fpLOT0uv1F/8F/vU/+ozpYN4JPJPzvb7LVLN0iycjJU4tfkl66mp9a9PpgEe7pZnCrt6blJPq/ocbBl5cWEavLjvfEd6vmHTuXUGtrC9ZNAndfnS+eWiLH9wrCwmxjShZbzbOlHyHz+8llhF358ULr2IMucG1J9uXQeauoVHCwTwacoVHL9kR++K0ZZOoYdyY/gr5fHZs/QO/6CpMc5uN5odt1CGIUq1s/Rl9P0kr4SOtj13uZMpnf+Vm/2pcaoXOn/2M7NPWXMpFAN9fe6OIBQa8irs0FIH4L3dk89VLPZN3rvPvxTTMv04V9YrX/90T+6ntQfn3zw3U9cnR8Uo/Dut6/eppP+m40xr1ZVvT771b+biM1k8uQlf+/4o4d1Qxk1yYAs79ct2Jyr1aSdzUJYurEMY2DDINMj/+il3hiBaRiGsKjnvbiiHBzcS/LElKeDHtVzInXZKz+SyMf0GeeuHgbfa77SJgOjSu8Lam5SKf+6qKhRdI95jPqL3g2cvaaK3f6gPC7yq484HUbi6xiPruvHjw9++MfvFiKxB1SGnNmjqmfwTLyxhlTDD96++rb0afib70FUblEWf/mwPbg4+/jea9lPfjPra/1N0XzhC9h5JtkHX36n9ZTOZ/b1+Xx99DNPTv/GLd/zlu95y/e85Xv+P+d73saZ2zhzG2du48xtnLmNM7dx5jbO3MaZ2zhzG2du48xtnLmNM7dx5jbO3MaZ2zhzG2du48xtnPn/O87Aj3tHf0N/Y0fu7PQOegf6G9//Gn7u7Pwf/gBC4AB42mNgZGBgAOLrh+yy4vltvjLIczCAwLVpRt8hdNd2Bob/daytbCAuBwMTiAIAS7ELYwAAeNpjYGRgYP31PwpI/mYAAtZWBkYGVOAOAGrhBCwAAAB42m3PKwsCQRSG4W+W3W4x223aTRq8gMltWmwaDCJaFkHQaLIY7IIY7dsEwWITi/4Ag128vMMOWBx4+M6Zw8ww3l0lsbyN5B8tk0YNLfoTLtQdLDBAFn2sUECUzHQjy8hhjQl7FTIF7jFDNJBhf4cHdZ28kk20UaXfk0uMELpz9s0iswPZI7fkFDH12Z+r687/wd9eUvD8pXljzKc/TkyfJ8Mk7SyYSV80s1AveNpjYGDQgUCmQ6wu7HGc87jf8F7h3yeUJyolXiE5Q3qVrJP8NCU5lR61OxoxWlk6cron9KsMFxnfMa0w/2JlZb3HzsIxznmS6xv3OZ4LvKf4fPC7E1ARpBOyJSwufEukStS2mKy4sISupH0pn9LVMqOy+/DCOQA/+TbcAHjaY2BkYGBwZ3JlEGMAASYgZmQAiTkw6IEEABOHARgAeNqNUV1LAkEUPaMWSmASEdFDLL63mrkmBkEEPhTEkn08t36ktLm2jtZTv6Mf0lO/oOwX9Fd66szstIkWxGWWM+eee++5swCW8YIkRCoDYJMnwgIrvEU4gSyKBidRxb7BKeQxNngBD3gyeBF5kTM4jVVRMjiDqqgbvISKeDT4FWvi2eA3FMXE4Amy4tPgd+QS6Qh/JLGRWEeJnrbpxoKLEAG/I3jw0UMTV7hEm+HyBBiQbeOU55oan9mQlTbrVezhHMfUnxDNV23N1M0rrBnFBW8hhvQRoM/s9CQXDTJF7fyH7VIp6Vrpx3GFzd2qzN6y642eJ9Ehqzb0uL0Nh6eCMnYZzj+8//ZOB0SediyZs3BIrqd1FlEfrT/et0u95Jwhaigw7nXYZEI9f1prkwnpo6A9etxCbSrjTc+oVu94pCda
2CGncg57l/kGNSKHzPcfb1HdoVbtJemgqfsPOG3EWz3u3sAdGbVNyAr/CzM7bOd42m3Mx04CYRhG4fOCgAURvQa7qP/8zFAsi4k6NgTsla1KYgwbF168Ccp87jibZ3fIMOqnzyvjOgZllCXLIksss8Iqa6yzQYVNttjGEeCpEhJRo06DJjvsssc+hxyR/D5OOOWMc1pc0KZDl0uuuOaGW+6454FHnnjmhZ4mlFNeBU1qStOaUVGzKmlOZc1rIf/28T14D1J84euz71zs/vTO/RuY3qyaoRmZNbNuNsymGaf6JDVKjZKDIVmuNI4AAAAAAVpw2jcAAA==) format('woff'); - font-weight: normal; - font-style: normal; - -} - -.weepeople { - font-family: "WeePeople"; -} \ No newline at end of file diff --git a/spaces/merve/measuring-fairness/source/measuring-fairness/gs.js b/spaces/merve/measuring-fairness/source/measuring-fairness/gs.js deleted file mode 100644 index f3f72c87ecdb3e28fb4f4d198d70900b431151c2..0000000000000000000000000000000000000000 --- a/spaces/merve/measuring-fairness/source/measuring-fairness/gs.js +++ /dev/null @@ -1,106 +0,0 @@ -/* Copyright 2020 Google LLC. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - - - - -window.makeGS = function(){ - var gs = {} - - var bodySel = d3.select('body') - - var prevSlideIndex = -1 - function updateSlide(i){ - var slide = slides[i] - if (!slide) return - - gs.prevSlide = gs.curSlide - gs.curSlide = slide - - var dur = gs.prevSlide ? 500*1 : 0 - - sel.personSel.transition().duration(dur) - .translate(d => d.pos[slide.pos]) - - sel.textSel.transition().duration(dur) - .at({fill: slide.textFill}) - - - sel.rectSel.transition('opacity').duration(dur) - .at({opacity: slide.rectOpacity}) - - if (!slide.animateThreshold){ - sel.rectSel.transition('fill').duration(dur) - .at({fill: slide.rectFill}) - - sel.textSel.transition('stroke').duration(dur) - .st({strokeWidth: slide.textStroke}) - - slider.setSlider(slide.threshold, true) - bodySel.transition('gs-tween') - } else { - sel.rectSel.transition('fill').duration(dur) - sel.textSel.transition('stroke').duration(dur) - - bodySel.transition('gs-tween').duration(dur*2) - .attrTween('gs-tween', () => { - var i = d3.interpolate(slider.threshold, slide.threshold) - - return t => { - slider.setSlider(i(t)) - } - }) - } - - - sel.truthAxis.transition().duration(dur) - .st({opacity: slide.truthAxisOpacity}) - - sel.mlAxis.transition().duration(dur) - .st({opacity: slide.mlAxisOpacity}) - - sel.fpAxis.transition().duration(dur) - .st({opacity: slide.fpAxisOpacity}) - - sel.sexAxis.transition().duration(dur) - .st({opacity: slide.sexAxisOpacity}) - - sel.brAxis.transition().duration(dur) - .st({opacity: slide.brAxisOpacity}) - - sel.botAxis.transition().duration(dur) - .translate(slide.botAxisY, 1) - - - prevSlideIndex = i - slides.curSlide = slide - } - - gs.graphScroll = d3.graphScroll() - .container(d3.select('.container-1')) - .graph(d3.selectAll('container-1 #graph')) - .eventId('uniqueId1') - .sections(d3.selectAll('.container-1 #sections > div')) - .offset(innerWidth < 900 ? 
300 : 520) - .on('active', updateSlide) - - return gs -} - - - - - -if (window.init) window.init() diff --git a/spaces/merve/measuring-fairness/source/measuring-fairness/init.js b/spaces/merve/measuring-fairness/source/measuring-fairness/init.js deleted file mode 100644 index 5a8df63793d90464eb148443787eb91e2b34180b..0000000000000000000000000000000000000000 --- a/spaces/merve/measuring-fairness/source/measuring-fairness/init.js +++ /dev/null @@ -1,200 +0,0 @@ -/* Copyright 2020 Google LLC. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - - - - -d3.select('body').selectAppend('div.tooltip.tooltip-hidden') - -nCols = 12 - -window.colors = { - well: d3.color('#669399') + '', - sick: d3.color('#EE2A2A') + '', - - // well: d3.color('green') + '', - // sick: d3.color('purple'), - - // well: d3.color('#e9a3c9') + '', - // sick: d3.color('#a1d76a'), - - // well: d3.color('#e9a3c9') + '', - // sick: d3.color('#a1d76a'), - - // well: d3.color('#e9a3c9') + '', - // sick: d3.color('#a1d76a'), - - // well: d3.color('#865327') + '', - // sick: d3.color('#012394'), - - // well: d3.color('#012394') + '', - // sick: d3.color('#FBC20F') + '', - - // well: d3.color('#012394') + '', - // sick: d3.color('#E71E24') + '', - - // well: d3.color('#A9159C') + '', - // sick: d3.color('#E71E24') + '', - - // well: d3.color('#A9159C') + '', - // sick: d3.color('#012394') + '', - - // well: d3.color('orange') + '', - // sick: d3.color('#012394') + '', - - -} - -window.colors = { - well: d3.interpolate(colors.well, '#fff')(.5), - sick: d3.interpolate(colors.sick, '#fff')(.2), -} - -window.lcolors = { - well: d3.interpolate(colors.well, '#fff')(.5), - sick: d3.interpolate(colors.sick, '#fff')(.35) -} -window.llcolors = { - well: d3.interpolate(colors.well, '#fff')(.5), - sick: d3.interpolate(colors.sick, '#fff')(1) -} -window.dcolors = { - well: d3.interpolate(colors.well, '#000')(.65), - sick: d3.interpolate(colors.sick, '#000')(.65) -} - -// window.colors = { -// well: d3.color('#BEF5FF') + '', -// sick: d3.color('#FCC5C3') + '', -// } - -// window.colors = { -// well: d3.color('#669399') + '', -// sick: d3.color('#EE2A2A') + '', -// } - -// window.lcolors = { -// well: d3.interpolate(colors.well, '#fff')(.3), -// sick: d3.interpolate(colors.sick, '#fff')(.3) -// } -// window.llcolors = { -// well: d3.interpolate(colors.well, '#fff')(.2), -// sick: d3.interpolate(colors.sick, '#fff')(.2) -// } - -// window.lcolors = { -// well: '#CFFCF6', -// sick: '#FFBD96' -// } - -// copy(logColors()) -function logColors(){ - return ` - body{ - --colors-well: ${d3.rgb(colors.well)}; - --colors-sick: ${d3.rgb(colors.sick)}; - --lcolors-well: ${d3.rgb(lcolors.well)}; - --lcolors-sick: ${d3.rgb(lcolors.sick)}; - --dcolors-well: ${d3.rgb(dcolors.well)}; - --dcolors-sick: ${d3.rgb(dcolors.sick)}; - } - ` -} - - - -window.init = function(){ - console.clear() - - graphSel = d3.select('#graph').html('').append('div') - totalWidth = graphSel.node().offsetWidth - 
totalWidth = 400 - - c = d3.conventions({ - sel: graphSel.st({marginTop: 40}), - margin: {top: 20}, - totalWidth, - totalHeight: totalWidth, - }) - - students = makeStudents() - sel = makeSel() - mini = makeMini() - slider = makeSlider() - slides = makeSlides() - gs = makeGS() - - function sizeGraphSel(){ - var scale = (totalWidth + 35)/(innerWidth - 10) // off by one, s is 35 - scale = d3.clamp(1, scale, 2) - - graphSel.st({ - transform: `scale(${1/scale})`, - transformOrigin: '0px 0px', - - }) - } - sizeGraphSel() - d3.select(window).on('resize', sizeGraphSel) - -} -init() - - - - - -!(function(){ - var footnums = '¹²³' - - d3.selectAll('.footstart').each(function(d, i){ - d3.select(this) - .at({ - href: '#footend-' + i, - }) - .text(footnums[i]) - .parent().at({id: 'footstart-' + i}) - }) - - d3.selectAll('.footend').each(function(d, i){ - d3.select(this) - .at({ - href: '#footstart-' + i, - id: 'footend-' + i, - }) - .text(footnums[i]) - }) - - - d3.selectAll('#sections wee, #graph .weepeople').attr('aria-hidden', true) - -})() - - - - - - - - - - - - - - - - - diff --git a/spaces/merve/uncertainty-calibration/public/anonymization/make-gs.js b/spaces/merve/uncertainty-calibration/public/anonymization/make-gs.js deleted file mode 100644 index 4eb1aaeffeb2a69e726a9d452d7eea7b3352b318..0000000000000000000000000000000000000000 --- a/spaces/merve/uncertainty-calibration/public/anonymization/make-gs.js +++ /dev/null @@ -1,105 +0,0 @@ -window.makeGS = function(){ - var prevSlideIndex = -1 - function updateSlide(i){ - var slide = slides[i] - if (!slide) return - - d3.select('.tooltip').classed('tooltip-hidden', true) - - var dur = 500 - - sel.student.transition('xKey').duration(dur).delay(dur ? slide.circleDelayFn : 0) - .translate(d => (d.isAdditionalStudent && slide.xKey != 'plagerizedShifted') ? [0,0]: d.pos[slide.xKey]) - - - if (sel.rectAt[slide.xKey]){ - sel.uniqueBox.transition('at').duration(dur) - .delay(d => dur ? slide.circleDelayFn(d.d0) : 0) - .at(sel.rectAt[slide.xKey]) - .translate(d => d.d0.group[slide.xKey].pos) - } - - sel.uniqueBox.transition().duration(dur) - .st({opacity: slide.showUniqueBox ? 1 : 0}) - - sel.uniqueSeasonBox.transition() - .delay((d, i) => slide.showUniqueSeasonBox ? dur*2 + i*40 : 0).duration(slide.showUniqueSeasonBox ? 0 : dur) - .st({opacity: slide.showUniqueSeasonBox ? 1 : 0}) - - - if (sliders.headsProb != slide.headsProbTarget && slide.animateHeadsProbSlider != -1){ - var headI = d3.interpolate(sliders.headsProb, slide.headsProbTarget) - if (window.headSliderTimer) window.headSliderTimer.stop() - window.headSliderTimer = d3.timer(ms => { - var dur = slide.animateHeadsProbSlider ? 2000 : 1 - var t = d3.easeCubicInOut(d3.clamp(0, ms/dur, 1)) - sliders.updateHeadsProb(headI(t)) - if (t == 1) headSliderTimer.stop() - }) - } - - if (sliders.population != slide.populationTarget){ - var popI = d3.interpolate(sliders.population, slide.populationTarget) - if (window.popSliderTimer) window.popSliderTimer.stop() - window.popSliderTimer = d3.timer(ms => { - var dur = slide.animatePopulationSlider ? 2000 : 1 - var t = d3.easeCubicInOut(d3.clamp(0, ms/dur, 1)) - sliders.updatePopulation(Math.round(popI(t)/2)*2) - if (t == 1) popSliderTimer.stop() - }) - } - - axii.stateAxis.transition().duration(dur/2) - .st({opacity: slide.showStateAxis ? 1 : 0}) - axii.ageAxis.transition().duration(dur/2) - .st({opacity: slide.showAgeAxis ? 1 : 0}) - axii.seasonAxis.transition().duration(dur/2) - .st({opacity: slide.showSeasonAxis ? 
1 : 0}) - axii.headAxis.transition().duration(dur/2) - .st({opacity: slide.showHeadAxis ? 1 : 0}) - axii.headCaptionAxis.transition().duration(dur/2) - .st({opacity: slide.showHeadCaptionAxis ? 1 : 0}) - estimates.axisSel.transition().delay(dur).duration(dur/2) - .st({opacity: slide.showHistogramAxis ? 1 : 0}) - estimates.activeSel.transition().delay(dur).duration(dur/2) - .st({opacity: slide.showHistogramAxis ? 1 : 0}) - // axii.estimateAxis.transition().delay(dur).duration(dur/2) - // .st({opacity: slide.showEstimate && !slide.enterHistogram ? 1 : 0}) - // axii.plagerizedAxis.transition().delay(dur).duration(dur/2) - // .st({opacity: slide.showPlagerizedAxis ? 1 : 0}) - - - annotationSel.transition().duration(dur/2) - .st({opacity: d => i == d.slide ? 1 : 0}) - - estimates.containerSel.transition('xKey').duration(dur/2) - .st({opacity: slide.showHistogram ? 1 : 0}) - - if (slide.enterHistogram){ - estimates.render(true) - } else { - window.flipAllCoinsTimer._time = Infinity - } - if (slide.enterHistogram === 0) estimates.estimateSel.classed('active', 1) - - - // Display the default coin flip state if the histogram is not visible. - sel.flipCircle.transition().duration(dur) - .at({transform: d => { - return slide.showFlipCircle && d.coinVals[estimates.active.index] < sliders.headsProb ? 'scale(1)' : 'scale(.1)'}}) - - prevSlideIndex = i - slides.curSlide = slide - } - - var gs = d3.graphScroll() - .container(d3.select('.container-1')) - .graph(d3.selectAll('container-1 #graph')) - .eventId('uniqueId1') - .sections(d3.selectAll('.container-1 #sections > div')) - .offset(300) - .on('active', updateSlide) -} - - -if (window.init) window.init() diff --git a/spaces/merve/uncertainty-calibration/source/fill-in-the-blank/init-sent.js b/spaces/merve/uncertainty-calibration/source/fill-in-the-blank/init-sent.js deleted file mode 100644 index 263a35a62a0fa9f2064834bc78a93222c8040897..0000000000000000000000000000000000000000 --- a/spaces/merve/uncertainty-calibration/source/fill-in-the-blank/init-sent.js +++ /dev/null @@ -1,136 +0,0 @@ -/* Copyright 2021 Google LLC. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - - -window.initSent = async function(sent, sel){ - var isHamlet = sent.class == 'hamlet' - var isMobile = innerWidth < 900 - - var sel = d3.select('.' + sent.class) - .st({opacity: .5, marginBottom: isHamlet ? 
'' : 40}) - - - // Load completitions - var str = sent.str - while (str.includes('__')) str = str.replace('__', '_') - str = str.replace('_', 'things') - - var tokens = tokenizer.tokenizeCLS(str) - .filter(d => d < 30522) - - var topTokens = await post('embed_group_top', {tokens}) - topTokens.forEach(sent => { - sent.forEach(d => d.str = tokenizer.vocab[d.i]) - }) - - var displayTokens = tokens - .slice(1) - .map((vocabIndex, i) => { - return {i, str: bertLargeVocab[vocabIndex].replace('##', '')} - }) - displayTokens.pop() - - - sel.html('').st({opacity: 1}) - if (!sel.node()) return - - var divSel = sel.append('div') - .st({position: 'relative'}) - var svgSel = divSel.append('svg') - .st({position: 'absolute', top: 0, zIndex: -10}) - - var tokenSel = divSel - .append('div.token-container') - .st({padding: 20, paddingLeft: 0, paddingRight: 0, fontSize: 20}) - .appendMany('button.token', displayTokens) - .text(d => d.str) - .on('click', drawToken) - - var connectionPath = svgSel.append('path').at({fill: 'none', stroke: '#000', strokeWidth: 1}) - - var padding = 5 - var width = divSel.node().offsetWidth - var botWidth = isMobile ? width - padding*2 : 580 - - var botTextSel = divSel.append('div.top-sents') - .translate([width/2 - botWidth/2 - padding + .5, 15]) - .st({ - width: botWidth, - height: 170, - outline: '1px solid #000', - padding: padding, - // position: 'absolute', - background: '#fff', - overflowY: 'scroll', - fontSize: isMobile ? 10 : '', - }) - - if (isHamlet){ - divSel.append('div.caption') - .text(`BERT's predictions for what should fill in the hidden word`) - .st({fontWeight: '', lineHeight: '1.1em', fontSize: 14, textAlign: 'center', width: '100%', marginTop: 20}) - } - - var curIndex = -1 - function drawToken(token){ - var node = tokenSel.filter(d => d == token).node() - var x = node.offsetLeft + node.offsetWidth/2 - var y = node.offsetTop + node.offsetHeight - - var y1 = botTextSel.node().offsetTop - - connectionPath.at({d: ['M', x, y, 'L', width/2, y1 + 15].join(' ')}) - - var completionSel = botTextSel.html('').appendMany('span', topTokens[token.i + 1]) - .st({display: 'inline-block', fontFamily: 'monospace', width: isMobile ? '47%' : '31%', borderBottom: '1px solid #ccc', margin: 4, fontSize: innerWidth < 350 ? 12 : isMobile ? 13 : 14 }) - - completionSel.append('span') - .st({color: '#ccc'}) - .html(d => { - var str = d3.format('.3f')(d.p*100) + '% ' - if (str.length < 8) str = ' ' + str - return str - }) - - completionSel.append('span') - .text(d => d.str.replace('▁', '')) - - - tokenSel - .text(d => d.str) - .classed('active', false) - .filter(d => d == token) - .classed('active', true) - .text(d => d.str.split('').map(d => '_').join('')) - } - - var i = displayTokens.length - (isHamlet ? 
2 : 2) - if (tokens.includes(2477)) i = tokens.indexOf(2477) - 1 - drawToken(displayTokens[i]) - - var topTokensSel = sel.append('div.top-tokens') -} - - - - - - - - - - - -if (window.init) init() diff --git a/spaces/mikebars/huggingface/assets/index-6cef24ca.css b/spaces/mikebars/huggingface/assets/index-6cef24ca.css deleted file mode 100644 index 4a5c97a95abfd594b510cbbf344b6237352a6e68..0000000000000000000000000000000000000000 --- a/spaces/mikebars/huggingface/assets/index-6cef24ca.css +++ /dev/null @@ -1 +0,0 @@ -*,:before,:after{box-sizing:border-box;border-width:0;border-style:solid;border-color:#e5e7eb}:before,:after{--tw-content: ""}html{line-height:1.5;-webkit-text-size-adjust:100%;-moz-tab-size:4;-o-tab-size:4;tab-size:4;font-family:ui-sans-serif,system-ui,-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Helvetica Neue,Arial,Noto Sans,sans-serif,"Apple Color Emoji","Segoe UI Emoji",Segoe UI Symbol,"Noto Color Emoji";font-feature-settings:normal}body{margin:0;line-height:inherit}hr{height:0;color:inherit;border-top-width:1px}abbr:where([title]){-webkit-text-decoration:underline dotted;text-decoration:underline dotted}h1,h2,h3,h4,h5,h6{font-size:inherit;font-weight:inherit}a{color:inherit;text-decoration:inherit}b,strong{font-weight:bolder}code,kbd,samp,pre{font-family:ui-monospace,SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,monospace;font-size:1em}small{font-size:80%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}table{text-indent:0;border-color:inherit;border-collapse:collapse}button,input,optgroup,select,textarea{font-family:inherit;font-size:100%;font-weight:inherit;line-height:inherit;color:inherit;margin:0;padding:0}button,select{text-transform:none}button,[type=button],[type=reset],[type=submit]{-webkit-appearance:button;background-color:transparent;background-image:none}:-moz-focusring{outline:auto}:-moz-ui-invalid{box-shadow:none}progress{vertical-align:baseline}::-webkit-inner-spin-button,::-webkit-outer-spin-button{height:auto}[type=search]{-webkit-appearance:textfield;outline-offset:-2px}::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{-webkit-appearance:button;font:inherit}summary{display:list-item}blockquote,dl,dd,h1,h2,h3,h4,h5,h6,hr,figure,p,pre{margin:0}fieldset{margin:0;padding:0}legend{padding:0}ol,ul,menu{list-style:none;margin:0;padding:0}textarea{resize:vertical}input::-moz-placeholder,textarea::-moz-placeholder{opacity:1;color:#9ca3af}input::placeholder,textarea::placeholder{opacity:1;color:#9ca3af}button,[role=button]{cursor:pointer}:disabled{cursor:default}img,svg,video,canvas,audio,iframe,embed,object{display:block;vertical-align:middle}img,video{max-width:100%;height:auto}[hidden]{display:none}*,:before,:after{--tw-border-spacing-x: 0;--tw-border-spacing-y: 0;--tw-translate-x: 0;--tw-translate-y: 0;--tw-rotate: 0;--tw-skew-x: 0;--tw-skew-y: 0;--tw-scale-x: 1;--tw-scale-y: 1;--tw-pan-x: ;--tw-pan-y: ;--tw-pinch-zoom: ;--tw-scroll-snap-strictness: proximity;--tw-ordinal: ;--tw-slashed-zero: ;--tw-numeric-figure: ;--tw-numeric-spacing: ;--tw-numeric-fraction: ;--tw-ring-inset: ;--tw-ring-offset-width: 0px;--tw-ring-offset-color: #fff;--tw-ring-color: rgb(59 130 246 / .5);--tw-ring-offset-shadow: 0 0 #0000;--tw-ring-shadow: 0 0 #0000;--tw-shadow: 0 0 #0000;--tw-shadow-colored: 0 0 #0000;--tw-blur: ;--tw-brightness: ;--tw-contrast: ;--tw-grayscale: ;--tw-hue-rotate: ;--tw-invert: ;--tw-saturate: ;--tw-sepia: ;--tw-drop-shadow: 
;--tw-backdrop-blur: ;--tw-backdrop-brightness: ;--tw-backdrop-contrast: ;--tw-backdrop-grayscale: ;--tw-backdrop-hue-rotate: ;--tw-backdrop-invert: ;--tw-backdrop-opacity: ;--tw-backdrop-saturate: ;--tw-backdrop-sepia: }::backdrop{--tw-border-spacing-x: 0;--tw-border-spacing-y: 0;--tw-translate-x: 0;--tw-translate-y: 0;--tw-rotate: 0;--tw-skew-x: 0;--tw-skew-y: 0;--tw-scale-x: 1;--tw-scale-y: 1;--tw-pan-x: ;--tw-pan-y: ;--tw-pinch-zoom: ;--tw-scroll-snap-strictness: proximity;--tw-ordinal: ;--tw-slashed-zero: ;--tw-numeric-figure: ;--tw-numeric-spacing: ;--tw-numeric-fraction: ;--tw-ring-inset: ;--tw-ring-offset-width: 0px;--tw-ring-offset-color: #fff;--tw-ring-color: rgb(59 130 246 / .5);--tw-ring-offset-shadow: 0 0 #0000;--tw-ring-shadow: 0 0 #0000;--tw-shadow: 0 0 #0000;--tw-shadow-colored: 0 0 #0000;--tw-blur: ;--tw-brightness: ;--tw-contrast: ;--tw-grayscale: ;--tw-hue-rotate: ;--tw-invert: ;--tw-saturate: ;--tw-sepia: ;--tw-drop-shadow: ;--tw-backdrop-blur: ;--tw-backdrop-brightness: ;--tw-backdrop-contrast: ;--tw-backdrop-grayscale: ;--tw-backdrop-hue-rotate: ;--tw-backdrop-invert: ;--tw-backdrop-opacity: ;--tw-backdrop-saturate: ;--tw-backdrop-sepia: }.container{width:100%}@media (min-width: 640px){.container{max-width:640px}}@media (min-width: 768px){.container{max-width:768px}}@media (min-width: 1024px){.container{max-width:1024px}}@media (min-width: 1280px){.container{max-width:1280px}}@media (min-width: 1536px){.container{max-width:1536px}}.block{display:block}.flex{display:flex}.table{display:table}.hidden{display:none}.h-full{height:100%}.min-h-screen{min-height:100vh}.w-2\/3{width:66.666667%}.w-full{width:100%}.cursor-not-allowed{cursor:not-allowed}.cursor-pointer{cursor:pointer}.cursor-wait{cursor:wait}.flex-col{flex-direction:column}.items-center{align-items:center}.justify-center{justify-content:center}.space-y-12>:not([hidden])~:not([hidden]){--tw-space-y-reverse: 0;margin-top:calc(3rem * calc(1 - var(--tw-space-y-reverse)));margin-bottom:calc(3rem * var(--tw-space-y-reverse))}.overflow-auto{overflow:auto}.whitespace-pre-wrap{white-space:pre-wrap}.border-4{border-width:4px}.border-yellow-200{--tw-border-opacity: 1;border-color:rgb(254 240 138 / var(--tw-border-opacity))}.bg-yellow-200{--tw-bg-opacity: 1;background-color:rgb(254 240 138 / var(--tw-bg-opacity))}.bg-yellow-500{--tw-bg-opacity: 1;background-color:rgb(234 179 8 / var(--tw-bg-opacity))}.p-6{padding:1.5rem}.py-24{padding-top:6rem;padding-bottom:6rem}.py-6{padding-top:1.5rem;padding-bottom:1.5rem}.text-center{text-align:center}.text-6xl{font-size:3.75rem;line-height:1}.text-xl{font-size:1.25rem;line-height:1.75rem}.opacity-50{opacity:.5}*,*:before,*:after{box-sizing:inherit;-webkit-user-select:inherit;-moz-user-select:inherit;user-select:inherit}html,body,#root{box-sizing:border-box;height:100%;min-height:100vh;width:100%;min-width:100vw;margin:0;padding:0;-webkit-user-select:none;-moz-user-select:none;user-select:none}input::-webkit-file-upload-button{display:none}@media (min-width: 1024px){.lg\:w-1\/3{width:33.333333%}} diff --git a/spaces/mikeee/radiobee-dev/radiobee/trim_df.py b/spaces/mikeee/radiobee-dev/radiobee/trim_df.py deleted file mode 100644 index 0bac444c8e305d53f875a62cf027558ec70e8f54..0000000000000000000000000000000000000000 --- a/spaces/mikeee/radiobee-dev/radiobee/trim_df.py +++ /dev/null @@ -1,27 +0,0 @@ -"""Trim df.""" -import pandas as pd - - -# fmt: off -def trim_df( - df1: pd.DataFrame, - len_: int = 4, - ignore_index: bool = True, -) -> pd.DataFrame: - # fmt: on - """Trim df.""" - if 
len(df1) > 2 * len_: - df_trimmed = pd.concat( - [ - df1.iloc[:len_, :], - pd.DataFrame( - # [["...", "...",]], - [["..."] * len(df1.columns)], - columns=df1.columns, - ), - df1.iloc[-len_:, :], - ], - ignore_index=ignore_index, - ) - return df_trimmed - return df1 diff --git a/spaces/milyiyo/reimagine-it/captioning/utils/dist_utils.py b/spaces/milyiyo/reimagine-it/captioning/utils/dist_utils.py deleted file mode 100644 index 53a7c462570edb8f381c65fabf60c729f1607f41..0000000000000000000000000000000000000000 --- a/spaces/milyiyo/reimagine-it/captioning/utils/dist_utils.py +++ /dev/null @@ -1,305 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -""" -This file contains primitives for multi-gpu communication. -This is useful when doing distributed training. -""" - -import functools -import logging -import numpy as np -import pickle -import torch -import torch.distributed as dist - -import torch - -_LOCAL_PROCESS_GROUP = None -""" -A torch process group which only includes processes that on the same machine as the current process. -This variable is set when processes are spawned by `launch()` in "engine/launch.py". -""" - - -def get_world_size() -> int: - if not dist.is_available(): - return 1 - if not dist.is_initialized(): - return 1 - return dist.get_world_size() - - -def get_rank() -> int: - if not dist.is_available(): - return 0 - if not dist.is_initialized(): - return 0 - return dist.get_rank() - - -def get_local_rank() -> int: - """ - Returns: - The rank of the current process within the local (per-machine) process group. - """ - if not dist.is_available(): - return 0 - if not dist.is_initialized(): - return 0 - assert _LOCAL_PROCESS_GROUP is not None - return dist.get_rank(group=_LOCAL_PROCESS_GROUP) - - -def get_local_size() -> int: - """ - Returns: - The size of the per-machine process group, - i.e. the number of processes per machine. - """ - if not dist.is_available(): - return 1 - if not dist.is_initialized(): - return 1 - return dist.get_world_size(group=_LOCAL_PROCESS_GROUP) - - -def is_main_process() -> bool: - return get_rank() == 0 - - -def synchronize(): - """ - Helper function to synchronize (barrier) among all processes when - using distributed training - """ - if not dist.is_available(): - return - if not dist.is_initialized(): - return - world_size = dist.get_world_size() - if world_size == 1: - return - dist.barrier() - - -@functools.lru_cache() -def _get_global_gloo_group(): - """ - Return a process group based on gloo backend, containing all the ranks - The result is cached. 
- """ - if dist.get_backend() == "nccl": - return dist.new_group(backend="gloo") - else: - return dist.group.WORLD - - -def _serialize_to_tensor(data, group): - backend = dist.get_backend(group) - assert backend in ["gloo", "nccl"] - device = torch.device("cpu" if backend == "gloo" else "cuda") - - buffer = pickle.dumps(data) - if len(buffer) > 1024 ** 3: - logger = logging.getLogger(__name__) - logger.warning( - "Rank {} trying to all-gather {:.2f} GB of data on device {}".format( - get_rank(), len(buffer) / (1024 ** 3), device - ) - ) - storage = torch.ByteStorage.from_buffer(buffer) - tensor = torch.ByteTensor(storage).to(device=device) - return tensor - - -def _pad_to_largest_tensor(tensor, group): - """ - Returns: - list[int]: size of the tensor, on each rank - Tensor: padded tensor that has the max size - """ - world_size = dist.get_world_size(group=group) - assert ( - world_size >= 1 - ), "comm.gather/all_gather must be called from ranks within the given group!" - local_size = torch.tensor( - [tensor.numel()], dtype=torch.int64, device=tensor.device) - size_list = [ - torch.zeros([1], dtype=torch.int64, device=tensor.device) - for _ in range(world_size) - ] - dist.all_gather(size_list, local_size, group=group) - size_list = [int(size.item()) for size in size_list] - - max_size = max(size_list) - - # we pad the tensor because torch all_gather does not support - # gathering tensors of different shapes - if local_size != max_size: - padding = torch.zeros( - (max_size - local_size,), dtype=torch.uint8, device=tensor.device - ) - tensor = torch.cat((tensor, padding), dim=0) - return size_list, tensor - - -def all_gather(data, group=None): - """ - Run all_gather on arbitrary picklable data (not necessarily tensors). - Args: - data: any picklable object - group: a torch process group. By default, will use a group which - contains all ranks on gloo backend. - Returns: - list[data]: list of data gathered from each rank - """ - if get_world_size() == 1: - return [data] - if group is None: - group = _get_global_gloo_group() - if dist.get_world_size(group) == 1: - return [data] - - tensor = _serialize_to_tensor(data, group) - - size_list, tensor = _pad_to_largest_tensor(tensor, group) - max_size = max(size_list) - - # receiving Tensor from all ranks - tensor_list = [ - torch.empty((max_size,), dtype=torch.uint8, device=tensor.device) - for _ in size_list - ] - dist.all_gather(tensor_list, tensor, group=group) - - data_list = [] - for size, tensor in zip(size_list, tensor_list): - buffer = tensor.cpu().numpy().tobytes()[:size] - data_list.append(pickle.loads(buffer)) - - return data_list - - -def gather(data, dst=0, group=None): - """ - Run gather on arbitrary picklable data (not necessarily tensors). - Args: - data: any picklable object - dst (int): destination rank - group: a torch process group. By default, will use a group which - contains all ranks on gloo backend. - Returns: - list[data]: on dst, a list of data gathered from each rank. Otherwise, - an empty list. 
- """ - if get_world_size() == 1: - return [data] - if group is None: - group = _get_global_gloo_group() - if dist.get_world_size(group=group) == 1: - return [data] - rank = dist.get_rank(group=group) - - tensor = _serialize_to_tensor(data, group) - size_list, tensor = _pad_to_largest_tensor(tensor, group) - - # receiving Tensor from all ranks - if rank == dst: - max_size = max(size_list) - tensor_list = [ - torch.empty((max_size,), dtype=torch.uint8, device=tensor.device) - for _ in size_list - ] - dist.gather(tensor, tensor_list, dst=dst, group=group) - - data_list = [] - for size, tensor in zip(size_list, tensor_list): - buffer = tensor.cpu().numpy().tobytes()[:size] - data_list.append(pickle.loads(buffer)) - return data_list - else: - dist.gather(tensor, [], dst=dst, group=group) - return [] - - -def shared_random_seed(): - """ - Returns: - int: a random number that is the same across all workers. - If workers need a shared RNG, they can use this shared seed to - create one. - All workers must call this function, otherwise it will deadlock. - """ - ints = np.random.randint(2 ** 31) - all_ints = all_gather(ints) - return all_ints[0] - - -# def reduce_dict(input_dict, average=True): -# """ -# Reduce the values in the dictionary from all processes so that process with rank -# 0 has the reduced results. -# Args: -# input_dict (dict): inputs to be reduced. All the values must be scalar CUDA Tensor. -# average (bool): whether to do average or sum -# Returns: -# a dict with the same keys as input_dict, after reduction. -# """ -# world_size = get_world_size() -# if world_size < 2: -# return input_dict -# with torch.no_grad(): -# names = [] -# values = [] -# # sort the keys so that they are consistent across processes -# for k in sorted(input_dict.keys()): -# names.append(k) -# values.append(input_dict[k]) -# values = torch.stack(values, dim=0) -# dist.reduce(values, dst=0) -# if dist.get_rank() == 0 and average: -# # only main process gets accumulated, so only divide by -# # world_size in this case -# values /= world_size -# reduced_dict = {k: v for k, v in zip(names, values)} -# return reduced_dict - - -def reduce_dict(input_dict, average=True): - """ - Reduce the values in the dictionary from all processes so that process with rank - 0 has the reduced results. - Args: - input_dict (dict): inputs to be reduced. (values not necessarily tensors). - average (bool): whether to do average or sum - Returns: - a dict with the same keys as input_dict, after reduction. 
- """ - - world_size = get_world_size() - if world_size < 2: - return input_dict - - with torch.no_grad(): - - # Convert to CUDA Tensor for dist.reduce() - input_dict_cuda_vals = {} - for k, v in input_dict.items(): - if type(v) == torch.Tensor: - input_dict_cuda_vals[k] = v.to('cuda') - else: - input_dict_cuda_vals[k] = torch.tensor(v, device='cuda') - - names = [] - values = [] - for k, v in sorted(input_dict_cuda_vals.items()): - names.append(k) - values.append(v) - values = torch.stack(values, dim=0) - dist.reduce(values, dst=0) # reduce to gpu 0 - - if dist.get_rank() == 0 and average: - # only main process gets accumulated, so only divide by - # world_size in this case - values /= world_size - reduced_dict = {k: v for k, v in zip(names, values)} - return reduced_dict diff --git a/spaces/mipbkhn/SmartGPTpublic/app.py b/spaces/mipbkhn/SmartGPTpublic/app.py deleted file mode 100644 index 92ef1ad50825448ba62ad190bd6135380b085c5f..0000000000000000000000000000000000000000 --- a/spaces/mipbkhn/SmartGPTpublic/app.py +++ /dev/null @@ -1,61 +0,0 @@ -import openai -from playsound import playsound -from gtts import gTTS -import speech_recognition as sr -import gradio as gr -import os - -openai.api_key = os.environ['api_key'] - -def generate_response(prompt): - prompt = (f"{prompt}") - - response = openai.ChatCompletion.create( - model="gpt-3.5-turbo", - messages=[ - {"role": "user", "content": f"{prompt}"}, - ]) - - message = response.choices[0]['message']['content'] - return message - -r = sr.Recognizer() -from pydub import AudioSegment -def transcribe(audio, lang): - with sr.AudioFile(audio) as source: audio = r.record(source) - text = r.recognize_google(audio, language=lang) - text = generate_response(text) - tts = gTTS(text=text, lang=lang) - out = "tmp.mp3" - tts.save(out) - return out - -with open('gradio_article.md') as f: - article = f.read() - -interface_options = { - "title": "Smart GPT", - "description": "Let's have a chat! 
Talk to me, and I'll respond in a jiffy!", - "article": article, - "layout": "horizontal", - "theme": "default", -} - -inputs = gr.Audio(source="microphone", type="filepath") -outputs = "audio" - -lang = gr.Dropdown(choices=["en", "vi", "nl"], value="en", resettable=False) -if lang.value == "": - lang.value = "en" - -gr.Interface(fn=transcribe, inputs=[inputs, lang], outputs=outputs, live=False, - allow_clear=False, **interface_options).launch() - -# TODO -# Custom voice - # VALL-E - # https://cloud.google.com/text-to-speech/custom-voice/docs/quickstart - # Mozilla TTS - # OpenSeq2Seq - # Best VN: Vbee, FPT - # Elevenlabs for English \ No newline at end of file diff --git a/spaces/misterbrainley/generate_dnd_images/utils.py b/spaces/misterbrainley/generate_dnd_images/utils.py deleted file mode 100644 index 612fe793fbd0a9cc461b26051fdbbc0d24a0372b..0000000000000000000000000000000000000000 --- a/spaces/misterbrainley/generate_dnd_images/utils.py +++ /dev/null @@ -1,263 +0,0 @@ -import glob -import numpy as np -import pickle -import torch -from torch import nn -from torch.utils.data import DataLoader, Dataset -import torchvision.transforms as T -from torchvision.utils import make_grid - - -# default image directory (training only) -img_path = '/home/alan/Projects/gen_dnd_art/filtered_images/im128/*pkl' -img_files = glob.glob(img_path) - -# determine class names from image directory (training only) -''' -labels = np.array([i.split('/')[-1].split('_')[:3] for i in img_files]) -species = np.unique(labels[:, 0]).tolist() -classes = np.unique(labels[:, 1]).tolist() -genders = np.unique(labels[:, 2]).tolist() -''' - -# hard code class labels (for application) -species = ['dragonborn', 'dwarf', 'elf', 'gnome', 'halfling', 'human', 'orc', 'tiefling'] -classes = [ - 'barbarian', 'bard', 'cleric', 'druid', 'fighter', 'monk', - 'paladin','ranger', 'rogue', 'sorcerer', 'warlock', 'wizard' -] -genders = ['', 'female', 'male'] - - -class ImSet(Dataset): - def __init__(self, img_path=img_path): - super().__init__() - self.img_files = glob.glob(img_path) - self.transform = T.Compose([ - T.ToTensor(), - T.ColorJitter(0.1, 0.1, 0.1, 0.1), - T.RandomHorizontalFlip(), - # add random noise and clip - lambda x: torch.clip(torch.randn(x.shape) / 20 + x, 0, 1), - T.Normalize(0.5, 0.5) - ]) - - def __len__(self): - return len(self.img_files) - - def __getitem__(self, i): - img_file = self.img_files[i] - - # load image - with open(img_file, 'rb') as fid: - img = pickle.load(fid) - - # apply transforms - img = self.transform(img).float() - - # extract class label - img_fname = img_file.split('/')[-1] - species_, class_, gender_, _, _ = img_fname.split('_') - species_ = species.index(species_) - class_ = classes.index(class_) - gender_ = genders.index(gender_) - - return (img_fname, img, species_, class_, gender_) - -class VariationalEncoder(nn.Module): - def __init__(self, input_channels=3, latent_size=2048): - super().__init__() - - self.latent_size = latent_size - - self.net = nn.Sequential( - # 128 -> 63 - nn.Conv2d(input_channels, 8, 4, 2), - nn.LeakyReLU(0.2), - - # 63 -> 31 - nn.Conv2d(8, 16, 3, 2), - nn.LeakyReLU(0.2), - - # 31 -> 15 - nn.Conv2d(16, 32, 3, 2), - nn.LeakyReLU(0.2), - - # 15 -> 7 - nn.Conv2d(32, 64, 3, 2), - nn.LeakyReLU(0.2), - - # 7 -> 5 - nn.Conv2d(64, 128, 3, 1), - nn.LeakyReLU(0.2), - - # 5 -> 4 - nn.Conv2d(128, 256, 2, 1), - nn.LeakyReLU(0.2), - - # 4 -> 3 - nn.Conv2d(256, 512, 2, 1), - nn.LeakyReLU(0.2), - - # 3 -> 2 - nn.Conv2d(512, 1024, 2, 1), - nn.LeakyReLU(0.2), - - # 2 -> 1 - 
nn.Conv2d(1024, latent_size, 2, 1), - nn.LeakyReLU(0.2), - - nn.Flatten(), - nn.Linear(latent_size, latent_size), - nn.Dropout(0.4) - ) - - # parameters for variational autoencoder - self.mu = nn.Linear(latent_size, latent_size) - self.sigma = nn.Linear(latent_size, latent_size) - self.N = torch.distributions.Normal(0, 1) - # self.N.loc = self.N.loc.cuda() - # self.N.scale = self.N.scale.cuda() - self.kl = 0 - - def forward(self, x): - x = self.net(x) - mu = self.mu(x) - sigma = torch.exp(self.sigma(x)) - x = mu + sigma * self.N.sample(mu.shape) - self.kl = (sigma**2 + mu**2 - torch.log(sigma) - 1/2).sum() - - return x - -class ConditionalEncoder(VariationalEncoder): - def __init__(self, latent_size=2048): - super().__init__(input_channels=4, latent_size=latent_size) - - self.emb_species = nn.Embedding(len(species), 128**2 // 3 + 128**2 % 3) - self.emb_class = nn.Embedding(len(classes), 128**2 // 3) - self.emb_gender = nn.Embedding(len(genders), 128**2 // 3) - self.emb_reshape = nn.Unflatten(1, (1, 128, 128)) - - - def forward(self, img, species_, class_, gender_): - x = self.emb_species(species_) - y = self.emb_class(class_) - z = self.emb_gender(gender_) - - x = torch.concat([x, y, z], dim=1) - x = self.emb_reshape(x) - - x = torch.concat([img, x], dim=1) - x = self.net(x) - - mu = self.mu(x) - sigma = torch.exp(self.sigma(x)) - x = mu + sigma * self.N.sample(mu.shape) - self.kl = (sigma**2 + mu**2 - torch.log(sigma) - 1/2).sum() - return x - - -class Decoder(nn.Module): - def __init__(self, latent_size=2048): - super().__init__() - self.latent_size = latent_size - self.net = nn.Sequential( - - nn.Linear(latent_size, latent_size), - nn.Dropout(0.4), - - nn.Unflatten(1, (latent_size, 1, 1)), - - # 1 -> 2 - nn.ConvTranspose2d(latent_size, 1024, 2, 1), - nn.LeakyReLU(0.2), - - # 2 -> 3 - nn.ConvTranspose2d(1024, 512, 2, 1), - nn.LeakyReLU(0.2), - - # 3 -> 4 - nn.ConvTranspose2d(512, 256, 2, 1), - nn.LeakyReLU(0.2), - - # 4 -> 5 - nn.ConvTranspose2d(256, 128, 2, 1), - nn.LeakyReLU(0.2), - - # 5 -> 7 - nn.ConvTranspose2d(128, 64, 3, 1), - nn.LeakyReLU(0.2), - - # 7 -> 15 - nn.ConvTranspose2d(64, 32, 3, 2), - nn.LeakyReLU(0.2), - - # 15 -> 31 - nn.ConvTranspose2d(32, 16, 3, 2), - nn.LeakyReLU(0.2), - - # 31 -> 63 - nn.ConvTranspose2d(16, 8, 3, 2), - nn.LeakyReLU(0.2), - - # 63 -> 128 - nn.ConvTranspose2d(8, 3, 4, 2), - nn.Tanh() - ) - - def forward(self, x): - return self.net(x) - - -class ConditionalDecoder(Decoder): - def __init__(self, latent_size=1024): - super().__init__(latent_size) - - self.emb_species = nn.Embedding(len(species), latent_size // 3 + latent_size % 3) - self.emb_class = nn.Embedding(len(classes), latent_size // 3) - self.emb_gender = nn.Embedding(len(genders), latent_size // 3) - self.label_net = nn.Linear(2*latent_size, latent_size) - - def forward(self, Z, species_, class_, gender_): - x = self.emb_species(species_) - y = self.emb_class(class_) - z = self.emb_gender(gender_) - - x = torch.concat([Z, x, y, z], dim=1) - x = self.label_net(x) - x = self.net(x) - return x - - -class VariationalAutoEncoder(nn.Module): - def __init__(self, latent_size=1024): - super().__init__() - self.latent_size = latent_size - self.enc = VariationalEncoder(latent_size) - self.dec = Decoder(latent_size) - - def forward(self, x): - return self.dec(self.enc(x)) - -class ConditionalVariationalAutoEncoder(nn.Module): - def __init__(self, latent_size=1024): - super().__init__() - self.latent_size = latent_size - self.enc = ConditionalEncoder(latent_size) - self.dec = 
ConditionalDecoder(latent_size) - - def forward(self, img, species_, class_, gender_): - Z = self.enc(img, species_, class_, gender_) - x = self.dec(Z, species_, class_, gender_) - return x - -def show_tensor(Z, ax, **kwargs): - if len(Z.shape) > 3: - Z = Z[0] - - if Z.min() < 1: - Z = (Z + 1) / 2 - - Z = np.transpose(Z.detach().cpu().numpy(), (1, 2, 0)) - ax.imshow(Z, **kwargs) - return ax \ No newline at end of file diff --git a/spaces/mithril-security/blind_chat/src/lib/types/Timestamps.ts b/spaces/mithril-security/blind_chat/src/lib/types/Timestamps.ts deleted file mode 100644 index 12d1867d1be509310190df09d2392bfaa77d6500..0000000000000000000000000000000000000000 --- a/spaces/mithril-security/blind_chat/src/lib/types/Timestamps.ts +++ /dev/null @@ -1,4 +0,0 @@ -export interface Timestamps { - createdAt: Date; - updatedAt: Date; -} diff --git a/spaces/mrstuffandthings/Bark-Voice-Cloning/training/__init__.py b/spaces/mrstuffandthings/Bark-Voice-Cloning/training/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/mshukor/UnIVAL/fairseq/examples/linformer/linformer_src/modules/linformer_sentence_encoder.py b/spaces/mshukor/UnIVAL/fairseq/examples/linformer/linformer_src/modules/linformer_sentence_encoder.py deleted file mode 100644 index 44f7989bd863329f763aa62b78df2eb42b3084ea..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/examples/linformer/linformer_src/modules/linformer_sentence_encoder.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import math - -import torch.nn as nn -from fairseq.models.transformer import TransformerEncoder - -from .linformer_sentence_encoder_layer import LinformerTransformerEncoderLayer - - -class LinformerTransformerEncoder(TransformerEncoder): - """ - Implementation for a Bi-directional Linformer based Sentence Encoder used - in BERT/XLM style pre-trained models. - - This first computes the token embedding using the token embedding matrix, - position embeddings (if specified) and segment embeddings - (if specified). After applying the specified number of - LinformerEncoderLayers, it outputs all the internal states of the - encoder as well as the final representation associated with the first - token (usually CLS token). - - Input: - - tokens: B x T matrix representing sentences - - segment_labels: B x T matrix representing segment label for tokens - - Output: - - a tuple of the following: - - a list of internal model states used to compute the - predictions where each tensor has shape T x B x C - - sentence representation associated with first input token - in format B x C. 
- """ - - def __init__(self, args, dictionary, embed_tokens): - self.compress_layer = None - super().__init__(args, dictionary, embed_tokens) - - def build_encoder_layer(self, args): - if self.args.shared_layer_kv_compressed == 1 and self.compress_layer is None: - compress_layer = nn.Linear( - self.args.max_positions, - self.args.max_positions // self.args.compressed, - ) - # intialize parameters for compressed layer - nn.init.xavier_uniform_(compress_layer.weight, gain=1 / math.sqrt(2)) - if self.args.freeze_compress == 1: - compress_layer.weight.requires_grad = False - self.compress_layer = compress_layer - - return LinformerTransformerEncoderLayer(args, self.compress_layer) diff --git a/spaces/mshukor/UnIVAL/fairseq/fairseq/data/encoders/hf_bert_bpe.py b/spaces/mshukor/UnIVAL/fairseq/fairseq/data/encoders/hf_bert_bpe.py deleted file mode 100644 index a41c059343ec7e2914b2c9d2f53f526c33f9659d..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/fairseq/data/encoders/hf_bert_bpe.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from dataclasses import dataclass, field -from typing import Optional - -from fairseq.data.encoders import register_bpe -from fairseq.dataclass import FairseqDataclass - - -@dataclass -class BertBPEConfig(FairseqDataclass): - bpe_cased: bool = field(default=False, metadata={"help": "set for cased BPE"}) - bpe_vocab_file: Optional[str] = field( - default=None, metadata={"help": "bpe vocab file"} - ) - - -@register_bpe("bert", dataclass=BertBPEConfig) -class BertBPE(object): - def __init__(self, cfg): - try: - from transformers import BertTokenizer - except ImportError: - raise ImportError( - "Please install transformers with: pip install transformers" - ) - - if cfg.bpe_vocab_file: - self.bert_tokenizer = BertTokenizer( - cfg.bpe_vocab_file, do_lower_case=not cfg.bpe_cased - ) - else: - vocab_file_name = ( - "bert-base-cased" if cfg.bpe_cased else "bert-base-uncased" - ) - self.bert_tokenizer = BertTokenizer.from_pretrained(vocab_file_name) - - def encode(self, x: str) -> str: - return " ".join(self.bert_tokenizer.tokenize(x)) - - def decode(self, x: str) -> str: - return self.bert_tokenizer.clean_up_tokenization( - self.bert_tokenizer.convert_tokens_to_string(x.split(" ")) - ) - - def is_beginning_of_word(self, x: str) -> bool: - return not x.startswith("##") diff --git a/spaces/mshukor/UnIVAL/fairseq/fairseq/registry.py b/spaces/mshukor/UnIVAL/fairseq/fairseq/registry.py deleted file mode 100644 index f3b9406043d75a51d7bf4af5294f82b33a8f9a5e..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/fairseq/registry.py +++ /dev/null @@ -1,100 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -from argparse import Namespace - -from typing import Union -from fairseq.dataclass import FairseqDataclass -from fairseq.dataclass.utils import merge_with_parent -from hydra.core.config_store import ConfigStore -from omegaconf import DictConfig - -REGISTRIES = {} - - -def setup_registry(registry_name: str, base_class=None, default=None, required=False): - assert registry_name.startswith("--") - registry_name = registry_name[2:].replace("-", "_") - - REGISTRY = {} - REGISTRY_CLASS_NAMES = set() - DATACLASS_REGISTRY = {} - - # maintain a registry of all registries - if registry_name in REGISTRIES: - return # registry already exists - REGISTRIES[registry_name] = { - "registry": REGISTRY, - "default": default, - "dataclass_registry": DATACLASS_REGISTRY, - } - - def build_x(cfg: Union[DictConfig, str, Namespace], *extra_args, **extra_kwargs): - if isinstance(cfg, DictConfig): - choice = cfg._name - - if choice and choice in DATACLASS_REGISTRY: - dc = DATACLASS_REGISTRY[choice] - cfg = merge_with_parent(dc(), cfg) - elif isinstance(cfg, str): - choice = cfg - if choice in DATACLASS_REGISTRY: - cfg = DATACLASS_REGISTRY[choice]() - else: - choice = getattr(cfg, registry_name, None) - if choice in DATACLASS_REGISTRY: - cfg = DATACLASS_REGISTRY[choice].from_namespace(cfg) - - if choice is None: - if required: - raise ValueError("{} is required!".format(registry_name)) - return None - - cls = REGISTRY[choice] - if hasattr(cls, "build_" + registry_name): - builder = getattr(cls, "build_" + registry_name) - else: - builder = cls - - return builder(cfg, *extra_args, **extra_kwargs) - - def register_x(name, dataclass=None): - def register_x_cls(cls): - if name in REGISTRY: - raise ValueError( - "Cannot register duplicate {} ({})".format(registry_name, name) - ) - if cls.__name__ in REGISTRY_CLASS_NAMES: - raise ValueError( - "Cannot register {} with duplicate class name ({})".format( - registry_name, cls.__name__ - ) - ) - if base_class is not None and not issubclass(cls, base_class): - raise ValueError( - "{} must extend {}".format(cls.__name__, base_class.__name__) - ) - - if dataclass is not None and not issubclass(dataclass, FairseqDataclass): - raise ValueError( - "Dataclass {} must extend FairseqDataclass".format(dataclass) - ) - - cls.__dataclass = dataclass - if cls.__dataclass is not None: - DATACLASS_REGISTRY[name] = cls.__dataclass - - cs = ConfigStore.instance() - node = dataclass() - node._name = name - cs.store(name=name, group=registry_name, node=node, provider="fairseq") - - REGISTRY[name] = cls - - return cls - - return register_x_cls - - return build_x, register_x, REGISTRY, DATACLASS_REGISTRY diff --git a/spaces/mygyasir/SargeZT-controlnet-sd-xl-1.0-depth-16bit-zoe/README.md b/spaces/mygyasir/SargeZT-controlnet-sd-xl-1.0-depth-16bit-zoe/README.md deleted file mode 100644 index e6638bacd61c181a19dd44884c66790741729084..0000000000000000000000000000000000000000 --- a/spaces/mygyasir/SargeZT-controlnet-sd-xl-1.0-depth-16bit-zoe/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: SargeZT Controlnet Sd Xl 1.0 Depth 16bit Zoe -emoji: 📚 -colorFrom: pink -colorTo: yellow -sdk: gradio -sdk_version: 3.40.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/mygyasir/genious_bgremover/carvekit/utils/models_utils.py b/spaces/mygyasir/genious_bgremover/carvekit/utils/models_utils.py deleted file mode 100644 index 
da0141de0d3ed2d90559f5f590022e62a5e659aa..0000000000000000000000000000000000000000 --- a/spaces/mygyasir/genious_bgremover/carvekit/utils/models_utils.py +++ /dev/null @@ -1,126 +0,0 @@ -""" -Source url: https://github.com/OPHoperHPO/image-background-remove-tool -Author: Nikita Selin (OPHoperHPO)[https://github.com/OPHoperHPO]. -License: Apache License 2.0 -""" - -import random -import warnings -from typing import Union, Tuple, Any - -import torch -from torch import autocast - - -class EmptyAutocast(object): - """ - Empty class for disable any autocasting. - """ - - def __enter__(self): - return None - - def __exit__(self, exc_type, exc_val, exc_tb): - return - - def __call__(self, func): - return - - -def get_precision_autocast( - device="cpu", fp16=True, override_dtype=None -) -> Union[ - Tuple[EmptyAutocast, Union[torch.dtype, Any]], - Tuple[autocast, Union[torch.dtype, Any]], -]: - """ - Returns precision and autocast settings for given device and fp16 settings. - Args: - device: Device to get precision and autocast settings for. - fp16: Whether to use fp16 precision. - override_dtype: Override dtype for autocast. - - Returns: - Autocast object, dtype - """ - dtype = torch.float32 - cache_enabled = None - - if device == "cpu" and fp16: - warnings.warn('FP16 is not supported on CPU. Using FP32 instead.') - dtype = torch.float32 - - # TODO: Implement BFP16 on CPU. There are unexpected slowdowns on cpu on a clean environment. - # warnings.warn( - # "Accuracy BFP16 has experimental support on the CPU. " - # "This may result in an unexpected reduction in quality." - # ) - # dtype = ( - # torch.bfloat16 - # ) # Using bfloat16 for CPU, since autocast is not supported for float16 - - - if "cuda" in device and fp16: - dtype = torch.float16 - cache_enabled = True - - if override_dtype is not None: - dtype = override_dtype - - if dtype == torch.float32 and device == "cpu": - return EmptyAutocast(), dtype - - return ( - torch.autocast( - device_type=device, dtype=dtype, enabled=True, cache_enabled=cache_enabled - ), - dtype, - ) - - -def cast_network(network: torch.nn.Module, dtype: torch.dtype): - """Cast network to given dtype - - Args: - network: Network to be casted - dtype: Dtype to cast network to - """ - if dtype == torch.float16: - network.half() - elif dtype == torch.bfloat16: - network.bfloat16() - elif dtype == torch.float32: - network.float() - else: - raise ValueError(f"Unknown dtype {dtype}") - - -def fix_seed(seed=42): - """Sets fixed random seed - - Args: - seed: Random seed to be set - """ - random.seed(seed) - torch.manual_seed(seed) - if torch.cuda.is_available(): - torch.cuda.manual_seed(seed) - torch.cuda.manual_seed_all(seed) - # noinspection PyUnresolvedReferences - torch.backends.cudnn.deterministic = True - # noinspection PyUnresolvedReferences - torch.backends.cudnn.benchmark = False - return True - - -def suppress_warnings(): - # Suppress PyTorch 1.11.0 warning associated with changing order of args in nn.MaxPool2d layer, - # since source code is not affected by this issue and there aren't any other correct way to hide this message. 
- warnings.filterwarnings( - "ignore", - category=UserWarning, - message="Note that order of the arguments: ceil_mode and " - "return_indices will changeto match the args list " - "in nn.MaxPool2d in a future release.", - module="torch", - ) diff --git a/spaces/nateraw/background-remover/app.py b/spaces/nateraw/background-remover/app.py deleted file mode 100644 index c53c42ae3e0e6ec108301bc6f7dbce2c36684e95..0000000000000000000000000000000000000000 --- a/spaces/nateraw/background-remover/app.py +++ /dev/null @@ -1,127 +0,0 @@ -import cv2 -import gradio as gr -import numpy as np -import onnxruntime -import requests -from huggingface_hub import hf_hub_download -from PIL import Image - - -# Get x_scale_factor & y_scale_factor to resize image -def get_scale_factor(im_h, im_w, ref_size=512): - - if max(im_h, im_w) < ref_size or min(im_h, im_w) > ref_size: - if im_w >= im_h: - im_rh = ref_size - im_rw = int(im_w / im_h * ref_size) - elif im_w < im_h: - im_rw = ref_size - im_rh = int(im_h / im_w * ref_size) - else: - im_rh = im_h - im_rw = im_w - - im_rw = im_rw - im_rw % 32 - im_rh = im_rh - im_rh % 32 - - x_scale_factor = im_rw / im_w - y_scale_factor = im_rh / im_h - - return x_scale_factor, y_scale_factor - - -MODEL_PATH = hf_hub_download('nateraw/background-remover-files', 'modnet.onnx', repo_type='dataset') - - -def main(image_path, threshold): - - # read image - im = cv2.imread(image_path) - im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB) - - # unify image channels to 3 - if len(im.shape) == 2: - im = im[:, :, None] - if im.shape[2] == 1: - im = np.repeat(im, 3, axis=2) - elif im.shape[2] == 4: - im = im[:, :, 0:3] - - # normalize values to scale it between -1 to 1 - im = (im - 127.5) / 127.5 - - im_h, im_w, im_c = im.shape - x, y = get_scale_factor(im_h, im_w) - - # resize image - im = cv2.resize(im, None, fx=x, fy=y, interpolation=cv2.INTER_AREA) - - # prepare input shape - im = np.transpose(im) - im = np.swapaxes(im, 1, 2) - im = np.expand_dims(im, axis=0).astype('float32') - - # Initialize session and get prediction - session = onnxruntime.InferenceSession(MODEL_PATH, None) - input_name = session.get_inputs()[0].name - output_name = session.get_outputs()[0].name - result = session.run([output_name], {input_name: im}) - - # refine matte - matte = (np.squeeze(result[0]) * 255).astype('uint8') - matte = cv2.resize(matte, dsize=(im_w, im_h), interpolation=cv2.INTER_AREA) - - # HACK - Could probably just convert this to PIL instead of writing - cv2.imwrite('out.png', matte) - - image = Image.open(image_path) - matte = Image.open('out.png') - - # obtain predicted foreground - image = np.asarray(image) - if len(image.shape) == 2: - image = image[:, :, None] - if image.shape[2] == 1: - image = np.repeat(image, 3, axis=2) - elif image.shape[2] == 4: - image = image[:, :, 0:3] - - b, g, r = cv2.split(image) - - mask = np.asarray(matte) - a = np.ones(mask.shape, dtype='uint8') * 255 - alpha_im = cv2.merge([b, g, r, a], 4) - bg = np.zeros(alpha_im.shape) - new_mask = np.stack([mask, mask, mask, mask], axis=2) - foreground = np.where(new_mask > threshold, alpha_im, bg).astype(np.uint8) - - return Image.fromarray(foreground) - - -title = "MODNet Background Remover" -description = "Gradio demo for MODNet, a model that can remove the background from a given image. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below." 
-article = "" - -url = "https://huggingface.co/datasets/nateraw/background-remover-files/resolve/main/twitter_profile_pic.jpeg" -image = Image.open(requests.get(url, stream=True).raw) -image.save('twitter_profile_pic.jpg') - -url = "https://upload.wikimedia.org/wikipedia/commons/8/8d/President_Barack_Obama.jpg" -image = Image.open(requests.get(url, stream=True).raw) -image.save('obama.jpg') - -interface = gr.Interface( - fn=main, - inputs=[ - gr.inputs.Image(type='filepath'), - gr.inputs.Slider(minimum=0, maximum=250, default=100, step=5, label='Mask Cutoff Threshold'), - ], - outputs='image', - examples=[['twitter_profile_pic.jpg', 120], ['obama.jpg', 155]], - title=title, - description=description, - article=article, -) - -if __name__ == '__main__': - interface.launch(debug=True) diff --git a/spaces/neko321/Voice-Changer1/infer_pack/models_onnx_moess.py b/spaces/neko321/Voice-Changer1/infer_pack/models_onnx_moess.py deleted file mode 100644 index 12efb0629a2e3d0d746a34f467254536c2bdbe5f..0000000000000000000000000000000000000000 --- a/spaces/neko321/Voice-Changer1/infer_pack/models_onnx_moess.py +++ /dev/null @@ -1,849 +0,0 @@ -import math, pdb, os -from time import time as ttime -import torch -from torch import nn -from torch.nn import functional as F -from infer_pack import modules -from infer_pack import attentions -from infer_pack import commons -from infer_pack.commons import init_weights, get_padding -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from infer_pack.commons import init_weights -import numpy as np -from infer_pack import commons - - -class TextEncoder256(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class TextEncoder256Sim(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = 
nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - x = self.proj(x) * x_mask - return x, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0, - ): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer( - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - mean_only=True, - ) - ) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - def remove_weight_norm(self): - for i in range(self.n_flows): - self.flows[i * 2].remove_weight_norm() - - -class PosteriorEncoder(nn.Module): - def __init__( - self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class Generator(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=0, - ): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append( - weight_norm( - ConvTranspose1d( - 
upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class SineGen(torch.nn.Module): - """Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - sine_amp = 0.1, noise_std = 0.003, - voiced_threshold = 0, - flag_for_pulse=False) - samp_rate: sampling rate in Hz - harmonic_num: number of harmonic overtones (default 0) - sine_amp: amplitude of sine-wavefrom (default 0.1) - noise_std: std of Gaussian noise (default 0.003) - voiced_thoreshold: F0 threshold for U/V classification (default 0) - flag_for_pulse: this SinGen is used inside PulseGen (default False) - Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) - """ - - def __init__( - self, - samp_rate, - harmonic_num=0, - sine_amp=0.1, - noise_std=0.003, - voiced_threshold=0, - flag_for_pulse=False, - ): - super(SineGen, self).__init__() - self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - - def _f02uv(self, f0): - # generate uv signal - uv = torch.ones_like(f0) - uv = uv * (f0 > self.voiced_threshold) - return uv - - def forward(self, f0, upp): - """sine_tensor, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, length, dim) - output uv: tensor(batchsize=1, length, 1) - """ - with torch.no_grad(): - f0 = f0[:, None].transpose(1, 2) - f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device) - # fundamental component - f0_buf[:, :, 0] = f0[:, :, 0] - for idx in np.arange(self.harmonic_num): - f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * ( - idx + 2 - ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic - rad_values = (f0_buf / self.sampling_rate) % 1 ###%1意味着n_har的乘积无法后处理优化 - rand_ini = torch.rand( - f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device - ) - rand_ini[:, 0] = 0 - rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - tmp_over_one = torch.cumsum(rad_values, 1) # % 1 #####%1意味着后面的cumsum无法再优化 - tmp_over_one *= upp - tmp_over_one = F.interpolate( - tmp_over_one.transpose(2, 1), - scale_factor=upp, - mode="linear", - align_corners=True, - ).transpose(2, 1) - rad_values = F.interpolate( - rad_values.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose( - 2, 1 - ) ####### - 
tmp_over_one %= 1 - tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 - sine_waves = torch.sin( - torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi - ) - sine_waves = sine_waves * self.sine_amp - uv = self._f02uv(f0) - uv = F.interpolate( - uv.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose(2, 1) - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - """SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - uv (batchsize, length, 1) - """ - - def __init__( - self, - sampling_rate, - harmonic_num=0, - sine_amp=0.1, - add_noise_std=0.003, - voiced_threshod=0, - is_half=True, - ): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - self.is_half = is_half - # to produce sine waveforms - self.l_sin_gen = SineGen( - sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod - ) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x, upp=None): - sine_wavs, uv, _ = self.l_sin_gen(x, upp) - if self.is_half: - sine_wavs = sine_wavs.half() - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - return sine_merge, None, None # noise, uv - - -class GeneratorNSF(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - sr, - is_half=False, - ): - super(GeneratorNSF, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - - self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates)) - self.m_source = SourceModuleHnNSF( - sampling_rate=sr, harmonic_num=0, is_half=is_half - ) - self.noise_convs = nn.ModuleList() - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - c_cur = upsample_initial_channel // (2 ** (i + 1)) - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - if i + 1 < len(upsample_rates): - stride_f0 = np.prod(upsample_rates[i + 1 :]) - self.noise_convs.append( - Conv1d( - 1, - c_cur, - kernel_size=stride_f0 * 2, - stride=stride_f0, - padding=stride_f0 // 2, - ) - ) - else: - self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - - self.resblocks = 
nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - self.upp = np.prod(upsample_rates) - - def forward(self, x, f0, g=None): - har_source, noi_source, uv = self.m_source(f0, self.upp) - har_source = har_source.transpose(1, 2) - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -sr2sr = { - "32k": 32000, - "40k": 40000, - "48k": 48000, -} - - -class SynthesizerTrnMs256NSFsidM(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward(self, phone, phone_lengths, pitch, nsff0, sid, rnd, max_len=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = 
self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * rnd) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g) - return o - - -class SynthesizerTrnMs256NSFsid_sim(nn.Module): - """ - Synthesizer for Training - """ - - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - # hop_length, - gin_channels=0, - use_sdp=True, - **kwargs - ): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256Sim( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - is_half=kwargs["is_half"], - ) - - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward( - self, phone, phone_lengths, pitch, pitchf, ds, max_len=None - ): # y是spec不需要了现在 - g = self.emb_g(ds.unsqueeze(0)).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - x, x_mask = self.enc_p(phone, pitch, phone_lengths) - x = self.flow(x, x_mask, g=g, reverse=True) - o = self.dec((x * x_mask)[:, :, :max_len], pitchf, g=g) - return o - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11, 17] - # periods = [3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if 
use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ] - ) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f( - Conv2d( - 1, - 32, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 32, - 128, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 128, - 512, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 512, - 1024, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 1024, - 1024, - (kernel_size, 1), - 1, - padding=(get_padding(kernel_size, 1), 0), - ) - ), - ] - ) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap diff --git a/spaces/neloreis/TheBloke-Wizard-Vicuna-13B-Uncensored-HF/README.md b/spaces/neloreis/TheBloke-Wizard-Vicuna-13B-Uncensored-HF/README.md deleted file mode 100644 index d80976a04341545b299d3588e0782b903c26b2fd..0000000000000000000000000000000000000000 --- a/spaces/neloreis/TheBloke-Wizard-Vicuna-13B-Uncensored-HF/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: TheBloke Wizard Vicuna 13B Uncensored HF -emoji: 🔥 -colorFrom: pink -colorTo: indigo -sdk: gradio -sdk_version: 3.32.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Jhamela 1 Full Movie Free Download 720p Movies.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Jhamela 1 Full Movie Free Download 720p Movies.md deleted file mode 100644 index 756f543e7493513b0c845f9074604c82609096ed..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Jhamela 1 Full Movie Free Download 720p Movies.md +++ /dev/null @@ -1,20 +0,0 @@ - -

        Jhamela 1: A Haryanvi Comedy Movie You Don't Want to Miss

        -

        If you are looking for a fun and entertaining movie to watch, you might want to check out Jhamela 1, a Haryanvi comedy movie starring Uttar Kumar and Sonal Khatri. Jhamela 1 is a story of two brothers who get into trouble with their father, their girlfriends, and their enemies. The movie is full of hilarious situations, witty dialogues, and action scenes.

        -

        Jhamela 1 Full Movie Download 720p Movies


        Download ===== https://urlcod.com/2uI9tP



        -

        Jhamela 1 was released in 2019 by Nav Haryanvi, a popular Haryanvi music and movie label. The movie was directed by Dinesh Chaudhary and written by Avnish Kumar Singh. The movie also features Monu Dhankad, Rajeev Sirohi, Surjeet Sirohi, Krishan Pal Bharat, Arvind Malik, Priyanka, Niru Sharma, Monu Sahrawat, Nikhil Kumar, Ratan Pal, Ranveer Singh, and Kashmir in supporting roles.

        -

        The movie has received positive reviews from the audience and critics alike. The movie has also been praised for its music and songs, composed by Shyam Studio and Pradeep Panchal. The movie has songs like "Jhamela", "Chori Chori", "Dil Ki Baat", and "Mera Dil". The movie has also been dubbed in Hindi for a wider reach.

        -

        If you want to watch Jhamela 1 full movie online or download it in 720p quality, you can visit the following websites:

        -
          -
        • YouTube: You can watch Jhamela 1 full movie on YouTube for free. The movie has been uploaded by Nav Haryanvi on their official channel[^1^]. You can also download the movie using a YouTube downloader app or website.
        • -
        • HDHub4u: You can download Jhamela 1 full movie in 720p HD quality from HDHub4u[^2^], a website that offers Bollywood and Hollywood movies and web series in Hindi and English. You can also stream the movie online on this website.
        • -
        • Pastebin: You can find a link to download Jhamela 1 full movie in 720p HD quality from Pastebin[^3^], a website that allows users to share text online. The link has been posted by an anonymous user on this website.
        • -
        • Google Drive: You can download Jhamela 1 full movie in 720p HD quality from Google Drive[^4^], a cloud storage service that allows users to store and share files online. The movie has been uploaded by an unknown user on this service.
        • -
        -

        However, we advise you to watch Jhamela 1 full movie legally from authorized sources. Downloading or streaming movies from unauthorized sources may violate the copyright laws of your country and may also expose you to malware or viruses. Please support the original creators of the movie by watching it from legitimate platforms.

        -

        - -

        Jhamela 1 is not only a comedy movie, but also a movie that showcases the culture and lifestyle of Haryana, a state in northern India. The movie depicts the rural and urban aspects of Haryana, as well as the traditions and values of the Haryanvi people. The movie also portrays the social issues and challenges faced by the youth of Haryana, such as unemployment, education, and love.

        -

        Jhamela 1 is a movie that can be enjoyed by people of all ages and backgrounds. The movie has a universal appeal and a message of love, friendship, and family. The movie also has a lot of humor and entertainment that will make you laugh and smile. If you are looking for a movie that will make your day, you should watch Jhamela 1.

        7b8c122e87
        -
        -
        \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Microwave Circuit Design A Practical Approach Using ADS !EXCLUSIVE!.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Microwave Circuit Design A Practical Approach Using ADS !EXCLUSIVE!.md deleted file mode 100644 index c5a8964ac93ffe2ed3fd4c3ccbf59ded1e6b1d5b..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Microwave Circuit Design A Practical Approach Using ADS !EXCLUSIVE!.md +++ /dev/null @@ -1,46 +0,0 @@ -
        -

        Microwave Circuit Design: A Practical Approach Using ADS

        -

        Microwave circuit design is a challenging and rewarding field that requires both theoretical knowledge and practical skills. Microwave circuits are used in various applications, such as communication systems, radars, sensors, and medical devices. However, designing microwave circuits is not easy, as it involves dealing with high frequencies, complex devices, and nonlinear phenomena.

        -

        One of the most widely used tools for microwave circuit design is the Advanced Design System (ADS) software from Keysight Technologies. ADS is an electronic design automation (EDA) package that enables engineers to simulate, optimize, and verify microwave circuits using various models, methods, and libraries. ADS also provides a user-friendly interface and a powerful scripting language for customizing and automating design tasks.

        -

        Microwave Circuit Design: A Practical Approach Using ADS


        Download Zip 🌟 https://urlcod.com/2uI9xx



        -

        In this article, we will introduce the basic concepts and techniques of microwave circuit design using ADS. We will cover the following topics:

        -
          -
        • Microwave IC categorization and roles
        • -
        • Passive device impedances and equivalent circuits
        • -
        • Coaxial and microstrip transmission lines
        • -
        • Active devices (FET, BJT, DC Bias) and impedance matching
        • -
        • Low noise amplifier (LNA) design
        • -
        • Power amplifier (PA) design
        • -
        • Microwave oscillator design
        • -
        • Phase lock loops (PLL) design
        • -
        • Mixer design
        • -
        -

        We will also provide some examples of microwave circuit simulation using ADS and explain how to use the ADS toolset and window framework. By the end of this article, you should have a better understanding of how to design active microwave circuits using ADS.

        -

        This article is based on the book Microwave Circuit Design: A Practical Approach Using ADS by Kyung-Whan Yeom[^1^], which is a complete guide to modern circuit design with simulation tutorials. You can find more information about the book and download the updated files for the latest version of ADS from the book's website[^2^].

        - -

        Microwave IC Categorization and Roles

        -

        Microwave integrated circuits (MICs) are electronic circuits that operate at microwave frequencies, typically above 1 GHz. MICs can be classified into two categories: monolithic microwave integrated circuits (MMICs) and hybrid microwave integrated circuits (HMICs).

        -

        MMICs are fabricated on a single semiconductor substrate using various processes, such as GaAs MESFET, GaN HEMT, SiGe HBT, or CMOS. MMICs have the advantages of high integration density, low cost, high reliability, and good performance. However, MMICs also have some limitations, such as low power handling capability, limited substrate materials, and thermal issues.

        -

        HMICs are composed of discrete components that are mounted on a substrate using various techniques, such as wire bonding, flip-chip bonding, or surface mount technology. HMICs have the advantages of high power handling capability, wide choice of substrate materials, and flexibility in design. However, HMICs also have some drawbacks, such as low integration density, high cost, low reliability, and parasitic effects.

        -

        The roles of MICs can be divided into four categories: signal generation, signal amplification, signal conversion, and signal processing. Signal generation refers to creating an oscillating signal at a desired frequency and power level. Signal amplification refers to increasing the power level of an input signal without changing its frequency or phase. Signal conversion refers to changing the frequency or phase of an input signal using a nonlinear device or a feedback loop. Signal processing refers to modifying the characteristics of an input signal using various techniques, such as filtering, modulation, demodulation, detection, or coding.

        - -

        Passive Device Impedances and Equivalent Circuits

        -

Passive devices are components that do not require external power sources to operate. Passive devices include resistors, capacitors, inductors, transmission lines, couplers, splitters, combiners, filters, and antennas. Passive devices are characterized by their impedances, which describe how they affect the voltage and current of an AC signal.

        -

        -

The impedance of a passive device can be expressed as a complex number, Z = R + jX, where the real part R is the resistance and the imaginary part X is the reactance.
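
As a quick illustration (these are the standard textbook definitions, not material taken from the book itself), the ideal element impedances at angular frequency ω = 2πf are:

$$
Z_R = R, \qquad Z_L = j\omega L, \qquad Z_C = \frac{1}{j\omega C}, \qquad Z = R + jX
$$

For example, a 1 pF capacitor at 10 GHz presents an impedance of roughly -j15.9 Ω, which is why even tiny parasitic capacitances and inductances dominate circuit behavior at microwave frequencies.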

        81aa517590
        -
        -
        \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Muthutamilmovie720phd_VERIFIED_ Download ((INSTALL)).md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Muthutamilmovie720phd_VERIFIED_ Download ((INSTALL)).md deleted file mode 100644 index 29d4553ec401ac087cdfefee3a2da02b369a7da1..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Muthutamilmovie720phd_VERIFIED_ Download ((INSTALL)).md +++ /dev/null @@ -1,29 +0,0 @@ -
        -

        Muthu Tamil Movie 720p HD Download: How to Watch Rajinikanth's Classic Film Online

        - -

        Muthu is a 1995 Tamil-language masala film starring Rajinikanth, Meena and Sarath Babu. It is a remake of the Malayalam film Thenmavin Kombath (1994) and was directed by K. S. Ravikumar. The film was a blockbuster hit and won several awards, including the Tamil Nadu State Film Award for Best Actor for Rajinikanth. The film was also dubbed in Japanese and released as Muthu Odoru Maharaja (Muthu – The Dancing Maharaja) in 1998, becoming the highest-grossing Indian film in Japan.

        - -

        If you are a fan of Rajinikanth or want to watch a classic Tamil film, you might be wondering how to download Muthu Tamil movie 720p HD online. Well, there are several websites that claim to offer Muthu Tamil movie 720p HD download for free, but you should be careful as they might contain malware or viruses that can harm your device. Moreover, downloading movies from unauthorized sources is illegal and can get you into trouble with the law.

        -

        Muthutamilmovie720phddownload ((INSTALL))


        Download Ziphttps://urlcod.com/2uIaA6



        - -

        The best way to watch Muthu Tamil movie online is to stream it legally on a platform that has the rights to the film. For example, you can watch Muthu Tamil movie on Amazon Prime Video, which offers a 30-day free trial for new users. You can also rent or buy Muthu Tamil movie on YouTube or Google Play Movies. These platforms offer high-quality video and audio, as well as subtitles and other features.

        - -

        To watch Muthu Tamil movie online, you will need a stable internet connection and a compatible device, such as a smartphone, tablet, laptop or smart TV. You will also need to create an account on the platform of your choice and pay for the subscription or rental fee if applicable. Once you have done that, you can search for Muthu Tamil movie on the platform and start streaming it.

        - -

        Muthu Tamil movie is a must-watch for Rajinikanth fans and lovers of Tamil cinema. It has a captivating story, memorable songs, stunning visuals and superb performances by the cast. It is also a cultural phenomenon that has transcended borders and languages. So, what are you waiting for? Watch Muthu Tamil movie online today and enjoy the magic of Rajinikanth.

        - -

        Muthu Tamil Movie: Interesting Facts and Trivia

        - -

        Muthu Tamil movie is not only a commercial success but also a film that has many interesting facts and trivia behind it. Here are some of them:

        - Muthu Tamil movie is a remake of the Malayalam film Thenmavin Kombath (1994), which starred Mohanlal and Shobana. Rajinikanth liked the story of the original film and asked K. S. Ravikumar to direct the remake. Ravikumar did not watch the original film and developed the screenplay of Muthu based on Rajinikanth's narration.
        - Muthu Tamil movie was shot in various locations in India, including Mysore, Madras and Kerala. The climax scene was shot at Athirappilly Falls, which is also known as the "Niagara Falls of India". The film also features some scenes shot in Sri Lanka.
        - Muthu Tamil movie has a soundtrack composed by A. R. Rahman, who collaborated with Rajinikanth for the first time. The songs were written by Vairamuthu and became very popular among the audience. The song "Oruvan Oruvan Mudhalali" (One Man is the Leader) became an anthem for Rajinikanth's fans and was later used in his political campaigns.
        - Muthu Tamil movie was dubbed in Japanese and released as Muthu Odoru Maharaja (Muthu – The Dancing Maharaja) in 1998. The film was distributed by Japan's leading distributor Toho and became a huge hit, earning more than $3 million at the box office. It was also screened at various film festivals and received critical acclaim. Rajinikanth became a household name in Japan and gained a large fan following there.
        - Muthu Tamil movie was also dubbed in Telugu, Hindi and Bhojpuri. The Telugu version was titled Muthu Maharaja and the Hindi version was titled Muthu – The Dancing Maharaja. The Bhojpuri version was titled Muthu Pandey.

        Muthu has entertained millions of people across the world and helped make Rajinikanth a global star. It showcases his charisma, style and talent as an actor, and it is a film you should not miss if you love Tamil cinema.

        \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Star Wars Jedi Fallen Order CODEX FitGirl !!TOP!!.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Star Wars Jedi Fallen Order CODEX FitGirl !!TOP!!.md deleted file mode 100644 index c44bd757cae132028173f03e5d0afac1e6c27abd..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Star Wars Jedi Fallen Order CODEX FitGirl !!TOP!!.md +++ /dev/null @@ -1,19 +0,0 @@ -

        How to Download and Install Star Wars Jedi: Fallen Order CODEX, FitGirl


        If you are a fan of Star Wars and action-adventure games, you might be interested in Star Wars Jedi: Fallen Order, a game that lets you play as a Jedi Padawan who survived the purge of Order 66. The game was released in 2019 by Respawn Entertainment and EA, and received positive reviews from critics and players alike.


        However, if you don't want to buy the game or you want to try it before buying it, you might be looking for a way to download and install it for free. There are many websites that offer cracked versions of the game, but not all of them are safe and reliable. Some of them might contain viruses, malware, or unwanted programs that can harm your computer or steal your personal information.


        Star Wars Jedi: Fallen Order CODEX, FitGirl


        DOWNLOAD ★★★ https://urlcod.com/2uIcqi




        One of the most trusted sources for cracked games is CODEX, a group that releases high-quality cracks for various games. They have released a crack for Star Wars Jedi: Fallen Order that works with the latest version of the game and includes all the DLCs and updates. You can download the CODEX crack from their official website or from other reputable torrent sites.


        Another option is to download a repack version of the game from FitGirl, a popular repacker who compresses games to reduce their size and make them easier to download. FitGirl has repacked Star Wars Jedi: Fallen Order with the CODEX crack and made it available on her website and on other torrent sites. The FitGirl repack is smaller than the original game and has optional components that you can choose to install or not.


        Whichever option you choose, follow these steps to download and install Star Wars Jedi: Fallen Order (CODEX, FitGirl) on your PC:

        1. Download the game files from the source of your choice. You will need a torrent client such as uTorrent or BitTorrent to download the files.
        2. Extract the files using a program such as WinRAR or 7-Zip. You will need a password to extract the files. The password is usually provided on the website where you downloaded the files or in a text file inside the archive.
        3. Run the setup.exe file and follow the instructions to install the game. You can choose where to install the game and which components to install. Make sure to check the box that says "Copy contents of CODEX directory to installdir" or something similar.
        4. Run the game from the desktop shortcut or from the game folder. Enjoy playing Star Wars Jedi: Fallen Order!

        Note: Some antivirus programs might detect the crack as a false positive and block it or delete it. To avoid this, you should disable your antivirus program before installing the game and add an exception for the game folder in your antivirus settings.



        Disclaimer: This article is for educational purposes only. We do not condone piracy or illegal downloading of games. If you like Star Wars Jedi: Fallen Order, you should support the developers and buy the game from official sources.

        \ No newline at end of file diff --git a/spaces/nkasmanoff/SearchingFace/vectorize_dataset.py b/spaces/nkasmanoff/SearchingFace/vectorize_dataset.py deleted file mode 100644 index 6555d2dc1c4e2b59beb480a22da82b0e5b792df8..0000000000000000000000000000000000000000 --- a/spaces/nkasmanoff/SearchingFace/vectorize_dataset.py +++ /dev/null @@ -1,36 +0,0 @@ -from datasets import load_dataset -from helpers import clean_up_tags -from langchain.text_splitter import CharacterTextSplitter -from langchain.vectorstores import Chroma -from langchain.document_loaders import DataFrameLoader - - -def load_descriptions_data(dataset='nkasmanoff/hf-dataset-cards'): - if dataset == 'hf-dataset-cards': - hf_datasets = load_dataset(dataset) - hf_df = hf_datasets['train'].to_pandas() - hf_df.dropna(subset=['README'],inplace=True) - hf_df['description_full'] = hf_df['README'] - - else: - hf_datasets = load_dataset('nkasmanoff/huggingface-datasets') - hf_df = hf_datasets['train'].to_pandas() - hf_df['tags_cleaned'] = hf_df['tags'].apply(clean_up_tags) - hf_df.dropna(subset=['description'],inplace=True) - hf_df['description_full'] = hf_df['description'].fillna('') + ' ' + hf_df['tags_cleaned'] - hf_df = hf_df[hf_df['description_full'] != ' '] - hf_df = hf_df[['id','description_full']] - - return hf_df - - -def create_db(hf_df, embeddings): - loader = DataFrameLoader(hf_df, page_content_column="description_full") - documents = loader.load() - # split the documents into chunks - text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) - texts = text_splitter.split_documents(documents) - # select which embeddings we want to use - # create the vectorestore to use as the index - db = Chroma.from_documents(texts, embeddings) - return db \ No newline at end of file diff --git a/spaces/nomic-ai/liuhaotian_LLaVA-Instruct-150K/index.html b/spaces/nomic-ai/liuhaotian_LLaVA-Instruct-150K/index.html deleted file mode 100644 index adce69c05d309d490d494e0fba960a9c73780798..0000000000000000000000000000000000000000 --- a/spaces/nomic-ai/liuhaotian_LLaVA-Instruct-150K/index.html +++ /dev/null @@ -1,42 +0,0 @@ - - - - liuhaotian/LLaVA-Instruct-150K - - - - -
        - -
        - - - \ No newline at end of file diff --git a/spaces/ntt123/WaveGRU-Text-To-Speech/extract_model.py b/spaces/ntt123/WaveGRU-Text-To-Speech/extract_model.py deleted file mode 100644 index f03adc21e8d3ef05c4040e53797ed0cb3d748b9a..0000000000000000000000000000000000000000 --- a/spaces/ntt123/WaveGRU-Text-To-Speech/extract_model.py +++ /dev/null @@ -1,5 +0,0 @@ -import pickle - -dic = pickle.load(open("./tacotrons_ljs_24k_v1_0300000.ckpt", "rb")) -del dic["optim_state_dict"] -pickle.dump(dic, open("./tacotrons_ljs_24k_v1_0300000.ckpt", "wb")) diff --git a/spaces/oguzakif/video-object-remover/FGT_codes/LAFC/models/BaseNetwork.py b/spaces/oguzakif/video-object-remover/FGT_codes/LAFC/models/BaseNetwork.py deleted file mode 100644 index b916da739d190b1cf3e9f8e8708abb50cc652c29..0000000000000000000000000000000000000000 --- a/spaces/oguzakif/video-object-remover/FGT_codes/LAFC/models/BaseNetwork.py +++ /dev/null @@ -1,51 +0,0 @@ -from .utils.network_blocks import * -from .utils.network_blocks_2d import * - - -class BaseNetwork(nn.Module): - def __init__(self, conv_type): - super(BaseNetwork, self).__init__() - self.conv_type = conv_type - if conv_type == 'gated': - self.ConvBlock = GatedConv - self.DeconvBlock = GatedDeconv - self.ConvBlock2d = GatedConv2d - self.DeconvBlock2d = GatedDeconv2d - if conv_type == 'partial': - self.ConvBlock = PartialConv - self.DeconvBlock = PartialDeconv - self.ConvBlock2d = PartialConv2d - self.DeconvBlock2d = PartialDeconv2d - if conv_type == 'vanilla': - self.ConvBlock = VanillaConv - self.DeconvBlock = VanillaDeconv - self.ConvBlock2d = VanillaConv2d - self.DeconvBlock2d = VanillaDeconv2d - - def init_weights(self, init_type='kaiming', gain=0.02): - ''' - initialize network's weights - init_type: normal | xavier | kaiming | orthogonal - https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/9451e70673400885567d08a9e97ade2524c700d0/models/networks.py#L39 - ''' - - def init_func(m): - classname = m.__class__.__name__ - if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1): - if init_type == 'normal': - nn.init.normal_(m.weight.data, 0.0, gain) - elif init_type == 'xavier': - nn.init.xavier_normal_(m.weight.data, gain=gain) - elif init_type == 'kaiming': - nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in') - elif init_type == 'orthogonal': - nn.init.orthogonal_(m.weight.data, gain=gain) - - if hasattr(m, 'bias') and m.bias is not None: - nn.init.constant_(m.bias.data, 0.0) - - elif classname.find('BatchNorm2d') != -1: - nn.init.normal_(m.weight.data, 1.0, gain) - nn.init.constant_(m.bias.data, 0.0) - - self.apply(init_func) diff --git a/spaces/oliver2023/chatgpt-on-wechat/channel/terminal/terminal_channel.py b/spaces/oliver2023/chatgpt-on-wechat/channel/terminal/terminal_channel.py deleted file mode 100644 index 7e8ad357f1a990885bd5109132ffeeddf6f5c0d4..0000000000000000000000000000000000000000 --- a/spaces/oliver2023/chatgpt-on-wechat/channel/terminal/terminal_channel.py +++ /dev/null @@ -1,31 +0,0 @@ -from bridge.context import * -from channel.channel import Channel -import sys - -class TerminalChannel(Channel): - def startup(self): - context = Context() - print("\nPlease input your question") - while True: - try: - prompt = self.get_input("User:\n") - except KeyboardInterrupt: - print("\nExiting...") - sys.exit() - - context.type = ContextType.TEXT - context['session_id'] = "User" - context.content = prompt - print("Bot:") - sys.stdout.flush() - res = super().build_reply_content(prompt, context).content 
- print(res) - - - def get_input(self, prompt): - """ - Multi-line input function - """ - print(prompt, end="") - line = input() - return line diff --git a/spaces/omarbaba/streamlit-test/README.md b/spaces/omarbaba/streamlit-test/README.md deleted file mode 100644 index 1f75d20e2ed076dcbe360336f7186ad873b4b948..0000000000000000000000000000000000000000 --- a/spaces/omarbaba/streamlit-test/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Streamlit Test -emoji: 📈 -colorFrom: pink -colorTo: purple -sdk: streamlit -sdk_version: 1.28.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/oms12/dfgan/text_processing.py b/spaces/oms12/dfgan/text_processing.py deleted file mode 100644 index a3f7a0e61d4cef3efc55f60ec6f362763326ed83..0000000000000000000000000000000000000000 --- a/spaces/oms12/dfgan/text_processing.py +++ /dev/null @@ -1,108 +0,0 @@ -import os -import sys -import errno -import numpy as np -import numpy.random as random -import torch -import json -import pickle -from easydict import EasyDict as edict -from io import BytesIO -from PIL import Image -from torchvision import transforms - - -########### GEN ############# -def get_tokenizer(): - from nltk.tokenize import RegexpTokenizer - tokenizer = RegexpTokenizer(r'\w+') - return tokenizer - - -def tokenize(wordtoix, sentences): - '''generate images from example sentences''' - tokenizer = get_tokenizer() - # a list of indices for a sentence - captions = [] - cap_lens = [] - new_sent = [] - for sent in sentences: - if len(sent) == 0: - continue - sent = sent.replace("\ufffd\ufffd", " ") - tokens = tokenizer.tokenize(sent.lower()) - if len(tokens) == 0: - print('sent', sent) - continue - rev = [] - for t in tokens: - t = t.encode('ascii', 'ignore').decode('ascii') - if len(t) > 0 and t in wordtoix: - rev.append(wordtoix[t]) - captions.append(rev) - cap_lens.append(len(rev)) - new_sent.append(sent) - return captions, cap_lens, new_sent - - -def sort_example_captions(captions, cap_lens, device): - max_len = np.max(cap_lens) - sorted_indices = np.argsort(cap_lens)[::-1] - cap_lens = np.asarray(cap_lens) - cap_lens = cap_lens[sorted_indices] - cap_array = np.zeros((len(captions), max_len), dtype='int64') - for i in range(len(captions)): - idx = sorted_indices[i] - cap = captions[idx] - c_len = len(cap) - cap_array[i, :c_len] = cap - captions = torch.from_numpy(cap_array).to(device) - cap_lens = torch.from_numpy(cap_lens).to(device) - return captions, cap_lens, sorted_indices - - -def prepare_sample_data(captions, caption_lens, text_encoder, device): - print('*'*40) - captions, sorted_cap_lens, sorted_cap_idxs = sort_example_captions(captions, caption_lens, device) - sent_emb, words_embs = encode_tokens(text_encoder, captions, sorted_cap_lens) - sent_emb = rm_sort(sent_emb, sorted_cap_idxs) - words_embs = rm_sort(words_embs, sorted_cap_idxs) - return sent_emb, words_embs - - -def encode_tokens(text_encoder, caption, cap_lens): - # encode text - with torch.no_grad(): - if hasattr(text_encoder, 'module'): - hidden = text_encoder.module.init_hidden(caption.size(0)) - else: - hidden = text_encoder.init_hidden(caption.size(0)) - words_embs, sent_emb = text_encoder(caption, cap_lens, hidden) - words_embs, sent_emb = words_embs.detach(), sent_emb.detach() - return sent_emb, words_embs - - -def sort_sents(captions, caption_lens, device): - # sort data by the length in a decreasing order - sorted_cap_lens, sorted_cap_indices = 
torch.sort(caption_lens, 0, True) - captions = captions[sorted_cap_indices].squeeze() - captions = captions.to(device) - sorted_cap_lens = sorted_cap_lens.to(device) - return captions, sorted_cap_lens, sorted_cap_indices - - -def rm_sort(caption, sorted_cap_idxs): - non_sort_cap = torch.empty_like(caption) - for idx, sort in enumerate(sorted_cap_idxs): - non_sort_cap[sort] = caption[idx] - return non_sort_cap - - -def get_img(img): - im = img.data.cpu().numpy() - # [-1, 1] --> [0, 255] - im = (im + 1.0) * 127.5 - im = im.astype(np.uint8) - im = np.transpose(im, (1, 2, 0)) - im = Image.fromarray(im) - return im \ No newline at end of file diff --git a/spaces/ondrejbiza/isa/README.md b/spaces/ondrejbiza/isa/README.md deleted file mode 100644 index e5481c03c95aae95ebe7cef9886523cbe0140b78..0000000000000000000000000000000000000000 --- a/spaces/ondrejbiza/isa/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Invariant Slot Attention -emoji: 🦉 -colorFrom: blue -colorTo: pink -sdk: gradio -sdk_version: 3.27.0 -app_file: app.py -pinned: true -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ori1026/OriChatGPT/run_macOS.command b/spaces/ori1026/OriChatGPT/run_macOS.command deleted file mode 100644 index 62af07283093d8e580763d7acfe493c3d88e7b08..0000000000000000000000000000000000000000 --- a/spaces/ori1026/OriChatGPT/run_macOS.command +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash - -# 获取脚本所在目录 -script_dir=$(dirname "$0") - -# 将工作目录更改为脚本所在目录 -cd "$script_dir" - -# 检查Git仓库是否有更新 -git remote update -pwd - -if ! git status -uno | grep 'up to date' > /dev/null; then - # 如果有更新,关闭当前运行的服务器 - pkill -f ChuanhuChatbot.py - - # 拉取最新更改 - git pull - - # 安装依赖 - pip3 install -r requirements.txt - - # 重新启动服务器 - nohup python3 ChuanhuChatbot.py & -fi diff --git a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docs/source/en/optimization/mps.md b/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docs/source/en/optimization/mps.md deleted file mode 100644 index 138c85b511840b977c62270bd7b9a80b793a20af..0000000000000000000000000000000000000000 --- a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docs/source/en/optimization/mps.md +++ /dev/null @@ -1,71 +0,0 @@ - - -# Metal Performance Shaders (MPS) - -🤗 Diffusers is compatible with Apple silicon (M1/M2 chips) using the PyTorch [`mps`](https://pytorch.org/docs/stable/notes/mps.html) device, which uses the Metal framework to leverage the GPU on MacOS devices. You'll need to have: - -- macOS computer with Apple silicon (M1/M2) hardware -- macOS 12.6 or later (13.0 or later recommended) -- arm64 version of Python -- [PyTorch 2.0](https://pytorch.org/get-started/locally/) (recommended) or 1.13 (minimum version supported for `mps`) - -The `mps` backend uses PyTorch's `.to()` interface to move the Stable Diffusion pipeline on to your M1 or M2 device: - -```python -from diffusers import DiffusionPipeline - -pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") -pipe = pipe.to("mps") - -# Recommended if your computer has < 64 GB of RAM -pipe.enable_attention_slicing() - -prompt = "a photo of an astronaut riding a horse on mars" -``` - - - -Generating multiple prompts in a batch can [crash](https://github.com/huggingface/diffusers/issues/363) or fail to work reliably. We believe this is related to the [`mps`](https://github.com/pytorch/pytorch/issues/84039) backend in PyTorch. While this is being investigated, you should iterate instead of batching. 
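As a rough sketch of the "iterate instead of batching" advice above (this example is not part of the original guide, and the second prompt string is purely illustrative):

```python
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
pipe = pipe.to("mps")
pipe.enable_attention_slicing()

# Call the pipeline once per prompt instead of passing the whole list in a single
# batched call, which can be unreliable on the mps backend.
prompts = [
    "a photo of an astronaut riding a horse on mars",
    "a watercolor painting of a fox in a forest",  # illustrative prompt, not from the original doc
]
images = [pipe(prompt).images[0] for prompt in prompts]
```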
- - - -If you're using **PyTorch 1.13**, you need to "prime" the pipeline with an additional one-time pass through it. This is a temporary workaround for an issue where the first inference pass produces slightly different results than subsequent ones. You only need to do this pass once, and after just one inference step you can discard the result. - -```diff - from diffusers import DiffusionPipeline - - pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5").to("mps") - pipe.enable_attention_slicing() - - prompt = "a photo of an astronaut riding a horse on mars" -# First-time "warmup" pass if PyTorch version is 1.13 -+ _ = pipe(prompt, num_inference_steps=1) - -# Results match those from the CPU device after the warmup pass. - image = pipe(prompt).images[0] -``` - -## Troubleshoot - -M1/M2 performance is very sensitive to memory pressure. When this occurs, the system automatically swaps if it needs to which significantly degrades performance. - -To prevent this from happening, we recommend *attention slicing* to reduce memory pressure during inference and prevent swapping. This is especially relevant if your computer has less than 64GB of system RAM, or if you generate images at non-standard resolutions larger than 512×512 pixels. Call the [`~DiffusionPipeline.enable_attention_slicing`] function on your pipeline: - -```py -from diffusers import DiffusionPipeline - -pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16", use_safetensors=True).to("mps") -pipeline.enable_attention_slicing() -``` - -Attention slicing performs the costly attention operation in multiple steps instead of all at once. It usually improves performance by ~20% in computers without universal memory, but we've observed *better performance* in most Apple silicon computers unless you have 64GB of RAM or more. diff --git a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/src/diffusers/models/modeling_utils.py b/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/src/diffusers/models/modeling_utils.py deleted file mode 100644 index 67746ebacef21a947223fe1ed25ce6edb0242c69..0000000000000000000000000000000000000000 --- a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/src/diffusers/models/modeling_utils.py +++ /dev/null @@ -1,1007 +0,0 @@ -# coding=utf-8 -# Copyright 2023 The HuggingFace Inc. team. -# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import inspect -import itertools -import os -import re -from functools import partial -from typing import Any, Callable, List, Optional, Tuple, Union - -import safetensors -import torch -from huggingface_hub import create_repo -from torch import Tensor, device, nn - -from .. 
import __version__ -from ..utils import ( - CONFIG_NAME, - DIFFUSERS_CACHE, - FLAX_WEIGHTS_NAME, - HF_HUB_OFFLINE, - SAFETENSORS_WEIGHTS_NAME, - WEIGHTS_NAME, - _add_variant, - _get_model_file, - deprecate, - is_accelerate_available, - is_torch_version, - logging, -) -from ..utils.hub_utils import PushToHubMixin - - -logger = logging.get_logger(__name__) - - -if is_torch_version(">=", "1.9.0"): - _LOW_CPU_MEM_USAGE_DEFAULT = True -else: - _LOW_CPU_MEM_USAGE_DEFAULT = False - - -if is_accelerate_available(): - import accelerate - from accelerate.utils import set_module_tensor_to_device - from accelerate.utils.versions import is_torch_version - - -def get_parameter_device(parameter: torch.nn.Module): - try: - parameters_and_buffers = itertools.chain(parameter.parameters(), parameter.buffers()) - return next(parameters_and_buffers).device - except StopIteration: - # For torch.nn.DataParallel compatibility in PyTorch 1.5 - - def find_tensor_attributes(module: torch.nn.Module) -> List[Tuple[str, Tensor]]: - tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)] - return tuples - - gen = parameter._named_members(get_members_fn=find_tensor_attributes) - first_tuple = next(gen) - return first_tuple[1].device - - -def get_parameter_dtype(parameter: torch.nn.Module): - try: - params = tuple(parameter.parameters()) - if len(params) > 0: - return params[0].dtype - - buffers = tuple(parameter.buffers()) - if len(buffers) > 0: - return buffers[0].dtype - - except StopIteration: - # For torch.nn.DataParallel compatibility in PyTorch 1.5 - - def find_tensor_attributes(module: torch.nn.Module) -> List[Tuple[str, Tensor]]: - tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)] - return tuples - - gen = parameter._named_members(get_members_fn=find_tensor_attributes) - first_tuple = next(gen) - return first_tuple[1].dtype - - -def load_state_dict(checkpoint_file: Union[str, os.PathLike], variant: Optional[str] = None): - """ - Reads a checkpoint file, returning properly formatted errors if they arise. - """ - try: - if os.path.basename(checkpoint_file) == _add_variant(WEIGHTS_NAME, variant): - return torch.load(checkpoint_file, map_location="cpu") - else: - return safetensors.torch.load_file(checkpoint_file, device="cpu") - except Exception as e: - try: - with open(checkpoint_file) as f: - if f.read().startswith("version"): - raise OSError( - "You seem to have cloned a repository without having git-lfs installed. Please install " - "git-lfs and run `git lfs install` followed by `git lfs pull` in the folder " - "you cloned." - ) - else: - raise ValueError( - f"Unable to locate the file {checkpoint_file} which is necessary to load this pretrained " - "model. Make sure you have saved the model properly." - ) from e - except (UnicodeDecodeError, ValueError): - raise OSError( - f"Unable to load weights from checkpoint file for '{checkpoint_file}' " - f"at '{checkpoint_file}'. " - "If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True." 
- ) - - -def load_model_dict_into_meta(model, state_dict, device=None, dtype=None, model_name_or_path=None): - device = device or torch.device("cpu") - dtype = dtype or torch.float32 - - unexpected_keys = [] - empty_state_dict = model.state_dict() - for param_name, param in state_dict.items(): - if param_name not in empty_state_dict: - unexpected_keys.append(param_name) - continue - - if empty_state_dict[param_name].shape != param.shape: - model_name_or_path_str = f"{model_name_or_path} " if model_name_or_path is not None else "" - raise ValueError( - f"Cannot load {model_name_or_path_str}because {param_name} expected shape {empty_state_dict[param_name]}, but got {param.shape}. If you want to instead overwrite randomly initialized weights, please make sure to pass both `low_cpu_mem_usage=False` and `ignore_mismatched_sizes=True`. For more information, see also: https://github.com/huggingface/diffusers/issues/1619#issuecomment-1345604389 as an example." - ) - - accepts_dtype = "dtype" in set(inspect.signature(set_module_tensor_to_device).parameters.keys()) - if accepts_dtype: - set_module_tensor_to_device(model, param_name, device, value=param, dtype=dtype) - else: - set_module_tensor_to_device(model, param_name, device, value=param) - return unexpected_keys - - -def _load_state_dict_into_model(model_to_load, state_dict): - # Convert old format to new format if needed from a PyTorch state_dict - # copy state_dict so _load_from_state_dict can modify it - state_dict = state_dict.copy() - error_msgs = [] - - # PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants - # so we need to apply the function recursively. - def load(module: torch.nn.Module, prefix=""): - args = (state_dict, prefix, {}, True, [], [], error_msgs) - module._load_from_state_dict(*args) - - for name, child in module._modules.items(): - if child is not None: - load(child, prefix + name + ".") - - load(model_to_load) - - return error_msgs - - -class ModelMixin(torch.nn.Module, PushToHubMixin): - r""" - Base class for all models. - - [`ModelMixin`] takes care of storing the model configuration and provides methods for loading, downloading and - saving models. - - - **config_name** ([`str`]) -- Filename to save a model to when calling [`~models.ModelMixin.save_pretrained`]. - """ - config_name = CONFIG_NAME - _automatically_saved_args = ["_diffusers_version", "_class_name", "_name_or_path"] - _supports_gradient_checkpointing = False - _keys_to_ignore_on_load_unexpected = None - - def __init__(self): - super().__init__() - - def __getattr__(self, name: str) -> Any: - """The only reason we overwrite `getattr` here is to gracefully deprecate accessing - config attributes directly. See https://github.com/huggingface/diffusers/pull/3129 We need to overwrite - __getattr__ here in addition so that we don't trigger `torch.nn.Module`'s __getattr__': - https://pytorch.org/docs/stable/_modules/torch/nn/modules/module.html#Module - """ - - is_in_config = "_internal_dict" in self.__dict__ and hasattr(self.__dict__["_internal_dict"], name) - is_attribute = name in self.__dict__ - - if is_in_config and not is_attribute: - deprecation_message = f"Accessing config attribute `{name}` directly via '{type(self).__name__}' object attribute is deprecated. Please access '{name}' over '{type(self).__name__}'s config object instead, e.g. 'unet.config.{name}'." 
- deprecate("direct config name access", "1.0.0", deprecation_message, standard_warn=False, stacklevel=3) - return self._internal_dict[name] - - # call PyTorch's https://pytorch.org/docs/stable/_modules/torch/nn/modules/module.html#Module - return super().__getattr__(name) - - @property - def is_gradient_checkpointing(self) -> bool: - """ - Whether gradient checkpointing is activated for this model or not. - """ - return any(hasattr(m, "gradient_checkpointing") and m.gradient_checkpointing for m in self.modules()) - - def enable_gradient_checkpointing(self): - """ - Activates gradient checkpointing for the current model (may be referred to as *activation checkpointing* or - *checkpoint activations* in other frameworks). - """ - if not self._supports_gradient_checkpointing: - raise ValueError(f"{self.__class__.__name__} does not support gradient checkpointing.") - self.apply(partial(self._set_gradient_checkpointing, value=True)) - - def disable_gradient_checkpointing(self): - """ - Deactivates gradient checkpointing for the current model (may be referred to as *activation checkpointing* or - *checkpoint activations* in other frameworks). - """ - if self._supports_gradient_checkpointing: - self.apply(partial(self._set_gradient_checkpointing, value=False)) - - def set_use_memory_efficient_attention_xformers( - self, valid: bool, attention_op: Optional[Callable] = None - ) -> None: - # Recursively walk through all the children. - # Any children which exposes the set_use_memory_efficient_attention_xformers method - # gets the message - def fn_recursive_set_mem_eff(module: torch.nn.Module): - if hasattr(module, "set_use_memory_efficient_attention_xformers"): - module.set_use_memory_efficient_attention_xformers(valid, attention_op) - - for child in module.children(): - fn_recursive_set_mem_eff(child) - - for module in self.children(): - if isinstance(module, torch.nn.Module): - fn_recursive_set_mem_eff(module) - - def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None): - r""" - Enable memory efficient attention from [xFormers](https://facebookresearch.github.io/xformers/). - - When this option is enabled, you should observe lower GPU memory usage and a potential speed up during - inference. Speed up during training is not guaranteed. - - - - ⚠️ When memory efficient attention and sliced attention are both enabled, memory efficient attention takes - precedent. - - - - Parameters: - attention_op (`Callable`, *optional*): - Override the default `None` operator for use as `op` argument to the - [`memory_efficient_attention()`](https://facebookresearch.github.io/xformers/components/ops.html#xformers.ops.memory_efficient_attention) - function of xFormers. - - Examples: - - ```py - >>> import torch - >>> from diffusers import UNet2DConditionModel - >>> from xformers.ops import MemoryEfficientAttentionFlashAttentionOp - - >>> model = UNet2DConditionModel.from_pretrained( - ... "stabilityai/stable-diffusion-2-1", subfolder="unet", torch_dtype=torch.float16 - ... ) - >>> model = model.to("cuda") - >>> model.enable_xformers_memory_efficient_attention(attention_op=MemoryEfficientAttentionFlashAttentionOp) - ``` - """ - self.set_use_memory_efficient_attention_xformers(True, attention_op) - - def disable_xformers_memory_efficient_attention(self): - r""" - Disable memory efficient attention from [xFormers](https://facebookresearch.github.io/xformers/). 
- """ - self.set_use_memory_efficient_attention_xformers(False) - - def save_pretrained( - self, - save_directory: Union[str, os.PathLike], - is_main_process: bool = True, - save_function: Callable = None, - safe_serialization: bool = True, - variant: Optional[str] = None, - push_to_hub: bool = False, - **kwargs, - ): - """ - Save a model and its configuration file to a directory so that it can be reloaded using the - [`~models.ModelMixin.from_pretrained`] class method. - - Arguments: - save_directory (`str` or `os.PathLike`): - Directory to save a model and its configuration file to. Will be created if it doesn't exist. - is_main_process (`bool`, *optional*, defaults to `True`): - Whether the process calling this is the main process or not. Useful during distributed training and you - need to call this function on all processes. In this case, set `is_main_process=True` only on the main - process to avoid race conditions. - save_function (`Callable`): - The function to use to save the state dictionary. Useful during distributed training when you need to - replace `torch.save` with another method. Can be configured with the environment variable - `DIFFUSERS_SAVE_MODE`. - safe_serialization (`bool`, *optional*, defaults to `True`): - Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`. - variant (`str`, *optional*): - If specified, weights are saved in the format `pytorch_model..bin`. - push_to_hub (`bool`, *optional*, defaults to `False`): - Whether or not to push your model to the Hugging Face Hub after saving it. You can specify the - repository you want to push to with `repo_id` (will default to the name of `save_directory` in your - namespace). - kwargs (`Dict[str, Any]`, *optional*): - Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. - """ - if os.path.isfile(save_directory): - logger.error(f"Provided path ({save_directory}) should be a directory, not a file") - return - - os.makedirs(save_directory, exist_ok=True) - - if push_to_hub: - commit_message = kwargs.pop("commit_message", None) - private = kwargs.pop("private", False) - create_pr = kwargs.pop("create_pr", False) - token = kwargs.pop("token", None) - repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1]) - repo_id = create_repo(repo_id, exist_ok=True, private=private, token=token).repo_id - - # Only save the model itself if we are using distributed training - model_to_save = self - - # Attach architecture to the config - # Save the config - if is_main_process: - model_to_save.save_config(save_directory) - - # Save the model - state_dict = model_to_save.state_dict() - - weights_name = SAFETENSORS_WEIGHTS_NAME if safe_serialization else WEIGHTS_NAME - weights_name = _add_variant(weights_name, variant) - - # Save the model - if safe_serialization: - safetensors.torch.save_file( - state_dict, os.path.join(save_directory, weights_name), metadata={"format": "pt"} - ) - else: - torch.save(state_dict, os.path.join(save_directory, weights_name)) - - logger.info(f"Model weights saved in {os.path.join(save_directory, weights_name)}") - - if push_to_hub: - self._upload_folder( - save_directory, - repo_id, - token=token, - commit_message=commit_message, - create_pr=create_pr, - ) - - @classmethod - def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs): - r""" - Instantiate a pretrained PyTorch model from a pretrained model configuration. 
- - The model is set in evaluation mode - `model.eval()` - by default, and dropout modules are deactivated. To - train the model, set it back in training mode with `model.train()`. - - Parameters: - pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*): - Can be either: - - - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on - the Hub. - - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved - with [`~ModelMixin.save_pretrained`]. - - cache_dir (`Union[str, os.PathLike]`, *optional*): - Path to a directory where a downloaded pretrained model configuration is cached if the standard cache - is not used. - torch_dtype (`str` or `torch.dtype`, *optional*): - Override the default `torch.dtype` and load the model with another dtype. If `"auto"` is passed, the - dtype is automatically derived from the model's weights. - force_download (`bool`, *optional*, defaults to `False`): - Whether or not to force the (re-)download of the model weights and configuration files, overriding the - cached versions if they exist. - resume_download (`bool`, *optional*, defaults to `False`): - Whether or not to resume downloading the model weights and configuration files. If set to `False`, any - incompletely downloaded files are deleted. - proxies (`Dict[str, str]`, *optional*): - A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', - 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. - output_loading_info (`bool`, *optional*, defaults to `False`): - Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. - local_files_only(`bool`, *optional*, defaults to `False`): - Whether to only load local model weights and configuration files or not. If set to `True`, the model - won't be downloaded from the Hub. - use_auth_token (`str` or *bool*, *optional*): - The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from - `diffusers-cli login` (stored in `~/.huggingface`) is used. - revision (`str`, *optional*, defaults to `"main"`): - The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier - allowed by Git. - from_flax (`bool`, *optional*, defaults to `False`): - Load the model weights from a Flax checkpoint save file. - subfolder (`str`, *optional*, defaults to `""`): - The subfolder location of a model file within a larger model repository on the Hub or locally. - mirror (`str`, *optional*): - Mirror source to resolve accessibility issues if you're downloading a model in China. We do not - guarantee the timeliness or safety of the source, and you should refer to the mirror site for more - information. - device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*): - A map that specifies where each submodule should go. It doesn't need to be defined for each - parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the - same device. - - Set `device_map="auto"` to have 🤗 Accelerate automatically compute the most optimized `device_map`. For - more information about each option see [designing a device - map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map). - max_memory (`Dict`, *optional*): - A dictionary device identifier for the maximum memory. 
Will default to the maximum memory available for - each GPU and the available CPU RAM if unset. - offload_folder (`str` or `os.PathLike`, *optional*): - The path to offload weights if `device_map` contains the value `"disk"`. - offload_state_dict (`bool`, *optional*): - If `True`, temporarily offloads the CPU state dict to the hard drive to avoid running out of CPU RAM if - the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to `True` - when there is some disk offload. - low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`): - Speed up model loading only loading the pretrained weights and not initializing the weights. This also - tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model. - Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this - argument to `True` will raise an error. - variant (`str`, *optional*): - Load weights from a specified `variant` filename such as `"fp16"` or `"ema"`. This is ignored when - loading `from_flax`. - use_safetensors (`bool`, *optional*, defaults to `None`): - If set to `None`, the `safetensors` weights are downloaded if they're available **and** if the - `safetensors` library is installed. If set to `True`, the model is forcibly loaded from `safetensors` - weights. If set to `False`, `safetensors` weights are not loaded. - - - - To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with - `huggingface-cli login`. You can also activate the special - ["offline-mode"](https://huggingface.co/diffusers/installation.html#offline-mode) to use this method in a - firewalled environment. - - - - Example: - - ```py - from diffusers import UNet2DConditionModel - - unet = UNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="unet") - ``` - - If you get the error message below, you need to finetune the weights for your downstream task: - - ```bash - Some weights of UNet2DConditionModel were not initialized from the model checkpoint at runwayml/stable-diffusion-v1-5 and are newly initialized because the shapes did not match: - - conv_in.weight: found shape torch.Size([320, 4, 3, 3]) in the checkpoint and torch.Size([320, 9, 3, 3]) in the model instantiated - You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. 
- ``` - """ - cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE) - ignore_mismatched_sizes = kwargs.pop("ignore_mismatched_sizes", False) - force_download = kwargs.pop("force_download", False) - from_flax = kwargs.pop("from_flax", False) - resume_download = kwargs.pop("resume_download", False) - proxies = kwargs.pop("proxies", None) - output_loading_info = kwargs.pop("output_loading_info", False) - local_files_only = kwargs.pop("local_files_only", HF_HUB_OFFLINE) - use_auth_token = kwargs.pop("use_auth_token", None) - revision = kwargs.pop("revision", None) - torch_dtype = kwargs.pop("torch_dtype", None) - subfolder = kwargs.pop("subfolder", None) - device_map = kwargs.pop("device_map", None) - max_memory = kwargs.pop("max_memory", None) - offload_folder = kwargs.pop("offload_folder", None) - offload_state_dict = kwargs.pop("offload_state_dict", False) - low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT) - variant = kwargs.pop("variant", None) - use_safetensors = kwargs.pop("use_safetensors", None) - - allow_pickle = False - if use_safetensors is None: - use_safetensors = True - allow_pickle = True - - if low_cpu_mem_usage and not is_accelerate_available(): - low_cpu_mem_usage = False - logger.warning( - "Cannot initialize model with low cpu memory usage because `accelerate` was not found in the" - " environment. Defaulting to `low_cpu_mem_usage=False`. It is strongly recommended to install" - " `accelerate` for faster and less memory-intense model loading. You can do so with: \n```\npip" - " install accelerate\n```\n." - ) - - if device_map is not None and not is_accelerate_available(): - raise NotImplementedError( - "Loading and dispatching requires `accelerate`. Please make sure to install accelerate or set" - " `device_map=None`. You can install accelerate with `pip install accelerate`." - ) - - # Check if we can handle device_map and dispatching the weights - if device_map is not None and not is_torch_version(">=", "1.9.0"): - raise NotImplementedError( - "Loading and dispatching requires torch >= 1.9.0. Please either update your PyTorch version or set" - " `device_map=None`." - ) - - if low_cpu_mem_usage is True and not is_torch_version(">=", "1.9.0"): - raise NotImplementedError( - "Low memory initialization requires torch >= 1.9.0. Please either update your PyTorch version or set" - " `low_cpu_mem_usage=False`." - ) - - if low_cpu_mem_usage is False and device_map is not None: - raise ValueError( - f"You cannot set `low_cpu_mem_usage` to `False` while using device_map={device_map} for loading and" - " dispatching. Please make sure to set `low_cpu_mem_usage=True`." 
- ) - - # Load config if we don't provide a configuration - config_path = pretrained_model_name_or_path - - user_agent = { - "diffusers": __version__, - "file_type": "model", - "framework": "pytorch", - } - - # load config - config, unused_kwargs, commit_hash = cls.load_config( - config_path, - cache_dir=cache_dir, - return_unused_kwargs=True, - return_commit_hash=True, - force_download=force_download, - resume_download=resume_download, - proxies=proxies, - local_files_only=local_files_only, - use_auth_token=use_auth_token, - revision=revision, - subfolder=subfolder, - device_map=device_map, - max_memory=max_memory, - offload_folder=offload_folder, - offload_state_dict=offload_state_dict, - user_agent=user_agent, - **kwargs, - ) - - # load model - model_file = None - if from_flax: - model_file = _get_model_file( - pretrained_model_name_or_path, - weights_name=FLAX_WEIGHTS_NAME, - cache_dir=cache_dir, - force_download=force_download, - resume_download=resume_download, - proxies=proxies, - local_files_only=local_files_only, - use_auth_token=use_auth_token, - revision=revision, - subfolder=subfolder, - user_agent=user_agent, - commit_hash=commit_hash, - ) - model = cls.from_config(config, **unused_kwargs) - - # Convert the weights - from .modeling_pytorch_flax_utils import load_flax_checkpoint_in_pytorch_model - - model = load_flax_checkpoint_in_pytorch_model(model, model_file) - else: - if use_safetensors: - try: - model_file = _get_model_file( - pretrained_model_name_or_path, - weights_name=_add_variant(SAFETENSORS_WEIGHTS_NAME, variant), - cache_dir=cache_dir, - force_download=force_download, - resume_download=resume_download, - proxies=proxies, - local_files_only=local_files_only, - use_auth_token=use_auth_token, - revision=revision, - subfolder=subfolder, - user_agent=user_agent, - commit_hash=commit_hash, - ) - except IOError as e: - if not allow_pickle: - raise e - pass - if model_file is None: - model_file = _get_model_file( - pretrained_model_name_or_path, - weights_name=_add_variant(WEIGHTS_NAME, variant), - cache_dir=cache_dir, - force_download=force_download, - resume_download=resume_download, - proxies=proxies, - local_files_only=local_files_only, - use_auth_token=use_auth_token, - revision=revision, - subfolder=subfolder, - user_agent=user_agent, - commit_hash=commit_hash, - ) - - if low_cpu_mem_usage: - # Instantiate model with empty weights - with accelerate.init_empty_weights(): - model = cls.from_config(config, **unused_kwargs) - - # if device_map is None, load the state dict and move the params from meta device to the cpu - if device_map is None: - param_device = "cpu" - state_dict = load_state_dict(model_file, variant=variant) - model._convert_deprecated_attention_blocks(state_dict) - # move the params from meta device to cpu - missing_keys = set(model.state_dict().keys()) - set(state_dict.keys()) - if len(missing_keys) > 0: - raise ValueError( - f"Cannot load {cls} from {pretrained_model_name_or_path} because the following keys are" - f" missing: \n {', '.join(missing_keys)}. \n Please make sure to pass" - " `low_cpu_mem_usage=False` and `device_map=None` if you want to randomly initialize" - " those weights or else make sure your checkpoint file is correct." 
- ) - - unexpected_keys = load_model_dict_into_meta( - model, - state_dict, - device=param_device, - dtype=torch_dtype, - model_name_or_path=pretrained_model_name_or_path, - ) - - if cls._keys_to_ignore_on_load_unexpected is not None: - for pat in cls._keys_to_ignore_on_load_unexpected: - unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None] - - if len(unexpected_keys) > 0: - logger.warn( - f"Some weights of the model checkpoint were not used when initializing {cls.__name__}: \n {[', '.join(unexpected_keys)]}" - ) - - else: # else let accelerate handle loading and dispatching. - # Load weights and dispatch according to the device_map - # by default the device_map is None and the weights are loaded on the CPU - try: - accelerate.load_checkpoint_and_dispatch( - model, - model_file, - device_map, - max_memory=max_memory, - offload_folder=offload_folder, - offload_state_dict=offload_state_dict, - dtype=torch_dtype, - ) - except AttributeError as e: - # When using accelerate loading, we do not have the ability to load the state - # dict and rename the weight names manually. Additionally, accelerate skips - # torch loading conventions and directly writes into `module.{_buffers, _parameters}` - # (which look like they should be private variables?), so we can't use the standard hooks - # to rename parameters on load. We need to mimic the original weight names so the correct - # attributes are available. After we have loaded the weights, we convert the deprecated - # names to the new non-deprecated names. Then we _greatly encourage_ the user to convert - # the weights so we don't have to do this again. - - if "'Attention' object has no attribute" in str(e): - logger.warn( - f"Taking `{str(e)}` while using `accelerate.load_checkpoint_and_dispatch` to mean {pretrained_model_name_or_path}" - " was saved with deprecated attention block weight names. We will load it with the deprecated attention block" - " names and convert them on the fly to the new attention block format. Please re-save the model after this conversion," - " so we don't have to do the on the fly renaming in the future. If the model is from a hub checkpoint," - " please also re-upload it or open a PR on the original repository." - ) - model._temp_convert_self_to_deprecated_attention_blocks() - accelerate.load_checkpoint_and_dispatch( - model, - model_file, - device_map, - max_memory=max_memory, - offload_folder=offload_folder, - offload_state_dict=offload_state_dict, - dtype=torch_dtype, - ) - model._undo_temp_convert_self_to_deprecated_attention_blocks() - else: - raise e - - loading_info = { - "missing_keys": [], - "unexpected_keys": [], - "mismatched_keys": [], - "error_msgs": [], - } - else: - model = cls.from_config(config, **unused_kwargs) - - state_dict = load_state_dict(model_file, variant=variant) - model._convert_deprecated_attention_blocks(state_dict) - - model, missing_keys, unexpected_keys, mismatched_keys, error_msgs = cls._load_pretrained_model( - model, - state_dict, - model_file, - pretrained_model_name_or_path, - ignore_mismatched_sizes=ignore_mismatched_sizes, - ) - - loading_info = { - "missing_keys": missing_keys, - "unexpected_keys": unexpected_keys, - "mismatched_keys": mismatched_keys, - "error_msgs": error_msgs, - } - - if torch_dtype is not None and not isinstance(torch_dtype, torch.dtype): - raise ValueError( - f"{torch_dtype} needs to be of type `torch.dtype`, e.g. `torch.float16`, but is {type(torch_dtype)}." 
- ) - elif torch_dtype is not None: - model = model.to(torch_dtype) - - model.register_to_config(_name_or_path=pretrained_model_name_or_path) - - # Set model in evaluation mode to deactivate DropOut modules by default - model.eval() - if output_loading_info: - return model, loading_info - - return model - - @classmethod - def _load_pretrained_model( - cls, - model, - state_dict, - resolved_archive_file, - pretrained_model_name_or_path, - ignore_mismatched_sizes=False, - ): - # Retrieve missing & unexpected_keys - model_state_dict = model.state_dict() - loaded_keys = list(state_dict.keys()) - - expected_keys = list(model_state_dict.keys()) - - original_loaded_keys = loaded_keys - - missing_keys = list(set(expected_keys) - set(loaded_keys)) - unexpected_keys = list(set(loaded_keys) - set(expected_keys)) - - # Make sure we are able to load base models as well as derived models (with heads) - model_to_load = model - - def _find_mismatched_keys( - state_dict, - model_state_dict, - loaded_keys, - ignore_mismatched_sizes, - ): - mismatched_keys = [] - if ignore_mismatched_sizes: - for checkpoint_key in loaded_keys: - model_key = checkpoint_key - - if ( - model_key in model_state_dict - and state_dict[checkpoint_key].shape != model_state_dict[model_key].shape - ): - mismatched_keys.append( - (checkpoint_key, state_dict[checkpoint_key].shape, model_state_dict[model_key].shape) - ) - del state_dict[checkpoint_key] - return mismatched_keys - - if state_dict is not None: - # Whole checkpoint - mismatched_keys = _find_mismatched_keys( - state_dict, - model_state_dict, - original_loaded_keys, - ignore_mismatched_sizes, - ) - error_msgs = _load_state_dict_into_model(model_to_load, state_dict) - - if len(error_msgs) > 0: - error_msg = "\n\t".join(error_msgs) - if "size mismatch" in error_msg: - error_msg += ( - "\n\tYou may consider adding `ignore_mismatched_sizes=True` in the model `from_pretrained` method." - ) - raise RuntimeError(f"Error(s) in loading state_dict for {model.__class__.__name__}:\n\t{error_msg}") - - if len(unexpected_keys) > 0: - logger.warning( - f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when" - f" initializing {model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are" - f" initializing {model.__class__.__name__} from the checkpoint of a model trained on another task" - " or with another architecture (e.g. initializing a BertForSequenceClassification model from a" - " BertForPreTraining model).\n- This IS NOT expected if you are initializing" - f" {model.__class__.__name__} from the checkpoint of a model that you expect to be exactly" - " identical (initializing a BertForSequenceClassification model from a" - " BertForSequenceClassification model)." - ) - else: - logger.info(f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n") - if len(missing_keys) > 0: - logger.warning( - f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at" - f" {pretrained_model_name_or_path} and are newly initialized: {missing_keys}\nYou should probably" - " TRAIN this model on a down-stream task to be able to use it for predictions and inference." 
- ) - elif len(mismatched_keys) == 0: - logger.info( - f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at" - f" {pretrained_model_name_or_path}.\nIf your task is similar to the task the model of the" - f" checkpoint was trained on, you can already use {model.__class__.__name__} for predictions" - " without further training." - ) - if len(mismatched_keys) > 0: - mismatched_warning = "\n".join( - [ - f"- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated" - for key, shape1, shape2 in mismatched_keys - ] - ) - logger.warning( - f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at" - f" {pretrained_model_name_or_path} and are newly initialized because the shapes did not" - f" match:\n{mismatched_warning}\nYou should probably TRAIN this model on a down-stream task to be" - " able to use it for predictions and inference." - ) - - return model, missing_keys, unexpected_keys, mismatched_keys, error_msgs - - @property - def device(self) -> device: - """ - `torch.device`: The device on which the module is (assuming that all the module parameters are on the same - device). - """ - return get_parameter_device(self) - - @property - def dtype(self) -> torch.dtype: - """ - `torch.dtype`: The dtype of the module (assuming that all the module parameters have the same dtype). - """ - return get_parameter_dtype(self) - - def num_parameters(self, only_trainable: bool = False, exclude_embeddings: bool = False) -> int: - """ - Get number of (trainable or non-embedding) parameters in the module. - - Args: - only_trainable (`bool`, *optional*, defaults to `False`): - Whether or not to return only the number of trainable parameters. - exclude_embeddings (`bool`, *optional*, defaults to `False`): - Whether or not to return only the number of non-embedding parameters. - - Returns: - `int`: The number of parameters. 
- - Example: - - ```py - from diffusers import UNet2DConditionModel - - model_id = "runwayml/stable-diffusion-v1-5" - unet = UNet2DConditionModel.from_pretrained(model_id, subfolder="unet") - unet.num_parameters(only_trainable=True) - 859520964 - ``` - """ - - if exclude_embeddings: - embedding_param_names = [ - f"{name}.weight" - for name, module_type in self.named_modules() - if isinstance(module_type, torch.nn.Embedding) - ] - non_embedding_parameters = [ - parameter for name, parameter in self.named_parameters() if name not in embedding_param_names - ] - return sum(p.numel() for p in non_embedding_parameters if p.requires_grad or not only_trainable) - else: - return sum(p.numel() for p in self.parameters() if p.requires_grad or not only_trainable) - - def _convert_deprecated_attention_blocks(self, state_dict): - deprecated_attention_block_paths = [] - - def recursive_find_attn_block(name, module): - if hasattr(module, "_from_deprecated_attn_block") and module._from_deprecated_attn_block: - deprecated_attention_block_paths.append(name) - - for sub_name, sub_module in module.named_children(): - sub_name = sub_name if name == "" else f"{name}.{sub_name}" - recursive_find_attn_block(sub_name, sub_module) - - recursive_find_attn_block("", self) - - # NOTE: we have to check if the deprecated parameters are in the state dict - # because it is possible we are loading from a state dict that was already - # converted - - for path in deprecated_attention_block_paths: - # group_norm path stays the same - - # query -> to_q - if f"{path}.query.weight" in state_dict: - state_dict[f"{path}.to_q.weight"] = state_dict.pop(f"{path}.query.weight") - if f"{path}.query.bias" in state_dict: - state_dict[f"{path}.to_q.bias"] = state_dict.pop(f"{path}.query.bias") - - # key -> to_k - if f"{path}.key.weight" in state_dict: - state_dict[f"{path}.to_k.weight"] = state_dict.pop(f"{path}.key.weight") - if f"{path}.key.bias" in state_dict: - state_dict[f"{path}.to_k.bias"] = state_dict.pop(f"{path}.key.bias") - - # value -> to_v - if f"{path}.value.weight" in state_dict: - state_dict[f"{path}.to_v.weight"] = state_dict.pop(f"{path}.value.weight") - if f"{path}.value.bias" in state_dict: - state_dict[f"{path}.to_v.bias"] = state_dict.pop(f"{path}.value.bias") - - # proj_attn -> to_out.0 - if f"{path}.proj_attn.weight" in state_dict: - state_dict[f"{path}.to_out.0.weight"] = state_dict.pop(f"{path}.proj_attn.weight") - if f"{path}.proj_attn.bias" in state_dict: - state_dict[f"{path}.to_out.0.bias"] = state_dict.pop(f"{path}.proj_attn.bias") - - def _temp_convert_self_to_deprecated_attention_blocks(self): - deprecated_attention_block_modules = [] - - def recursive_find_attn_block(module): - if hasattr(module, "_from_deprecated_attn_block") and module._from_deprecated_attn_block: - deprecated_attention_block_modules.append(module) - - for sub_module in module.children(): - recursive_find_attn_block(sub_module) - - recursive_find_attn_block(self) - - for module in deprecated_attention_block_modules: - module.query = module.to_q - module.key = module.to_k - module.value = module.to_v - module.proj_attn = module.to_out[0] - - # We don't _have_ to delete the old attributes, but it's helpful to ensure - # that _all_ the weights are loaded into the new attributes and we're not - # making an incorrect assumption that this model should be converted when - # it really shouldn't be. 
- del module.to_q - del module.to_k - del module.to_v - del module.to_out - - def _undo_temp_convert_self_to_deprecated_attention_blocks(self): - deprecated_attention_block_modules = [] - - def recursive_find_attn_block(module): - if hasattr(module, "_from_deprecated_attn_block") and module._from_deprecated_attn_block: - deprecated_attention_block_modules.append(module) - - for sub_module in module.children(): - recursive_find_attn_block(sub_module) - - recursive_find_attn_block(self) - - for module in deprecated_attention_block_modules: - module.to_q = module.query - module.to_k = module.key - module.to_v = module.value - module.to_out = nn.ModuleList([module.proj_attn, nn.Dropout(module.dropout)]) - - del module.query - del module.key - del module.value - del module.proj_attn diff --git a/spaces/pierreguillou/extracao_das_palavras_frases_chave_em_portugues/app.py b/spaces/pierreguillou/extracao_das_palavras_frases_chave_em_portugues/app.py deleted file mode 100644 index 0b45dae501b506df4e37cfd82acbaddde5b1984e..0000000000000000000000000000000000000000 --- a/spaces/pierreguillou/extracao_das_palavras_frases_chave_em_portugues/app.py +++ /dev/null @@ -1,391 +0,0 @@ -import os -import subprocess -import gradio as gr -import wget -from ftlangdetect import detect -from cleantext import clean -from keybert import KeyBERT -from keyphrase_vectorizers import KeyphraseCountVectorizer -from sklearn.feature_extraction.text import CountVectorizer -from functools import partial -from sentence_transformers import SentenceTransformer - - -## models sentence-bert multilingual - -# fonte SBERT: https://www.sbert.net/docs/pretrained_models.html#multi-lingual-models -# models na Hugging Face model hub (https://huggingface.co/sentence-transformers/...) -# old: paraphrase-multilingual-MiniLM-L12-v2 -model_id = ["paraphrase-multilingual-mpnet-base-v2", "sentence-transformers/LaBSE", "distiluse-base-multilingual-cased-v1"] -model_name = ["SBERT multilingual", "LaBSE", "DistilBERT mltilingual (v1)"] - - -## get KeyBERT model - -kw_model_0 = KeyBERT(model=model_id[0]) -#kw_model_1 = KeyBERT(model=model_id[1]) -#kw_model_2 = KeyBERT(model=model_id[2]) -kw_model = { - 0: kw_model_0, - #1: kw_model_1, - #2: kw_model_2 - } - - -## max_seq_length - -# get max_seq_length of the KeyBERT model -#if isinstance(kw_model_0.model.embedding_model, SentenceTransformer): -# max_seq_length_0 = kw_model_0.model.embedding_model.max_seq_length -# change max_seq_length -#kw_model_0.model.embedding_model.max_seq_length = 512 -#num_tokens = kw_model_0.model.embedding_model.tokenize([doc_original])['input_ids'].shape[1] - - -## spacy (pipeline) - -import spacy -# Portuguese pipeline optimized for CPU. Components: tok2vec, morphologizer, parser, lemmatizer (trainable_lemmatizer), senter, ner, attribute_ruler. -spacy_pipeline = "pt_core_news_lg" -# download spacy pipeline (https://spacy.io/models/pt) -os.system(f"python -m spacy download {spacy_pipeline}") - -# Load tokenizer, tagger, parser, NER and word vectors -#os.system("python -m spacy download pt_core_news_lg") -nlp = spacy.load(spacy_pipeline) - -# Add the component to the pipeline -# "nlp" Object is used to create documents with linguistic annotations. 
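# Illustrative sketch (added comment, not in the original app): with the sentencizer
# added just below, sentence boundaries become available through doc.sents, e.g.
#   doc = nlp("Primeira frase. Segunda frase.")
#   [sent.text for sent in doc.sents]   # -> ["Primeira frase.", "Segunda frase."]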
-nlp.add_pipe('sentencizer') - - -## download stop words in Portuguese -output = subprocess.run(["python", "stopwords.py"], capture_output=True, text=True) -stop_words = list(eval(output.stdout)) - - -## Part-of-Speech Tagging for Portuguese -# (https://melaniewalsh.github.io/Intro-Cultural-Analytics/05-Text-Analysis/Multilingual/Portuguese/03-POS-Keywords-Portuguese.html) -#pos_pattern = '********' -pos_pattern = '***' - - -# vectorizer options -vectorizer_options = ["keyword", "3gramword", "nounfrase"] - - -# function principal (keywords) -def get_kw_html(doc, top_n, diversity, vectorizer_option, model_id, pos_pattern): - - # lowercase - lowercase = False - - ## define o vectorizer - def get_vectorizer(vectorizer_option): - - # one word - if vectorizer_option == "keyword": - vectorizer = CountVectorizer( - ngram_range=(1, 1), - stop_words=stop_words, - lowercase=lowercase - ) - - # upt to 3-gram - elif vectorizer_option == "3gramword": - vectorizer = CountVectorizer( - ngram_range=(1, 3), - #stop_words=stop_words, - lowercase=lowercase - ) - - # proper noun / noun (adjective) phrase - elif vectorizer_option == "nounfrase": - vectorizer = KeyphraseCountVectorizer( - spacy_pipeline=spacy_pipeline, - #stop_words=stop_words, - pos_pattern=pos_pattern, - lowercase=lowercase - ) - - return vectorizer - - # function to clean text of document - def get_lang(doc): - doc = clean(doc, - fix_unicode=True, # fix various unicode errors - to_ascii=False, # transliterate to closest ASCII representation - lower=True, # lowercase text - no_line_breaks=True, # fully strip line breaks as opposed to only normalizing them - no_urls=True, # replace all URLs with a special token - no_emails=False, # replace all email addresses with a special token - no_phone_numbers=False, # replace all phone numbers with a special token - no_numbers=False, # replace all numbers with a special token - no_digits=False, # replace all digits with a special token - no_currency_symbols=False, # replace all currency symbols with a special token - no_punct=False, # remove punctuations - replace_with_punct="", # instead of removing punctuations you may replace them - replace_with_url="", - replace_with_email="", - replace_with_phone_number="", - replace_with_number="", - replace_with_digit="0", - replace_with_currency_symbol="", - lang="pt" # set to 'de' for German special handling - ) - res = detect(text=str(doc), low_memory=False) - lang = res["lang"] - score = res["score"] - - return lang, score - - def get_passages(doc): - # method: https://github.com/UKPLab/sentence-transformers/blob/b86eec31cf0a102ad786ba1ff31bfeb4998d3ca5/examples/applications/retrieve_rerank/in_document_search_crossencoder.py#L19 - doc = doc.replace("\r\n", "\n").replace("\n", " ") - doc = nlp(doc) - - paragraphs = [] - for sent in doc.sents: - if len(sent.text.strip()) > 0: - paragraphs.append(sent.text.strip()) - - window_size = 2 - passages = [] - paragraphs = [paragraphs] - for paragraph in paragraphs: - for start_idx in range(0, len(paragraph), window_size): - end_idx = min(start_idx+window_size, len(paragraph)) - passages.append(" ".join(paragraph[start_idx:end_idx])) - - return passages - - # keywords - def get_kw(doc, kw_model=kw_model[model_id], top_n=top_n, diversity=diversity, vectorizer=get_vectorizer(vectorizer_option)): - - keywords = kw_model.extract_keywords( - doc, - vectorizer = vectorizer, - use_mmr = True, - diversity = diversity, - top_n = top_n, - ) - - return keywords - - def get_embeddings(doc, candidates, kw_model=kw_model[model_id]): - 
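        # Clarifying note (added, not in the original source): get_kw() above returns
        # KeyBERT's (keyword, score) pairs, e.g. roughly
        #   [("inflação", 0.62), ("taxa de juros", 0.55), ...]   # hypothetical values
        # This helper then re-embeds the passage and those candidate keywords with the
        # same sentence-transformers backend (kw_model.model.embed), so candidates from
        # different passages can later be compared with cosine similarity / MMR.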
- doc_embeddings = kw_model.model.embed([doc]) - word_embeddings = kw_model.model.embed(candidates) - - # doc_embeddings, word_embeddings = kw_model.extract_embeddings(docs=doc, candidates = candidates, - # keyphrase_ngram_range = (1, 100), - # stop_words = None, - # min_df = 1, - # ) - - return doc_embeddings, word_embeddings - - # highlight - def get_html(keywords, doc=doc): - - # ordering of lists (from longest keywords to shortest ones) - list3 = [keyword[0] for keyword in keywords] - list2 = [len(item.split()) for item in list3] - list1 = list(range(len(list2))) - list2, list1 = (list(t) for t in zip(*sorted(zip(list2, list1)))) - list1 = list1[::-1] - keywords_list = [list3[idx] for idx in list1] - - # converting doc to html format - html_doc = doc - for idx,keyword in enumerate(keywords_list): - if sum([True if keyword in item else False for item in keywords_list[:idx]]) == 0: - if keyword not in '' and keyword not in '': - html_doc = html_doc.replace(keyword, '' + keyword + '') - html_doc = '

        ' + html_doc + '

        ' - - return html_doc - - # if isinstance(kw_model_0.model.embedding_model, SentenceTransformer): - # num_tokens = kw_model_0.model.embedding_model.tokenize([doc])['input_ids'].shape[1] - - - ## main - - # empty doc - if len(doc) == 0: - - # get keywords and highlighted text - keywords, keywords_list_json = [("",0.)], {"":0.} - html_doc = '

        ' - label = "O texto do documento não pode estar vazio. Recomece, por favor." - - else: - - # detect lang - lang, score = get_lang(doc) - - # error in lang detect - if lang!="pt" or score<0.9: - - # get keywords and highlighted text - keywords, keywords_list_json = [("",0.)], {"":0.} - html_doc = '

        ' - label = "O APP não tem certeza de que o texto do documento está em português. Recomece com um texto em português, por favor." - - # text not empty and in the correct language - else: - - # get passages - passages= get_passages(doc) - num_passages = len(passages) - - # parameters - candidates_list = list() - passages_embeddings = dict() - candidates_embeddings_list = list() - - # get keywords, candidates and their embeddings - for i,passage in enumerate(passages): - keywords = get_kw(passage) - - candidates = [keyword for keyword,prob in keywords] - candidates_list.extend(candidates) - - passages_embeddings[i], candidates_embeddings = get_embeddings(passage, candidates) - candidates_embeddings_list.extend(candidates_embeddings) - - if len(candidates_list) > 0: - - # get unique candidates - candidates_unique_list = list(set(candidates_list)) - candidates_embeddings_unique_list = [candidates_embeddings_list[candidates_list.index(candidate)] for candidate in candidates_unique_list] - num_candidates_unique = len(candidates_unique_list) - - # get distances between the candidates and respectively all the passages - # Maximal Marginal Relevance (MMR) - from keybert._mmr import mmr - from keybert._maxsum import max_sum_distance - keywords_list = list() - for i in range(num_passages): - keywords_list.append(mmr(passages_embeddings[i], - candidates_embeddings_unique_list, - candidates_unique_list, - num_candidates_unique, - diversity = 0) - ) - - # get the average distances between the candidates and the passages (1 distance by candidate) - keywords_with_distance_list = dict() - for i in range(num_passages): - for keyword, prob in keywords_list[i]: - if i == 0: keywords_with_distance_list[keyword] = prob - else: keywords_with_distance_list[keyword] += prob - - # get top_n keywords with prob - keywords_list_sorted = {k: v for k, v in sorted(keywords_with_distance_list.items(), key=lambda item: item[1], reverse=True)} - keywords_with_distance_list_sorted = [(keyword, round(keywords_with_distance_list[keyword]/num_passages, 4)) for keyword in keywords_list_sorted] - keywords_with_distance_list_sorted = keywords_with_distance_list_sorted[:top_n] - - # main keyword - label = f"A palavra/frase chave com a maior similaridade é {keywords_with_distance_list_sorted[0][0]}." - - # json for printing - keywords_list_json = {keyword:prob for keyword, prob in keywords_with_distance_list_sorted} - - # get html doc - html_doc = get_html(keywords_with_distance_list_sorted) - - else: - - label, keywords_list_json, html_doc = "O APP não encontrou de palavras/frases chave no texto.", {"":0.}, "" - - return label, keywords_list_json, html_doc - -def get_kw_html_0(doc, top_n, diversity, vectorizer_option, model_id=0, pos_pattern=pos_pattern): - return get_kw_html(doc, top_n, diversity, vectorizer_option, model_id, pos_pattern) - - -title = "Extração das palavras/frases chave em português" - -description = '

        (17/12/2022) Forneça seu próprio texto em português e o APP vai fazer a extração das palavras/frases chave com as maiores similaridades ao texto.\
        Este aplicativo usa os modelos seguintes:\
        - SBERT multilingual,\
        - KeyBERT para calcular as similaridades entre as palavras/frases chave e o texto do documento.
        ' - -# examples -doc_original_0 = """ -As contas de pelo menos seis jornalistas norte-americanos que cobrem tecnologia foram suspensas pelo Twitter na noite desta quinta-feira (15). Os profissionais escrevem sobre o tema para diversos veículos de comunicação dos Estados Unidos, como os jornais 'The New York Times' e 'Washington Post'. - -A rede social afirmou apenas que suspende contas que violam as regras, mas não deu mais detalhes sobre os bloqueios. - -Assim que comprou o Twitter, Elon Musk disse defender a liberdade de expressão, e reativou, inclusive, a conta do ex-presidente Donald Trump, suspensa desde o ataque ao Capitólio, em 2021. - -Os jornalistas que tiveram as contas bloqueadas questionaram o compromisso de Musk com a liberdade de expressão. - -Eles encararam o bloqueio como uma retaliação de Musk às críticas que o bilionário vem recebendo pela forma como está conduzindo a rede social: com demissões em massa e o desmonte de áreas, como o conselho de confiança e segurança da empresa. - -Metade dos funcionários do Twitter foram demitidos desde que ele assumiu o comando da empresa e outros mil pediram demissão. - """ - -doc_original_1 = """ -O bilionário Elon Musk restabeleceu neste sábado (17) as contas suspensas de jornalistas no Twitter. A súbita suspensão, um dia antes, provocou reações de entidades da sociedade e setores políticos, além de ameaças de sanção por parte da União Europeia. - -O empresário, que comprou a rede social em outubro, acusou os repórteres de compartilhar informações privadas sobre seu paradeiro, sem apresentar provas. - -Ainda na sexta (16), o empresário publicou uma enquete na rede social perguntando se as contas deveriam ser reativadas "agora" ou "em sete dias": 58,7% votaram pela retomada imediata; 41,3%, em sete dias. - -"O povo falou. Contas envolvidas em doxing (revelação intencional e pública de informações pessoais sem autorização) com minha localização terão sua suspensão suspensa agora", tuitou o empresário neste sábado. - -O g1 verificou a conta de alguns dos jornalistas suspensos, que pertencem a funcionários de veículos como a CNN, o The New York Times, o The Washington Post, e as páginas estavam ativas. - -As exceções, até a última atualização desta reportagem, eram a conta @ElonJet, que rastreava o paradeiro do jato do próprio Elon Musk, e o perfil do criador, Jack Sweeney. - -Ao acessar ambas as páginas, é possível visualizar a seguinte mensagem: "O Twitter suspende as contas que violam as Regras do Twitter". - -A plataforma também suspendeu a conta da rede social Mastodon, concorrente do Twitter. - -O Twitter Spaces também foi tirado do ar na sexta, após Musk ter sido questionado ao vivo sobre essas últimas decisões, informou a agência de notícias Bloomberg – o bilionário disse que o recurso voltou ao ar na tarde do mesmo dia. 
- """ - -# parameters -num_results = 5 -diversity = 0.3 - -examples = [ - [doc_original_0.strip(), num_results, diversity, vectorizer_options[0]], - [doc_original_1.strip(), num_results, diversity, vectorizer_options[0]], - #[doc_original_2.strip(), num_results, diversity, vectorizer_options[0]], -] - -# parameters -num_results = 5 -diversity = 0.3 - -# interfaces -interface_0 = gr.Interface( - fn=get_kw_html_0, - inputs=[ - gr.Textbox(lines=15, label="Texto do documento"), - gr.Slider(1, 20, value=num_results, step=1., label=f"Número das palavras/frases chave a procurar (0: mínimo - 20: máximo - padrão: {num_results})"), - gr.Slider(0, 1, value=diversity, step=0.1, label=f"Diversidade entre as palavras/frases chave encontradas (0: mínimo - 1: máximo - padrão: {diversity})"), - gr.Radio(choices=vectorizer_options, value=vectorizer_options[0], label=f"Tipo de resultados (keyword: lista de palavras únicas - 3gramword: lista de 1 a 3 palavras - nounfrase: lista de frases nominais)"), - ], - outputs=[ - gr.HTML(label=f"{model_name[0]}"), - gr.Label(show_label=False), - gr.HTML(), - ] -) - -# app -demo = gr.Parallel( - interface_0, - title=title, - description=description, - examples=examples, - allow_flagging="never" - ) - -if __name__ == "__main__": - demo.launch() \ No newline at end of file diff --git a/spaces/politweet-sh/politweet/textclassifier/TextClassifier.py b/spaces/politweet-sh/politweet/textclassifier/TextClassifier.py deleted file mode 100644 index f21eee21e3d62b01600e32ae305662f6ed737e2c..0000000000000000000000000000000000000000 --- a/spaces/politweet-sh/politweet/textclassifier/TextClassifier.py +++ /dev/null @@ -1,617 +0,0 @@ -import os -import time -import warnings -import openai -import pandas as pd -from dotenv import find_dotenv, load_dotenv -from pandas.core.common import SettingWithCopyWarning -from twitterscraper import TwitterScraper -from sentence_transformers import SentenceTransformer -from scipy import spatial -from datetime import date, timedelta - -warnings.simplefilter(action="ignore", category=SettingWithCopyWarning) - -# Set one directory up into ROOT_PATH -ROOT_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) - -dotenv_path = find_dotenv() -load_dotenv(dotenv_path) -OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY") - - -class TextClassifier: - def __init__(self, model_name="text-davinci-002", from_date='2022-01-01', to_date=str(date.today()), - user_list=['jimmieakesson'], - num_tweets=20): - """ - Initializes the TextClassifier. - :param model_name: name of the model from openai. - :param from_date: string of the format 'YYYY-MM-DD'. - :param to_date: string of the format 'YYYY-MM-DD'. - :param num_tweets: integer value of the maximum number of tweets to be scraped. 
- """ - # Make sure user_name is not empty - assert user_list is not None, "user_name cannot be empty" - - self.ts = TwitterScraper.TwitterScraper(from_date, to_date, num_tweets) - self.model_name = model_name - self.from_date = from_date - self.to_date = to_date - self.num_tweets = num_tweets - self.user_name = user_list - # Assure that scrape_by_user actually gets num_tweets - # add timer in time-loop and stop after 10 seconds - # self.df = self.ts.scrape_by_user(user_name) - self.df = self.ts.scrape_by_several_users(user_list) - - # Check if 'id' is in self.df - if 'id' in self.df.columns: - # Make id as type int64 - self.df.loc[:, 'id'] = self.df.id.copy().apply(lambda x: int(x)) - else: - # If not do nothing - pass - openai.api_key = OPENAI_API_KEY - - def classify_all(self, tweet: str): - """ - Classifies the topic, subtopic, sentiment and target of a user's tweets. - """ - import os - import openai - - valid_tweet = len(tweet.split()) > 4 - if valid_tweet: - openai.api_key = os.getenv("OPENAI_API_KEY") - promptstring = "Decide a Tweet's political TOPIC and SUBTOPIC, without classifying it as 'politics'. Also " \ - "decide whether a political Tweet's " \ - "SENTIMENT is " \ - "positive, " \ - "negative or neutral. Also give the TARGET of the sentiment. \nGive the answer in the form ' (" \ - "TOPIC, SUBTOPIC, SENTIMENT, TARGET)'\n\nTweet: {} \nAnswer: ".format(tweet) - response = openai.Completion.create( - model="text-davinci-002", - prompt=promptstring, - temperature=0, - max_tokens=30, - top_p=1, - frequency_penalty=0.5, - presence_penalty=0 - ) - classification_unclean = response.choices[0]['text'] - classification_clean = self.cleanup_topic_results(classification_unclean) - if classification_clean.lower() == "(topic, subtopic, sentiment, target)": - classification_clean = "(none, none, none, none)" - else: - classification_clean = "(none, none, none, none)" - return classification_clean.lower() - - def classify_all_list(self): - """ - Classifies the topics of a user's tweets. - """ - df_topic = self.df.copy() - df_topic['class_tuple'] = df_topic['tweet'].apply(self.classify_all) - self.df = df_topic - self.split_tuple_into_columns() - return self.df - - @staticmethod - def cleanup_topic_results(text): - """ - Cleanup response from GPT-3 to a string matching the format: "(main_topic, sub_topic, sentiment, target)" - :param text: GPT-3 response - :return: A string on the format: "(main_topic, sub_topic, sentiment, target)" - """ - new_item = text.strip() - new_item = new_item.replace("\n", "") - new_item = new_item.replace(" ", "") - item_control = new_item.replace("(", "") - item_control = item_control.replace(")", "") - item_control = item_control.split(",") - if ' ' or '' in item_control: - item_control = [s.strip() if not (s == ' ' or s == '') else 'none' for s in - item_control] # Replace empty classifications with 'none' - diff = 4 - len(item_control) - if diff < 0: # If response gave more than four predictions - cutout = item_control[diff - 1:] # Cut out the superflous predictions - item_control = item_control[:diff - 1] # Save the rest - new_s = "" - for i in range(len(cutout)): - new_s += cutout[i] - if i < -diff: - new_s += " and " # Merge superflous predictions. E.g. 
target = 's', 'mp', 'v' -> target = 's and mp and v' - item_control.append(new_s) - elif diff > 0: # If response gave less than four predictions - for i in range(diff): - item_control.append("none") # Fill out tuple with nones - new_item = str(tuple(item_control)) - new_item = new_item.replace("'", "") - return new_item - - def df_to_csv(self, filename="{}/data/twitterdata.csv".format(ROOT_PATH)): - """ - Writes pandas df to csv file. If it already exists, it appends. If not, it creates. It also removes duplicates. - :param filename: - :return: - """ - if not os.path.exists(filename): - self.df.to_csv(filename, index=False) - else: - self.df.to_csv(filename, mode='a', header=False, index=False) - - self.remove_duplicates_from_csv(filename) - - @staticmethod - def remove_duplicates_from_csv(filename="{}/data/twitterdata.csv".format(ROOT_PATH)): - """ - Removes duplicates from csv file. - :param filename: filename of csv file - :return: None - """ - with open(filename, 'r', encoding="utf8") as f: - lines = f.readlines() - with open(filename, 'w', encoding="utf8") as f: - for line in lines: - if line not in lines[lines.index(line) + 1:]: - f.write(line) - - def remove_already_classified_tweets(self, filename="{}/data/twitterdata.csv".format(ROOT_PATH)): - """ - Removes tweets that have already been classified. - :param filename: filename of csv file - :return: None - """ - df = self.df - df = df[df['sentiment'].isnull()] - self.df = df - self.df_to_csv(filename) - - def split_tuple_into_columns(self): - """ - Splits the topics (topic, subtopic, sentiment, target) into columns. - :return: None - """ - df_topic = self.df.copy() - df_topic['topics_temp'] = df_topic['class_tuple'].apply(lambda x: tuple(x[1:-1].split(","))) - df_topic_split = pd.DataFrame(df_topic['topics_temp'].tolist(), - columns=['main_topic', 'sub_topic', 'sentiment', 'target']) - # Manually add columns to self.df - self.df['main_topic'] = df_topic_split['main_topic'].tolist() - self.df['main_topic'] = self.df['main_topic'].replace(["n/a", "", " "], "none", regex=True) - self.df['main_topic'] = self.df['main_topic'].apply( - lambda x: x.strip() if not (len(x) == 1 and x == "-") else "none") - - self.df['sub_topic'] = df_topic_split['sub_topic'].tolist() - # In a few of the outputs from GPT-3 the sub_topic = "sentiment" - self.df['sub_topic'] = self.df['sub_topic'].replace(["n/a", "sentiment", "", " "], "none", regex=True) - self.df['sub_topic'] = self.df['sub_topic'].apply( - lambda x: x.strip() if not (len(x) == 1 and x == "-") else "none") - - self.df['sentiment'] = df_topic_split['sentiment'].tolist() - self.df['sentiment'] = self.df['sentiment'].replace(["n/a", "sentiment", "", " "], "none", regex=True) - self.df['sentiment'] = self.df['sentiment'].apply( - lambda x: x.strip() if not (len(x) == 1 and x == "-") else "none") - - self.df['target'] = df_topic_split['target'].tolist() - self.df['target'] = self.df['target'].replace(["n/a", "", " "], "none", regex=True) - self.df['target'] = self.df['target'].apply(lambda x: x.strip() if not (len(x) == 1 and x == "-") else "none") - - self.df.fillna('none', inplace=True) - - def get_dataframe(self): - """ - Returns the dataframe. - :return: dataframe - """ - return self.df - - def __repr__(self): - """ - Gives a string that describes which user is classified - :return: - """ - return "Classifier for user: " + self.user_name + " with model: " + self.model_name + "." 
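    # Illustrative sketch (added commentary, not part of the original class):
    # split_tuple_into_columns() above expands the GPT-3 answer string
    # "(topic, subtopic, sentiment, target)" into four dataframe columns. The same
    # idea in a few standalone pandas lines, with made-up example data:
    #   import pandas as pd
    #   df = pd.DataFrame({"class_tuple": ["(economy, taxes, negative, s)"]})
    #   parts = df["class_tuple"].str.strip("()").str.split(",", expand=True)
    #   parts.columns = ["main_topic", "sub_topic", "sentiment", "target"]
    #   df = df.join(parts.apply(lambda col: col.str.strip()))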
- - def get_database(self, filename="{}/data/twitterdata.csv".format(ROOT_PATH)): - """ - Returns the database containing all dataframes. - :param filename: filename of csv file - :return: - """ - db = pd.read_csv(filename) - return db - - def cleanup_list(self, uncleaned_list): - """ - Cleans up faulty predictions. - :param uncleaned_list: the list to be cleaned - :return: cleaned list - """ - uncleaned_list = [s if not isinstance(s, float) else "none" for s in uncleaned_list] - uncleaned_list = [s if not len(s.split()) > 5 else "none" for s in uncleaned_list] - uncleaned_list = [s if not "swedish" in s else s.replace("swedish", " ") for s in uncleaned_list] - uncleaned_list = [s if not "politics" in s else s.replace("politics", "none") for s in uncleaned_list] - uncleaned_list = [s.replace(" ", " ") for s in uncleaned_list] - cleaned_list = [s.strip() for s in uncleaned_list] - return cleaned_list - - def merge_lists(self, main_topic_list, sub_topic_list): - """ - Merges the topic lists. If either topic is a faulty classification, only the non-faulty topic wil be used. - If both are faulty, the merged topic will be labeled as faulty (ERROR_496). - :param main_topic_list: A list containing main topics - :param sub_topic_list: A list containing sub topics - :return: A list containing string items on the form "main_topic and sub_topic" - """ - new_list = [] - main_topic_list = self.clean_party_names(main_topic_list) - sub_topic_list = self.clean_party_names(sub_topic_list) - for i in range(len(main_topic_list)): - if main_topic_list[i].lower() == "none" and sub_topic_list[ - i].lower() == "none": # If the predictions are faulty - new_list.append("ERROR_496") # Label as ERROR_496 (faulty prediction) - elif main_topic_list[i].lower() == "none": - new_list.append(sub_topic_list[i]) - elif sub_topic_list[i].lower() == "none": - new_list.append(main_topic_list[i]) - else: - new_list.append(main_topic_list[i] + " and " + sub_topic_list[i]) - return new_list - - def file_to_mat(self, classification_type): - """ - Converts a synonym textfile to a matrix in which the rows contain a general topic/target and its related words. - :param classification_type: The type of classification: topic or target - :return: a matrix in which the first element of each row is a general topic/target, and the rest are words related to - the topic - """ - filename = "{}/data/".format(ROOT_PATH) - filename += classification_type + "_synonyms.txt" - with open(filename, encoding='utf-8') as f: - lines = f.read() - lines = lines.split("\n") - - topic_list = [] - temp_list = [] - - for topic in lines: - if not topic.endswith("####"): - temp_list.append(topic) - else: - temp_list.append(topic[:-4]) # Remove the marker (####) - topic_list.append(temp_list) - temp_list = [] - - return topic_list - - def mat_to_list(self, mat): - """ - Converts a matrix from file_to_mat() into one list containing all topics and synonyms, and one list with - mappings for the synonyms. - :param mat: a matrix from file_to_mat() - :return: - """ - full_list = [] - mapped_synonyms = [] - for syns in mat: - for topic in syns: - full_list.append(topic) - mapped_synonyms.append(syns[0]) - return full_list, mapped_synonyms - - def clean_party_names(self, old_topic_list): - """ - Encodes all party names to sentences that will yield a high cosine similarity value when merged with another - topic, without taking the actual party name into account. 
These sentences have deliberately been composed such - that they pose a low risk of being close (in the sentence embedding-space) to any possible merged topic or - target that may be encountered. - :param old_topic_list: list of topics - :return: list of encoded topics - """ - # Problem 1: When a party name is encountered, we want to bias the merging towards that party since the - # occurrence of a very general main topic (as in the example below) plus a party name as subtopic is frequent. - # Example: main_topic = "politics", sub_topic = "sweden democrats" -> - # combined_topics = "politics and sweden democrats" - # Problem 2: The party names themselves are biased towards certain topics/targets and lead to faulty merges. - # Example: Variations of words such as "Sweden" as the target/topic will be biased towards getting merged with - # "Sweden Democrats". - # Solution: Encode party names with sentences that are HIGHLY unlikely to be close to anything in the embedding - # space and thus enforcing a strong bias in the cosine similarity computation towards the party if encountered. - - party_names = {} - party_names["m"] = "parrot computer is swimming as screen time" - party_names["moderaterna"] = "parrot computer is swimming as screen time" - party_names["moderates"] = "parrot computer is swimming as screen time" - party_names["the moderates"] = "parrot computer is swimming as screen time" - party_names["moderate party"] = "parrot computer is swimming as screen time" - party_names["the moderate party"] = "parrot computer is swimming as screen time" - party_names["the moderaterna party"] = "parrot computer is swimming as screen time" - - party_names["sd"] = "keyboard can hire the yellow elephant in cosmos" - party_names["sverigedemokraterna"] = "keyboard can hire the yellow elephant in cosmos" - party_names["sweden democrats"] = "keyboard can hire the yellow elephant in cosmos" - party_names["the sweden democrats"] = "keyboard can hire the yellow elephant in cosmos" - party_names["the swedish democrats"] = "keyboard can hire the yellow elephant in cosmos" - party_names["swedish democrats"] = "keyboard can hire the yellow elephant in cosmos" - party_names["@jimmieakesson"] = "keyboard can hire the yellow elephant in cosmos" - - party_names["l"] = "red weather jokes with music and the mathematician" - party_names["liberalerna"] = "red weather jokes with music and the mathematician" - party_names["liberals"] = "red weather jokes with music and the mathematician" - party_names["the liberals"] = "red weather jokes with music and the mathematician" - party_names["the liberal party"] = "red weather jokes with music and the mathematician" - party_names["liberal people's party"] = "red weather jokes with music and the mathematician" - party_names["@johanpehrson"] = "red weather jokes with music and the mathematician" - - party_names["mp"] = "ice piano flies with pencil as direction" - party_names["miljöpartiet"] = "ice piano flies with pencil as direction" - party_names["de gröna"] = "ice piano flies with pencil as direction" - party_names["green party"] = "ice piano flies with pencil as direction" - party_names["the green party"] = "ice piano flies with pencil as direction" - party_names["miljopartiet"] = "ice piano flies with pencil as direction" - party_names["@bolund"] = "ice piano flies with pencil as direction" - party_names["@martastenevi"] = "ice piano flies with pencil as direction" - - party_names["s"] = "lamp of fire walks bird gladly tomorrow" - party_names["socialdemokraterna"] = "lamp 
of fire walks bird gladly tomorrow" - party_names["social democratic party"] = "lamp of fire walks bird gladly tomorrow" - party_names["the social democratic party"] = "lamp of fire walks bird gladly tomorrow" - party_names["social democrats"] = "lamp of fire walks bird gladly tomorrow" - party_names["the social democrats"] = "lamp of fire walks bird gladly tomorrow" - party_names["sosse"] = "lamp of fire walks bird gladly tomorrow" - party_names["sossen"] = "lamp of fire walks bird gladly tomorrow" - party_names["sossar"] = "lamp of fire walks bird gladly tomorrow" - party_names["sossarna"] = "lamp of fire walks bird gladly tomorrow" - party_names["sossarnas"] = "lamp of fire walks bird gladly tomorrow" - party_names["swedish social democrats"] = "lamp of fire walks bird gladly tomorrow" - party_names["@swedishpm"] = "lamp of fire walks bird gladly tomorrow" - - party_names["v"] = "rooftop cats play physics with cardboard fire" - party_names["vänsterpartiet"] = "rooftop cats play physics with cardboard fire" - party_names["left party"] = "rooftop cats play physics with cardboard fire" - party_names["the left party"] = "rooftop cats play physics with cardboard fire" - party_names["@dadgostarnooshi"] = "rooftop cats play physics with cardboard fire" - - party_names["c"] = "differential donuts program sunny waters" - party_names["centerpartiet"] = "differential donuts program sunny waters" - party_names["center party"] = "differential donuts program sunny waters" - party_names["centre party"] = "differential donuts program sunny waters" - party_names["the center party"] = "differential donuts program sunny waters" - party_names["@annieloof"] = "differential donuts program sunny waters" - - party_names["kd"] = "cauchy-riemann met sunglasses after rolling yellow" - party_names["kristdemokraterna"] = "cauchy-riemann met sunglasses after rolling yellow" - party_names["christian democrats"] = "cauchy-riemann met sunglasses after rolling yellow" - party_names["the christian democrats"] = "cauchy-riemann met sunglasses after rolling yellow" - party_names["@buschebba"] = "cauchy-riemann met sunglasses after rolling yellow" - - for i, topic in enumerate(old_topic_list): - topic = topic.lower() - topic = topic.replace(" ", " ") - topic = topic.strip() - if topic in party_names: - old_topic_list[i] = party_names.get(topic) - - return old_topic_list - - def reset_party_names(self, old_topic_list): - """ - Decodes the encoded party names. 
- :param old_topic_list: list of topics - :return: list of encoded topics - """ - party_names = {} - party_names["m"] = "parrot computer is swimming as screen time" - party_names["sd"] = "keyboard can hire the yellow elephant in cosmos" - party_names["l"] = "red weather jokes with music and the mathematician" - party_names["mp"] = "ice piano flies with pencil as direction" - party_names["s"] = "lamp of fire walks bird gladly tomorrow" - party_names["v"] = "rooftop cats play physics with cardboard fire" - party_names["c"] = "differential donuts program sunny waters" - party_names["kd"] = "cauchy-riemann met sunglasses after rolling yellow" - inverted_dict = {} - # Invert dictionary - for k, v in party_names.items(): - if v not in inverted_dict: - inverted_dict[v] = k - # Update values in old_topic_list - for i, topic in enumerate(old_topic_list): - if topic in inverted_dict.keys(): - old_topic_list[i] = inverted_dict.get(topic) - - return old_topic_list - - def merge_classifications(self, old_list, classification_type): - """ - Merges topics/targets from GPT-3 according to a list of predefined topics/targets. - :param old_list: list of the topics/targets to be merged - :param classification_type: type of classifications: topic or target - :return: list of new topics/targets - """ - # Get the tuple of lists containing all synonyms and general topics/targets - tup_list = self.mat_to_list(self.file_to_mat(classification_type)) - # Save list of synonyms - synonym_list = tup_list[0] - # Save list of mappings between synonym and general topic/target - synonym_mappings = tup_list[1] - # Load embedding model-names - model_list = ['sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2', 'all-MiniLM-L6-v2'] - result_dict = {} - # Encode party names - old_list = self.clean_party_names(old_list) - for model_name in model_list: - model = SentenceTransformer(model_name) - # Encode the topics/targets with the sentence transformer model - old_list_embeddings = model.encode(old_list, batch_size=64, show_progress_bar=True, - convert_to_tensor=True) - # Encode the synonyms with the sentence transformer model - synonym_list_embeddings = model.encode(synonym_list, batch_size=64, show_progress_bar=True, - convert_to_tensor=True) - for i, embedded_classification in enumerate(old_list_embeddings): - result_list = [] - for embedded_synonyms in synonym_list_embeddings: - # Compute the cosine similarity between every classification and synonym - result = 1 - spatial.distance.cosine(embedded_classification, embedded_synonyms) - result_list.append(result) - max_value = max(result_list) - max_index = result_list.index(max_value) - old_classification = old_list[i] - # Extract the general topic/target - new_classification = synonym_mappings[max_index] - # Save the topic/target that yielded the highest cosine similarity value - if old_classification not in result_dict: - result_dict[old_classification] = [(new_classification, max_value, synonym_list[max_index])] - # When we have found the best topics/targets after using the first transformer model - else: - # Append the results from the next model - result_dict[old_classification].append((new_classification, max_value, synonym_list[max_index])) - - new_dict = {} - # Time to replace the old values with the new ones - for old_values in result_dict: - tup_list = result_dict[old_values] - max_tup = max(tup_list, key=lambda item: item[1]) - if classification_type == "topic": - limit = 0.4 - else: - limit = 0.75 - # Discard classification if the old topic/target is not 
similar to anything in our synonym lists - if max_tup[1] < limit: - max_tup = ("ERROR_9000", "{:.2f}".format(round(max_tup[1], 2)), "none") - else: - max_tup = (max_tup[0], "{:.2f}".format(round(max_tup[1], 2)), max_tup[2]) - new_classification = max_tup - if old_values not in new_dict: - new_dict[old_values] = new_classification - new_list = [] - for old_value in old_list: - new_list.append(new_dict[old_value]) - return new_list - - def merge_all(self): - """ - Merges main+subtopics, targets, and updates the dataframe. - :param df: - :return: - """ - df_topics = self.df.copy() - - sub_topics = df_topics['sub_topic'] - sub_topics = sub_topics.tolist() - sub_topics = self.cleanup_list(sub_topics) - - main_topics = df_topics['main_topic'] - main_topics = main_topics.tolist() - main_topics = self.cleanup_list(main_topics) - - merged_topic_list = self.merge_lists(main_topics, sub_topics) - - targets = df_topics['target'] - targets = targets.tolist() - targets = self.cleanup_list(targets) - - merged_topics = self.merge_classifications(merged_topic_list, "topic") - merged_targets = self.merge_classifications(targets, "target") - - print("The following merges were made: ") - for i, top in enumerate(merged_topic_list): - print("TOPICS: ", top, " -> ", merged_topics[i]) - - t_list = [] - for i in range(len(merged_topics)): - t_list.append(tuple(merged_topics[i]) + tuple(merged_targets[i])) - merged_tuples = t_list - df_topics['merged_tuple'] = merged_tuples - - df = self.split_merged_tuple_into_columns(df_topics) - print("Merging finished...") - self.df = df - - def split_merged_tuple_into_columns(self, df): - """ - Splits the merged tuple (merged topic, merged target) into columns. - :return: None - """ - df_topic = df.copy() - df_topic_split = pd.DataFrame(df_topic['merged_tuple'].tolist(), - columns=['merged_topic', 'cos_sim_topic', 'synonym_topic', 'merged_target', - 'cos_sim_target', 'synonym_target']) - self.df['merged_tuple'] = df_topic['merged_tuple'].tolist() - # Manually add columns to self.df - self.df['merged_topic'] = df_topic_split['merged_topic'].tolist() - self.df['cos_sim_topic'] = df_topic_split['cos_sim_topic'].tolist() - self.df['synonym_topic'] = self.reset_party_names(df_topic_split['synonym_topic'].tolist()) - self.df['merged_target'] = df_topic_split['merged_target'].tolist() - self.df['cos_sim_target'] = df_topic_split['cos_sim_target'].tolist() - self.df['synonym_target'] = self.reset_party_names(df_topic_split['synonym_target'].tolist()) - - return self.df - - def run_main_pipeline(self, filename="{}/data/twitterdata.csv".format(ROOT_PATH)): - """ - Classifies the topics/sentiments of a user's tweets. - #We presume that all tweets inside the twitterdata.csv file are already classified. 
- :return: None - """ - # Check if file exists, if not, create it - if os.path.exists(filename): - # Fetch tweets from csv file - already_classified_df = pd.read_csv(filename, on_bad_lines='skip') - print("Already classified tweets: {}".format(already_classified_df.shape[0])) - # Create a temporary df where values from already_classified_df that are not it self.df are stored - temp_df = already_classified_df[already_classified_df['id'].isin(self.df['id'])] - # Remove rows from self.df that are not in already_classified_df - self.df = self.df[~self.df['id'].isin(already_classified_df['id'])] - # Only classify non-empty rows - if self.df.shape[0] > 0: - time.sleep(10) - print("Classifying topic, subtopic, sentiment and target of {} tweets...".format(self.df.shape[0])) - self.df = self.classify_all_list() - self.df = self.df.replace({'': 'none'}, regex=True) - self.df = self.df.replace({' ': 'none'}, regex=True) - print("Merging topics...") - self.merge_all() - print("Writing to csv...") - self.df_to_csv(filename) - # Concatenate temp_df and self.df - self.df = pd.concat([temp_df, self.df], ignore_index=True) - print("Appended {}.".format(filename)) - return None - else: - self.df = pd.concat([temp_df, self.df], ignore_index=True) - print("No new tweets to classify.") - return None - else: - print("No csv file found. Continuing without removing already classified tweets.") - print("Classifying topic, subtopic, sentiment and target of {} tweets...".format(self.df.shape[0])) - self.df = self.classify_all_list() - self.df = self.df.replace({'': 'none'}, regex=True) - self.df = self.df.replace({' ': 'none'}, regex=True) - print("Merging topics...") - self.merge_all() - print("Writing to csv file...") - self.df_to_csv(filename) - print("Created {}.".format(filename)) - return None - - -if __name__ == "__main__": - # $6.39 @ 3431 tweets - # $18.00 @ 4608 tweets - # $11.61 to classify 1177 tweets ~ $0.01 / tweet - - # This code snippet allows for scraping and classifying by simply specifying a start and end date. 
- USER_LIST = ['jimmieakesson', 'BuschEbba', 'annieloof', 'JohanPehrson', 'bolund', 'martastenevi', 'SwedishPM', - 'dadgostarnooshi'] - start_date = date(2022, 8, 4) - end_date = date(2022, 8, 4) - delta = timedelta(days=1) - while start_date <= end_date: - from_date = start_date.strftime("%Y-%m-%d") - start_date += delta - to_date = start_date.strftime("%Y-%m-%d") - print("curr_date: ", from_date) - tc = TextClassifier(from_date=from_date, to_date=to_date, user_list=USER_LIST, num_tweets=6000) - tc.run_main_pipeline() diff --git a/spaces/power2/JoJoGan-powerhow2/e4e/models/stylegan2/op/upfirdn2d.py b/spaces/power2/JoJoGan-powerhow2/e4e/models/stylegan2/op/upfirdn2d.py deleted file mode 100644 index 7bc5a1e331c2bbb1893ac748cfd0f144ff0651b4..0000000000000000000000000000000000000000 --- a/spaces/power2/JoJoGan-powerhow2/e4e/models/stylegan2/op/upfirdn2d.py +++ /dev/null @@ -1,184 +0,0 @@ -import os - -import torch -from torch.autograd import Function -from torch.utils.cpp_extension import load - -module_path = os.path.dirname(__file__) -upfirdn2d_op = load( - 'upfirdn2d', - sources=[ - os.path.join(module_path, 'upfirdn2d.cpp'), - os.path.join(module_path, 'upfirdn2d_kernel.cu'), - ], -) - - -class UpFirDn2dBackward(Function): - @staticmethod - def forward( - ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size - ): - up_x, up_y = up - down_x, down_y = down - g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad - - grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1) - - grad_input = upfirdn2d_op.upfirdn2d( - grad_output, - grad_kernel, - down_x, - down_y, - up_x, - up_y, - g_pad_x0, - g_pad_x1, - g_pad_y0, - g_pad_y1, - ) - grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3]) - - ctx.save_for_backward(kernel) - - pad_x0, pad_x1, pad_y0, pad_y1 = pad - - ctx.up_x = up_x - ctx.up_y = up_y - ctx.down_x = down_x - ctx.down_y = down_y - ctx.pad_x0 = pad_x0 - ctx.pad_x1 = pad_x1 - ctx.pad_y0 = pad_y0 - ctx.pad_y1 = pad_y1 - ctx.in_size = in_size - ctx.out_size = out_size - - return grad_input - - @staticmethod - def backward(ctx, gradgrad_input): - kernel, = ctx.saved_tensors - - gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx.in_size[3], 1) - - gradgrad_out = upfirdn2d_op.upfirdn2d( - gradgrad_input, - kernel, - ctx.up_x, - ctx.up_y, - ctx.down_x, - ctx.down_y, - ctx.pad_x0, - ctx.pad_x1, - ctx.pad_y0, - ctx.pad_y1, - ) - # gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.out_size[0], ctx.out_size[1], ctx.in_size[3]) - gradgrad_out = gradgrad_out.view( - ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1] - ) - - return gradgrad_out, None, None, None, None, None, None, None, None - - -class UpFirDn2d(Function): - @staticmethod - def forward(ctx, input, kernel, up, down, pad): - up_x, up_y = up - down_x, down_y = down - pad_x0, pad_x1, pad_y0, pad_y1 = pad - - kernel_h, kernel_w = kernel.shape - batch, channel, in_h, in_w = input.shape - ctx.in_size = input.shape - - input = input.reshape(-1, in_h, in_w, 1) - - ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1])) - - out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 - out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 - ctx.out_size = (out_h, out_w) - - ctx.up = (up_x, up_y) - ctx.down = (down_x, down_y) - ctx.pad = (pad_x0, pad_x1, pad_y0, pad_y1) - - g_pad_x0 = kernel_w - pad_x0 - 1 - g_pad_y0 = kernel_h - pad_y0 - 1 - g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1 - g_pad_y1 = in_h * up_y - out_h * down_y + 
pad_y0 - up_y + 1 - - ctx.g_pad = (g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1) - - out = upfirdn2d_op.upfirdn2d( - input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1 - ) - # out = out.view(major, out_h, out_w, minor) - out = out.view(-1, channel, out_h, out_w) - - return out - - @staticmethod - def backward(ctx, grad_output): - kernel, grad_kernel = ctx.saved_tensors - - grad_input = UpFirDn2dBackward.apply( - grad_output, - kernel, - grad_kernel, - ctx.up, - ctx.down, - ctx.pad, - ctx.g_pad, - ctx.in_size, - ctx.out_size, - ) - - return grad_input, None, None, None, None - - -def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): - out = UpFirDn2d.apply( - input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1]) - ) - - return out - - -def upfirdn2d_native( - input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1 -): - _, in_h, in_w, minor = input.shape - kernel_h, kernel_w = kernel.shape - - out = input.view(-1, in_h, 1, in_w, 1, minor) - out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1]) - out = out.view(-1, in_h * up_y, in_w * up_x, minor) - - out = F.pad( - out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)] - ) - out = out[ - :, - max(-pad_y0, 0): out.shape[1] - max(-pad_y1, 0), - max(-pad_x0, 0): out.shape[2] - max(-pad_x1, 0), - :, - ] - - out = out.permute(0, 3, 1, 2) - out = out.reshape( - [-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1] - ) - w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) - out = F.conv2d(out, w) - out = out.reshape( - -1, - minor, - in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, - in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1, - ) - out = out.permute(0, 2, 3, 1) - - return out[:, ::down_y, ::down_x, :] diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fastapi/security/utils.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fastapi/security/utils.py deleted file mode 100644 index fa7a450b74e813e66fd6e9a140d48c29215503bb..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fastapi/security/utils.py +++ /dev/null @@ -1,10 +0,0 @@ -from typing import Optional, Tuple - - -def get_authorization_scheme_param( - authorization_header_value: Optional[str], -) -> Tuple[str, str]: - if not authorization_header_value: - return "", "" - scheme, _, param = authorization_header_value.partition(" ") - return scheme, param diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fsspec/spec.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fsspec/spec.py deleted file mode 100644 index 4ab3b7ee3d8e9440715c5d1746dca8fd0304aa3b..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fsspec/spec.py +++ /dev/null @@ -1,1975 +0,0 @@ -from __future__ import annotations - -import io -import logging -import os -import threading -import warnings -import weakref -from errno import ESPIPE -from glob import has_magic -from hashlib import sha256 -from typing import ClassVar - -from .callbacks import _DEFAULT_CALLBACK -from .config import apply_config, conf -from .dircache import DirCache -from .transaction import Transaction -from .utils import ( - _unstrip_protocol, - isfilelike, - other_paths, - read_block, - stringify_path, - tokenize, -) - -logger = logging.getLogger("fsspec") - - -def make_instance(cls, args, kwargs): - return cls(*args, **kwargs) - - -class _Cached(type): - """ - 
Metaclass for caching file system instances. - - Notes - ----- - Instances are cached according to - - * The values of the class attributes listed in `_extra_tokenize_attributes` - * The arguments passed to ``__init__``. - - This creates an additional reference to the filesystem, which prevents the - filesystem from being garbage collected when all *user* references go away. - A call to the :meth:`AbstractFileSystem.clear_instance_cache` must *also* - be made for a filesystem instance to be garbage collected. - """ - - def __init__(cls, *args, **kwargs): - super().__init__(*args, **kwargs) - # Note: we intentionally create a reference here, to avoid garbage - # collecting instances when all other references are gone. To really - # delete a FileSystem, the cache must be cleared. - if conf.get("weakref_instance_cache"): # pragma: no cover - # debug option for analysing fork/spawn conditions - cls._cache = weakref.WeakValueDictionary() - else: - cls._cache = {} - cls._pid = os.getpid() - - def __call__(cls, *args, **kwargs): - kwargs = apply_config(cls, kwargs) - extra_tokens = tuple( - getattr(cls, attr, None) for attr in cls._extra_tokenize_attributes - ) - token = tokenize( - cls, cls._pid, threading.get_ident(), *args, *extra_tokens, **kwargs - ) - skip = kwargs.pop("skip_instance_cache", False) - if os.getpid() != cls._pid: - cls._cache.clear() - cls._pid = os.getpid() - if not skip and cls.cachable and token in cls._cache: - cls._latest = token - return cls._cache[token] - else: - obj = super().__call__(*args, **kwargs) - # Setting _fs_token here causes some static linters to complain. - obj._fs_token_ = token - obj.storage_args = args - obj.storage_options = kwargs - if obj.async_impl and obj.mirror_sync_methods: - from .asyn import mirror_sync_methods - - mirror_sync_methods(obj) - - if cls.cachable and not skip: - cls._latest = token - cls._cache[token] = obj - return obj - - -class AbstractFileSystem(metaclass=_Cached): - """ - An abstract super-class for pythonic file-systems - - Implementations are expected to be compatible with or, better, subclass - from here. - """ - - cachable = True # this class can be cached, instances reused - _cached = False - blocksize = 2**22 - sep = "/" - protocol: ClassVar[str | tuple[str, ...]] = "abstract" - _latest = None - async_impl = False - mirror_sync_methods = False - root_marker = "" # For some FSs, may require leading '/' or other character - - #: Extra *class attributes* that should be considered when hashing. - _extra_tokenize_attributes = () - - def __init__(self, *args, **storage_options): - """Create and configure file-system instance - - Instances may be cachable, so if similar enough arguments are seen - a new instance is not required. The token attribute exists to allow - implementations to cache instances if they wish. - - A reasonable default should be provided if there are no arguments. - - Subclasses should call this method. - - Parameters - ---------- - use_listings_cache, listings_expiry_time, max_paths: - passed to ``DirCache``, if the implementation supports - directory listing caching. Pass use_listings_cache=False - to disable such caching. - skip_instance_cache: bool - If this is a cachable implementation, pass True here to force - creating a new instance even if a matching instance exists, and prevent - storing this instance. 
- asynchronous: bool - loop: asyncio-compatible IOLoop or None - """ - if self._cached: - # reusing instance, don't change - return - self._cached = True - self._intrans = False - self._transaction = None - self._invalidated_caches_in_transaction = [] - self.dircache = DirCache(**storage_options) - - if storage_options.pop("add_docs", None): - warnings.warn("add_docs is no longer supported.", FutureWarning) - - if storage_options.pop("add_aliases", None): - warnings.warn("add_aliases has been removed.", FutureWarning) - # This is set in _Cached - self._fs_token_ = None - - @property - def fsid(self): - """Persistent filesystem id that can be used to compare filesystems - across sessions. - """ - raise NotImplementedError - - @property - def _fs_token(self): - return self._fs_token_ - - def __dask_tokenize__(self): - return self._fs_token - - def __hash__(self): - return int(self._fs_token, 16) - - def __eq__(self, other): - return isinstance(other, type(self)) and self._fs_token == other._fs_token - - def __reduce__(self): - return make_instance, (type(self), self.storage_args, self.storage_options) - - @classmethod - def _strip_protocol(cls, path): - """Turn path from fully-qualified to file-system-specific - - May require FS-specific handling, e.g., for relative paths or links. - """ - if isinstance(path, list): - return [cls._strip_protocol(p) for p in path] - path = stringify_path(path) - protos = (cls.protocol,) if isinstance(cls.protocol, str) else cls.protocol - for protocol in protos: - if path.startswith(protocol + "://"): - path = path[len(protocol) + 3 :] - elif path.startswith(protocol + "::"): - path = path[len(protocol) + 2 :] - path = path.rstrip("/") - # use of root_marker to make minimum required path, e.g., "/" - return path or cls.root_marker - - def unstrip_protocol(self, name: str) -> str: - """Format FS-specific path to generic, including protocol""" - protos = (self.protocol,) if isinstance(self.protocol, str) else self.protocol - for protocol in protos: - if name.startswith(f"{protocol}://"): - return name - return f"{protos[0]}://{name}" - - @staticmethod - def _get_kwargs_from_urls(path): - """If kwargs can be encoded in the paths, extract them here - - This should happen before instantiation of the class; incoming paths - then should be amended to strip the options in methods. - - Examples may look like an sftp path "sftp://user@host:/my/path", where - the user and host should become kwargs and later get stripped. - """ - # by default, nothing happens - return {} - - @classmethod - def current(cls): - """Return the most recently instantiated FileSystem - - If no instance has been created, then create one with defaults - """ - if cls._latest in cls._cache: - return cls._cache[cls._latest] - return cls() - - @property - def transaction(self): - """A context within which files are committed together upon exit - - Requires the file class to implement `.commit()` and `.discard()` - for the normal and exception cases. - """ - if self._transaction is None: - self._transaction = Transaction(self) - return self._transaction - - def start_transaction(self): - """Begin write transaction for deferring files, non-context version""" - self._intrans = True - self._transaction = Transaction(self) - return self.transaction - - def end_transaction(self): - """Finish write transaction, non-context version""" - self.transaction.complete() - self._transaction = None - # The invalid cache must be cleared after the transcation is completed. 
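        # Usage sketch (added comment, not from the original source; the filesystem
        # class name is hypothetical): the deferred invalidation handled by the loop
        # below matters for code such as
        #   fs = SomeFileSystem()
        #   with fs.transaction:
        #       with fs.open("bucket/key", "wb") as f:
        #           f.write(b"data")   # committed, and caches invalidated, on exit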
- for path in self._invalidated_caches_in_transaction: - self.invalidate_cache(path) - self._invalidated_caches_in_transaction.clear() - - def invalidate_cache(self, path=None): - """ - Discard any cached directory information - - Parameters - ---------- - path: string or None - If None, clear all listings cached else listings at or under given - path. - """ - # Not necessary to implement invalidation mechanism, may have no cache. - # But if have, you should call this method of parent class from your - # subclass to ensure expiring caches after transacations correctly. - # See the implementation of FTPFileSystem in ftp.py - if self._intrans: - self._invalidated_caches_in_transaction.append(path) - - def mkdir(self, path, create_parents=True, **kwargs): - """ - Create directory entry at path - - For systems that don't have true directories, may create an for - this instance only and not touch the real filesystem - - Parameters - ---------- - path: str - location - create_parents: bool - if True, this is equivalent to ``makedirs`` - kwargs: - may be permissions, etc. - """ - pass # not necessary to implement, may not have directories - - def makedirs(self, path, exist_ok=False): - """Recursively make directories - - Creates directory at path and any intervening required directories. - Raises exception if, for instance, the path already exists but is a - file. - - Parameters - ---------- - path: str - leaf directory name - exist_ok: bool (False) - If False, will error if the target already exists - """ - pass # not necessary to implement, may not have directories - - def rmdir(self, path): - """Remove a directory, if empty""" - pass # not necessary to implement, may not have directories - - def ls(self, path, detail=True, **kwargs): - """List objects at path. - - This should include subdirectories and files at that location. The - difference between a file and a directory must be clear when details - are requested. - - The specific keys, or perhaps a FileInfo class, or similar, is TBD, - but must be consistent across implementations. - Must include: - - - full path to the entry (without protocol) - - size of the entry, in bytes. If the value cannot be determined, will - be ``None``. - - type of entry, "file", "directory" or other - - Additional information - may be present, appropriate to the file-system, e.g., generation, - checksum, etc. - - May use refresh=True|False to allow use of self._ls_from_cache to - check for a saved listing and avoid calling the backend. This would be - common where listing may be expensive. - - Parameters - ---------- - path: str - detail: bool - if True, gives a list of dictionaries, where each is the same as - the result of ``info(path)``. If False, gives a list of paths - (str). - kwargs: may have additional backend-specific options, such as version - information - - Returns - ------- - List of strings if detail is False, or list of directory information - dicts if detail is True. - """ - raise NotImplementedError - - def _ls_from_cache(self, path): - """Check cache for listing - - Returns listing, if found (may be empty list for a directly that exists - but contains nothing), None if not in cache. 
- """ - parent = self._parent(path) - if path.rstrip("/") in self.dircache: - return self.dircache[path.rstrip("/")] - try: - files = [ - f - for f in self.dircache[parent] - if f["name"] == path - or (f["name"] == path.rstrip("/") and f["type"] == "directory") - ] - if len(files) == 0: - # parent dir was listed but did not contain this file - raise FileNotFoundError(path) - return files - except KeyError: - pass - - def walk(self, path, maxdepth=None, topdown=True, on_error="omit", **kwargs): - """Return all files belows path - - List all files, recursing into subdirectories; output is iterator-style, - like ``os.walk()``. For a simple list of files, ``find()`` is available. - - When topdown is True, the caller can modify the dirnames list in-place (perhaps - using del or slice assignment), and walk() will - only recurse into the subdirectories whose names remain in dirnames; - this can be used to prune the search, impose a specific order of visiting, - or even to inform walk() about directories the caller creates or renames before - it resumes walk() again. - Modifying dirnames when topdown is False has no effect. (see os.walk) - - Note that the "files" outputted will include anything that is not - a directory, such as links. - - Parameters - ---------- - path: str - Root to recurse into - maxdepth: int - Maximum recursion depth. None means limitless, but not recommended - on link-based file-systems. - topdown: bool (True) - Whether to walk the directory tree from the top downwards or from - the bottom upwards. - on_error: "omit", "raise", a collable - if omit (default), path with exception will simply be empty; - If raise, an underlying exception will be raised; - if callable, it will be called with a single OSError instance as argument - kwargs: passed to ``ls`` - """ - if maxdepth is not None and maxdepth < 1: - raise ValueError("maxdepth must be at least 1") - - path = self._strip_protocol(path) - full_dirs = {} - dirs = {} - files = {} - - detail = kwargs.pop("detail", False) - try: - listing = self.ls(path, detail=True, **kwargs) - except (FileNotFoundError, OSError) as e: - if on_error == "raise": - raise - elif callable(on_error): - on_error(e) - if detail: - return path, {}, {} - return path, [], [] - - for info in listing: - # each info name must be at least [path]/part , but here - # we check also for names like [path]/part/ - pathname = info["name"].rstrip("/") - name = pathname.rsplit("/", 1)[-1] - if info["type"] == "directory" and pathname != path: - # do not include "self" path - full_dirs[name] = pathname - dirs[name] = info - elif pathname == path: - # file-like with same name as give path - files[""] = info - else: - files[name] = info - - if not detail: - dirs = list(dirs) - files = list(files) - - if topdown: - # Yield before recursion if walking top down - yield path, dirs, files - - if maxdepth is not None: - maxdepth -= 1 - if maxdepth < 1: - if not topdown: - yield path, dirs, files - return - - for d in dirs: - yield from self.walk( - full_dirs[d], - maxdepth=maxdepth, - detail=detail, - topdown=topdown, - **kwargs, - ) - - if not topdown: - # Yield after recursion if walking bottom up - yield path, dirs, files - - def find(self, path, maxdepth=None, withdirs=False, detail=False, **kwargs): - """List all files below path. - - Like posix ``find`` command without conditions - - Parameters - ---------- - path : str - maxdepth: int or None - If not None, the maximum number of levels to descend - withdirs: bool - Whether to include directory paths in the output. 
This is True - when used by glob, but users usually only want files. - kwargs are passed to ``ls``. - """ - # TODO: allow equivalent of -name parameter - path = self._strip_protocol(path) - out = {} - - # Add the root directory if withdirs is requested - # This is needed for posix glob compliance - if withdirs and path != "" and self.isdir(path): - out[path] = self.info(path) - - for _, dirs, files in self.walk(path, maxdepth, detail=True, **kwargs): - if withdirs: - files.update(dirs) - out.update({info["name"]: info for name, info in files.items()}) - if not out and self.isfile(path): - # walk works on directories, but find should also return [path] - # when path happens to be a file - out[path] = {} - names = sorted(out) - if not detail: - return names - else: - return {name: out[name] for name in names} - - def du(self, path, total=True, maxdepth=None, withdirs=False, **kwargs): - """Space used by files and optionally directories within a path - - Directory size does not include the size of its contents. - - Parameters - ---------- - path: str - total: bool - Whether to sum all the file sizes - maxdepth: int or None - Maximum number of directory levels to descend, None for unlimited. - withdirs: bool - Whether to include directory paths in the output. - kwargs: passed to ``find`` - - Returns - ------- - Dict of {path: size} if total=False, or int otherwise, where numbers - refer to bytes used. - """ - sizes = {} - if withdirs and self.isdir(path): - # Include top-level directory in output - info = self.info(path) - sizes[info["name"]] = info["size"] - for f in self.find(path, maxdepth=maxdepth, withdirs=withdirs, **kwargs): - info = self.info(f) - sizes[info["name"]] = info["size"] - if total: - return sum(sizes.values()) - else: - return sizes - - def glob(self, path, maxdepth=None, **kwargs): - """ - Find files by glob-matching. - - If the path ends with '/', only folders are returned. - - We support ``"**"``, - ``"?"`` and ``"[..]"``. We do not support ^ for pattern negation. - - The `maxdepth` option is applied on the first `**` found in the path. - - Search path names that contain embedded characters special to this - implementation of glob may not produce expected results; - e.g., ``foo/bar/*starredfilename*``. - - kwargs are passed to ``ls``. 
- """ - if maxdepth is not None and maxdepth < 1: - raise ValueError("maxdepth must be at least 1") - - import re - - ends = path.endswith("/") - path = self._strip_protocol(path) - idx_star = path.find("*") if path.find("*") >= 0 else len(path) - idx_qmark = path.find("?") if path.find("?") >= 0 else len(path) - idx_brace = path.find("[") if path.find("[") >= 0 else len(path) - - min_idx = min(idx_star, idx_qmark, idx_brace) - - detail = kwargs.pop("detail", False) - - if not has_magic(path): - if self.exists(path): - if not detail: - return [path] - else: - return {path: self.info(path)} - else: - if not detail: - return [] # glob of non-existent returns empty - else: - return {} - elif "/" in path[:min_idx]: - min_idx = path[:min_idx].rindex("/") - root = path[: min_idx + 1] - depth = path[min_idx + 1 :].count("/") + 1 - else: - root = "" - depth = path[min_idx + 1 :].count("/") + 1 - - if "**" in path: - if maxdepth is not None: - idx_double_stars = path.find("**") - depth_double_stars = path[idx_double_stars:].count("/") + 1 - depth = depth - depth_double_stars + maxdepth - else: - depth = None - - allpaths = self.find(root, maxdepth=depth, withdirs=True, detail=True, **kwargs) - # Escape characters special to python regex, leaving our supported - # special characters in place. - # See https://www.gnu.org/software/bash/manual/html_node/Pattern-Matching.html - # for shell globbing details. - pattern = ( - "^" - + ( - path.replace("\\", r"\\") - .replace(".", r"\.") - .replace("+", r"\+") - .replace("//", "/") - .replace("(", r"\(") - .replace(")", r"\)") - .replace("|", r"\|") - .replace("^", r"\^") - .replace("$", r"\$") - .replace("{", r"\{") - .replace("}", r"\}") - .rstrip("/") - .replace("?", ".") - ) - + "$" - ) - pattern = re.sub("/[*]{2}", "=SLASH_DOUBLE_STARS=", pattern) - pattern = re.sub("[*]{2}/?", "=DOUBLE_STARS=", pattern) - pattern = re.sub("[*]", "[^/]*", pattern) - pattern = re.sub("=SLASH_DOUBLE_STARS=", "(|/.*)", pattern) - pattern = re.sub("=DOUBLE_STARS=", ".*", pattern) - pattern = re.compile(pattern) - - out = { - p: allpaths[p] - for p in sorted(allpaths) - if pattern.match(p.replace("//", "/").rstrip("/")) - } - - # Return directories only when the glob end by a slash - # This is needed for posix glob compliance - if ends: - out = {k: v for k, v in out.items() if v["type"] == "directory"} - - if detail: - return out - else: - return list(out) - - def exists(self, path, **kwargs): - """Is there a file at the given path""" - try: - self.info(path, **kwargs) - return True - except: # noqa: E722 - # any exception allowed bar FileNotFoundError? - return False - - def lexists(self, path, **kwargs): - """If there is a file at the given path (including - broken links)""" - return self.exists(path) - - def info(self, path, **kwargs): - """Give details of entry at path - - Returns a single dictionary, with exactly the same information as ``ls`` - would with ``detail=True``. - - The default implementation should calls ls and could be overridden by a - shortcut. kwargs are passed on to ```ls()``. - - Some file systems might not be able to measure the file's size, in - which case, the returned dict will include ``'size': None``. - - Returns - ------- - dict with keys: name (full path in the FS), size (in bytes), type (file, - directory, or something else) and other FS-specific keys. 
- """ - path = self._strip_protocol(path) - out = self.ls(self._parent(path), detail=True, **kwargs) - out = [o for o in out if o["name"].rstrip("/") == path] - if out: - return out[0] - out = self.ls(path, detail=True, **kwargs) - path = path.rstrip("/") - out1 = [o for o in out if o["name"].rstrip("/") == path] - if len(out1) == 1: - if "size" not in out1[0]: - out1[0]["size"] = None - return out1[0] - elif len(out1) > 1 or out: - return {"name": path, "size": 0, "type": "directory"} - else: - raise FileNotFoundError(path) - - def checksum(self, path): - """Unique value for current version of file - - If the checksum is the same from one moment to another, the contents - are guaranteed to be the same. If the checksum changes, the contents - *might* have changed. - - This should normally be overridden; default will probably capture - creation/modification timestamp (which would be good) or maybe - access timestamp (which would be bad) - """ - return int(tokenize(self.info(path)), 16) - - def size(self, path): - """Size in bytes of file""" - return self.info(path).get("size", None) - - def sizes(self, paths): - """Size in bytes of each file in a list of paths""" - return [self.size(p) for p in paths] - - def isdir(self, path): - """Is this entry directory-like?""" - try: - return self.info(path)["type"] == "directory" - except OSError: - return False - - def isfile(self, path): - """Is this entry file-like?""" - try: - return self.info(path)["type"] == "file" - except: # noqa: E722 - return False - - def read_text(self, path, encoding=None, errors=None, newline=None, **kwargs): - """Get the contents of the file as a string. - - Parameters - ---------- - path: str - URL of file on this filesystems - encoding, errors, newline: same as `open`. - """ - with self.open( - path, - mode="r", - encoding=encoding, - errors=errors, - newline=newline, - **kwargs, - ) as f: - return f.read() - - def write_text( - self, path, value, encoding=None, errors=None, newline=None, **kwargs - ): - """Write the text to the given file. - - An existing file will be overwritten. - - Parameters - ---------- - path: str - URL of file on this filesystems - value: str - Text to write. - encoding, errors, newline: same as `open`. - """ - with self.open( - path, - mode="w", - encoding=encoding, - errors=errors, - newline=newline, - **kwargs, - ) as f: - return f.write(value) - - def cat_file(self, path, start=None, end=None, **kwargs): - """Get the content of a file - - Parameters - ---------- - path: URL of file on this filesystems - start, end: int - Bytes limits of the read. If negative, backwards from end, - like usual python slices. Either can be None for start or - end of file, respectively - kwargs: passed to ``open()``. - """ - # explicitly set buffering off? - with self.open(path, "rb", **kwargs) as f: - if start is not None: - if start >= 0: - f.seek(start) - else: - f.seek(max(0, f.size + start)) - if end is not None: - if end < 0: - end = f.size + end - return f.read(end - f.tell()) - return f.read() - - def pipe_file(self, path, value, **kwargs): - """Set the bytes of given file""" - with self.open(path, "wb", **kwargs) as f: - f.write(value) - - def pipe(self, path, value=None, **kwargs): - """Put value into path - - (counterpart to ``cat``) - - Parameters - ---------- - path: string or dict(str, bytes) - If a string, a single remote location to put ``value`` bytes; if a dict, - a mapping of {path: bytesvalue}. - value: bytes, optional - If using a single path, these are the bytes to put there. 
Ignored if - ``path`` is a dict - """ - if isinstance(path, str): - self.pipe_file(self._strip_protocol(path), value, **kwargs) - elif isinstance(path, dict): - for k, v in path.items(): - self.pipe_file(self._strip_protocol(k), v, **kwargs) - else: - raise ValueError("path must be str or dict") - - def cat_ranges( - self, paths, starts, ends, max_gap=None, on_error="return", **kwargs - ): - if max_gap is not None: - raise NotImplementedError - if not isinstance(paths, list): - raise TypeError - if not isinstance(starts, list): - starts = [starts] * len(paths) - if not isinstance(ends, list): - ends = [ends] * len(paths) - if len(starts) != len(paths) or len(ends) != len(paths): - raise ValueError - out = [] - for p, s, e in zip(paths, starts, ends): - try: - out.append(self.cat_file(p, s, e)) - except Exception as e: - if on_error == "return": - out.append(e) - else: - raise - return out - - def cat(self, path, recursive=False, on_error="raise", **kwargs): - """Fetch (potentially multiple) paths' contents - - Parameters - ---------- - recursive: bool - If True, assume the path(s) are directories, and get all the - contained files - on_error : "raise", "omit", "return" - If raise, an underlying exception will be raised (converted to KeyError - if the type is in self.missing_exceptions); if omit, keys with exception - will simply not be included in the output; if "return", all keys are - included in the output, but the value will be bytes or an exception - instance. - kwargs: passed to cat_file - - Returns - ------- - dict of {path: contents} if there are multiple paths - or the path has been otherwise expanded - """ - paths = self.expand_path(path, recursive=recursive) - if ( - len(paths) > 1 - or isinstance(path, list) - or paths[0] != self._strip_protocol(path) - ): - out = {} - for path in paths: - try: - out[path] = self.cat_file(path, **kwargs) - except Exception as e: - if on_error == "raise": - raise - if on_error == "return": - out[path] = e - return out - else: - return self.cat_file(paths[0], **kwargs) - - def get_file( - self, rpath, lpath, callback=_DEFAULT_CALLBACK, outfile=None, **kwargs - ): - """Copy single remote file to local""" - from .implementations.local import LocalFileSystem - - if isfilelike(lpath): - outfile = lpath - elif self.isdir(rpath): - os.makedirs(lpath, exist_ok=True) - return None - - LocalFileSystem(auto_mkdir=True).makedirs(self._parent(lpath), exist_ok=True) - - with self.open(rpath, "rb", **kwargs) as f1: - if outfile is None: - outfile = open(lpath, "wb") - - try: - callback.set_size(getattr(f1, "size", None)) - data = True - while data: - data = f1.read(self.blocksize) - segment_len = outfile.write(data) - if segment_len is None: - segment_len = len(data) - callback.relative_update(segment_len) - finally: - if not isfilelike(lpath): - outfile.close() - - def get( - self, - rpath, - lpath, - recursive=False, - callback=_DEFAULT_CALLBACK, - maxdepth=None, - **kwargs, - ): - """Copy file(s) to local. - - Copies a specific file or tree of files (if recursive=True). If lpath - ends with a "/", it will be assumed to be a directory, and target files - will go within. Can submit a list of paths, which may be glob-patterns - and will be expanded. - - Calls get_file for each source. 
- """ - if isinstance(lpath, list) and isinstance(rpath, list): - # No need to expand paths when both source and destination - # are provided as lists - rpaths = rpath - lpaths = lpath - else: - from .implementations.local import ( - LocalFileSystem, - make_path_posix, - trailing_sep, - ) - - source_is_str = isinstance(rpath, str) - rpaths = self.expand_path(rpath, recursive=recursive, maxdepth=maxdepth) - if source_is_str and (not recursive or maxdepth is not None): - # Non-recursive glob does not copy directories - rpaths = [p for p in rpaths if not (trailing_sep(p) or self.isdir(p))] - if not rpaths: - return - - if isinstance(lpath, str): - lpath = make_path_posix(lpath) - - source_is_file = len(rpaths) == 1 - dest_is_dir = isinstance(lpath, str) and ( - trailing_sep(lpath) or LocalFileSystem().isdir(lpath) - ) - - exists = source_is_str and ( - (has_magic(rpath) and source_is_file) - or (not has_magic(rpath) and dest_is_dir and not trailing_sep(rpath)) - ) - lpaths = other_paths( - rpaths, - lpath, - exists=exists, - flatten=not source_is_str, - ) - - callback.set_size(len(lpaths)) - for lpath, rpath in callback.wrap(zip(lpaths, rpaths)): - callback.branch(rpath, lpath, kwargs) - self.get_file(rpath, lpath, **kwargs) - - def put_file(self, lpath, rpath, callback=_DEFAULT_CALLBACK, **kwargs): - """Copy single file to remote""" - if os.path.isdir(lpath): - self.makedirs(rpath, exist_ok=True) - return None - - with open(lpath, "rb") as f1: - size = f1.seek(0, 2) - callback.set_size(size) - f1.seek(0) - - self.mkdirs(self._parent(os.fspath(rpath)), exist_ok=True) - with self.open(rpath, "wb", **kwargs) as f2: - while f1.tell() < size: - data = f1.read(self.blocksize) - segment_len = f2.write(data) - if segment_len is None: - segment_len = len(data) - callback.relative_update(segment_len) - - def put( - self, - lpath, - rpath, - recursive=False, - callback=_DEFAULT_CALLBACK, - maxdepth=None, - **kwargs, - ): - """Copy file(s) from local. - - Copies a specific file or tree of files (if recursive=True). If rpath - ends with a "/", it will be assumed to be a directory, and target files - will go within. - - Calls put_file for each source. 
- """ - if isinstance(lpath, list) and isinstance(rpath, list): - # No need to expand paths when both source and destination - # are provided as lists - rpaths = rpath - lpaths = lpath - else: - from .implementations.local import ( - LocalFileSystem, - make_path_posix, - trailing_sep, - ) - - source_is_str = isinstance(lpath, str) - if source_is_str: - lpath = make_path_posix(lpath) - fs = LocalFileSystem() - lpaths = fs.expand_path(lpath, recursive=recursive, maxdepth=maxdepth) - if source_is_str and (not recursive or maxdepth is not None): - # Non-recursive glob does not copy directories - lpaths = [p for p in lpaths if not (trailing_sep(p) or fs.isdir(p))] - if not lpaths: - return - - source_is_file = len(lpaths) == 1 - dest_is_dir = isinstance(rpath, str) and ( - trailing_sep(rpath) or self.isdir(rpath) - ) - - rpath = ( - self._strip_protocol(rpath) - if isinstance(rpath, str) - else [self._strip_protocol(p) for p in rpath] - ) - exists = source_is_str and ( - (has_magic(lpath) and source_is_file) - or (not has_magic(lpath) and dest_is_dir and not trailing_sep(lpath)) - ) - rpaths = other_paths( - lpaths, - rpath, - exists=exists, - flatten=not source_is_str, - ) - - callback.set_size(len(rpaths)) - for lpath, rpath in callback.wrap(zip(lpaths, rpaths)): - callback.branch(lpath, rpath, kwargs) - self.put_file(lpath, rpath, **kwargs) - - def head(self, path, size=1024): - """Get the first ``size`` bytes from file""" - with self.open(path, "rb") as f: - return f.read(size) - - def tail(self, path, size=1024): - """Get the last ``size`` bytes from file""" - with self.open(path, "rb") as f: - f.seek(max(-size, -f.size), 2) - return f.read() - - def cp_file(self, path1, path2, **kwargs): - raise NotImplementedError - - def copy( - self, path1, path2, recursive=False, maxdepth=None, on_error=None, **kwargs - ): - """Copy within two locations in the filesystem - - on_error : "raise", "ignore" - If raise, any not-found exceptions will be raised; if ignore any - not-found exceptions will cause the path to be skipped; defaults to - raise unless recursive is true, where the default is ignore - """ - if on_error is None and recursive: - on_error = "ignore" - elif on_error is None: - on_error = "raise" - - if isinstance(path1, list) and isinstance(path2, list): - # No need to expand paths when both source and destination - # are provided as lists - paths1 = path1 - paths2 = path2 - else: - from .implementations.local import trailing_sep - - source_is_str = isinstance(path1, str) - paths1 = self.expand_path(path1, recursive=recursive, maxdepth=maxdepth) - if source_is_str and (not recursive or maxdepth is not None): - # Non-recursive glob does not copy directories - paths1 = [p for p in paths1 if not (trailing_sep(p) or self.isdir(p))] - if not paths1: - return - - source_is_file = len(paths1) == 1 - dest_is_dir = isinstance(path2, str) and ( - trailing_sep(path2) or self.isdir(path2) - ) - - exists = source_is_str and ( - (has_magic(path1) and source_is_file) - or (not has_magic(path1) and dest_is_dir and not trailing_sep(path1)) - ) - paths2 = other_paths( - paths1, - path2, - exists=exists, - flatten=not source_is_str, - ) - - for p1, p2 in zip(paths1, paths2): - try: - self.cp_file(p1, p2, **kwargs) - except FileNotFoundError: - if on_error == "raise": - raise - - def expand_path(self, path, recursive=False, maxdepth=None, **kwargs): - """Turn one or more globs or directories into a list of all matching paths - to files or directories. 
- - kwargs are passed to ``glob`` or ``find``, which may in turn call ``ls`` - """ - - if maxdepth is not None and maxdepth < 1: - raise ValueError("maxdepth must be at least 1") - - if isinstance(path, str): - out = self.expand_path([path], recursive, maxdepth) - else: - out = set() - path = [self._strip_protocol(p) for p in path] - for p in path: - if has_magic(p): - bit = set(self.glob(p, maxdepth=maxdepth, **kwargs)) - out |= bit - if recursive: - # glob call above expanded one depth so if maxdepth is defined - # then decrement it in expand_path call below. If it is zero - # after decrementing then avoid expand_path call. - if maxdepth is not None and maxdepth <= 1: - continue - out |= set( - self.expand_path( - list(bit), - recursive=recursive, - maxdepth=maxdepth - 1 if maxdepth is not None else None, - **kwargs, - ) - ) - continue - elif recursive: - rec = set( - self.find( - p, maxdepth=maxdepth, withdirs=True, detail=False, **kwargs - ) - ) - out |= rec - if p not in out and (recursive is False or self.exists(p)): - # should only check once, for the root - out.add(p) - if not out: - raise FileNotFoundError(path) - return sorted(out) - - def mv(self, path1, path2, recursive=False, maxdepth=None, **kwargs): - """Move file(s) from one location to another""" - if path1 == path2: - logger.debug("%s mv: The paths are the same, so no files were moved.", self) - else: - self.copy(path1, path2, recursive=recursive, maxdepth=maxdepth) - self.rm(path1, recursive=recursive) - - def rm_file(self, path): - """Delete a file""" - self._rm(path) - - def _rm(self, path): - """Delete one file""" - # this is the old name for the method, prefer rm_file - raise NotImplementedError - - def rm(self, path, recursive=False, maxdepth=None): - """Delete files. - - Parameters - ---------- - path: str or list of str - File(s) to delete. - recursive: bool - If file(s) are directories, recursively delete contents and then - also remove the directory - maxdepth: int or None - Depth to pass to walk for finding files to delete, if recursive. - If None, there will be no limit and infinite recursion may be - possible. - """ - path = self.expand_path(path, recursive=recursive, maxdepth=maxdepth) - for p in reversed(path): - self.rm_file(p) - - @classmethod - def _parent(cls, path): - path = cls._strip_protocol(path) - if "/" in path: - parent = path.rsplit("/", 1)[0].lstrip(cls.root_marker) - return cls.root_marker + parent - else: - return cls.root_marker - - def _open( - self, - path, - mode="rb", - block_size=None, - autocommit=True, - cache_options=None, - **kwargs, - ): - """Return raw bytes-mode file-like from the file-system""" - return AbstractBufferedFile( - self, - path, - mode, - block_size, - autocommit, - cache_options=cache_options, - **kwargs, - ) - - def open( - self, - path, - mode="rb", - block_size=None, - cache_options=None, - compression=None, - **kwargs, - ): - """ - Return a file-like object from the filesystem - - The resultant instance must function correctly in a context ``with`` - block. - - Parameters - ---------- - path: str - Target file - mode: str like 'rb', 'w' - See builtin ``open()`` - block_size: int - Some indication of buffering - this is a value in bytes - cache_options : dict, optional - Extra arguments to pass through to the cache. - compression: string or None - If given, open file using compression codec. Can either be a compression - name (a key in ``fsspec.compression.compr``) or "infer" to guess the - compression from the filename suffix. 
- encoding, errors, newline: passed on to TextIOWrapper for text mode - """ - import io - - path = self._strip_protocol(path) - if "b" not in mode: - mode = mode.replace("t", "") + "b" - - text_kwargs = { - k: kwargs.pop(k) - for k in ["encoding", "errors", "newline"] - if k in kwargs - } - return io.TextIOWrapper( - self.open( - path, - mode, - block_size=block_size, - cache_options=cache_options, - compression=compression, - **kwargs, - ), - **text_kwargs, - ) - else: - ac = kwargs.pop("autocommit", not self._intrans) - f = self._open( - path, - mode=mode, - block_size=block_size, - autocommit=ac, - cache_options=cache_options, - **kwargs, - ) - if compression is not None: - from fsspec.compression import compr - from fsspec.core import get_compression - - compression = get_compression(path, compression) - compress = compr[compression] - f = compress(f, mode=mode[0]) - - if not ac and "r" not in mode: - self.transaction.files.append(f) - return f - - def touch(self, path, truncate=True, **kwargs): - """Create empty file, or update timestamp - - Parameters - ---------- - path: str - file location - truncate: bool - If True, always set file size to 0; if False, update timestamp and - leave file unchanged, if backend allows this - """ - if truncate or not self.exists(path): - with self.open(path, "wb", **kwargs): - pass - else: - raise NotImplementedError # update timestamp, if possible - - def ukey(self, path): - """Hash of file properties, to tell if it has changed""" - return sha256(str(self.info(path)).encode()).hexdigest() - - def read_block(self, fn, offset, length, delimiter=None): - """Read a block of bytes from - - Starting at ``offset`` of the file, read ``length`` bytes. If - ``delimiter`` is set then we ensure that the read starts and stops at - delimiter boundaries that follow the locations ``offset`` and ``offset - + length``. If ``offset`` is zero then we start at zero. The - bytestring returned WILL include the end delimiter string. - - If offset+length is beyond the eof, reads to eof. - - Parameters - ---------- - fn: string - Path to filename - offset: int - Byte offset to start read - length: int - Number of bytes to read. If None, read to end. - delimiter: bytes (optional) - Ensure reading starts and stops at delimiter bytestring - - Examples - -------- - >>> fs.read_block('data/file.csv', 0, 13) # doctest: +SKIP - b'Alice, 100\\nBo' - >>> fs.read_block('data/file.csv', 0, 13, delimiter=b'\\n') # doctest: +SKIP - b'Alice, 100\\nBob, 200\\n' - - Use ``length=None`` to read to the end of the file. - >>> fs.read_block('data/file.csv', 0, None, delimiter=b'\\n') # doctest: +SKIP - b'Alice, 100\\nBob, 200\\nCharlie, 300' - - See Also - -------- - :func:`fsspec.utils.read_block` - """ - with self.open(fn, "rb") as f: - size = f.size - if length is None: - length = size - if size is not None and offset + length > size: - length = size - offset - return read_block(f, offset, length, delimiter) - - def to_json(self): - """ - JSON representation of this filesystem instance - - Returns - ------- - str: JSON structure with keys cls (the python location of this class), - protocol (text name of this class's protocol, first one in case of - multiple), args (positional args, usually empty), and all other - kwargs as their own keys. 
- """ - import json - - cls = type(self) - cls = ".".join((cls.__module__, cls.__name__)) - proto = ( - self.protocol[0] - if isinstance(self.protocol, (tuple, list)) - else self.protocol - ) - return json.dumps( - dict( - **{"cls": cls, "protocol": proto, "args": self.storage_args}, - **self.storage_options, - ) - ) - - @staticmethod - def from_json(blob): - """ - Recreate a filesystem instance from JSON representation - - See ``.to_json()`` for the expected structure of the input - - Parameters - ---------- - blob: str - - Returns - ------- - file system instance, not necessarily of this particular class. - """ - import json - - from .registry import _import_class, get_filesystem_class - - dic = json.loads(blob) - protocol = dic.pop("protocol") - try: - cls = _import_class(dic.pop("cls")) - except (ImportError, ValueError, RuntimeError, KeyError): - cls = get_filesystem_class(protocol) - return cls(*dic.pop("args", ()), **dic) - - def _get_pyarrow_filesystem(self): - """ - Make a version of the FS instance which will be acceptable to pyarrow - """ - # all instances already also derive from pyarrow - return self - - def get_mapper(self, root="", check=False, create=False, missing_exceptions=None): - """Create key/value store based on this file-system - - Makes a MutableMapping interface to the FS at the given root path. - See ``fsspec.mapping.FSMap`` for further details. - """ - from .mapping import FSMap - - return FSMap( - root, - self, - check=check, - create=create, - missing_exceptions=missing_exceptions, - ) - - @classmethod - def clear_instance_cache(cls): - """ - Clear the cache of filesystem instances. - - Notes - ----- - Unless overridden by setting the ``cachable`` class attribute to False, - the filesystem class stores a reference to newly created instances. This - prevents Python's normal rules around garbage collection from working, - since the instances refcount will not drop to zero until - ``clear_instance_cache`` is called. 
- """ - cls._cache.clear() - - def created(self, path): - """Return the created timestamp of a file as a datetime.datetime""" - raise NotImplementedError - - def modified(self, path): - """Return the modified timestamp of a file as a datetime.datetime""" - raise NotImplementedError - - # ------------------------------------------------------------------------ - # Aliases - - def read_bytes(self, path, start=None, end=None, **kwargs): - """Alias of `AbstractFileSystem.cat_file`.""" - return self.cat_file(path, start=start, end=end, **kwargs) - - def write_bytes(self, path, value, **kwargs): - """Alias of `AbstractFileSystem.pipe_file`.""" - self.pipe_file(path, value, **kwargs) - - def makedir(self, path, create_parents=True, **kwargs): - """Alias of `AbstractFileSystem.mkdir`.""" - return self.mkdir(path, create_parents=create_parents, **kwargs) - - def mkdirs(self, path, exist_ok=False): - """Alias of `AbstractFileSystem.makedirs`.""" - return self.makedirs(path, exist_ok=exist_ok) - - def listdir(self, path, detail=True, **kwargs): - """Alias of `AbstractFileSystem.ls`.""" - return self.ls(path, detail=detail, **kwargs) - - def cp(self, path1, path2, **kwargs): - """Alias of `AbstractFileSystem.copy`.""" - return self.copy(path1, path2, **kwargs) - - def move(self, path1, path2, **kwargs): - """Alias of `AbstractFileSystem.mv`.""" - return self.mv(path1, path2, **kwargs) - - def stat(self, path, **kwargs): - """Alias of `AbstractFileSystem.info`.""" - return self.info(path, **kwargs) - - def disk_usage(self, path, total=True, maxdepth=None, **kwargs): - """Alias of `AbstractFileSystem.du`.""" - return self.du(path, total=total, maxdepth=maxdepth, **kwargs) - - def rename(self, path1, path2, **kwargs): - """Alias of `AbstractFileSystem.mv`.""" - return self.mv(path1, path2, **kwargs) - - def delete(self, path, recursive=False, maxdepth=None): - """Alias of `AbstractFileSystem.rm`.""" - return self.rm(path, recursive=recursive, maxdepth=maxdepth) - - def upload(self, lpath, rpath, recursive=False, **kwargs): - """Alias of `AbstractFileSystem.put`.""" - return self.put(lpath, rpath, recursive=recursive, **kwargs) - - def download(self, rpath, lpath, recursive=False, **kwargs): - """Alias of `AbstractFileSystem.get`.""" - return self.get(rpath, lpath, recursive=recursive, **kwargs) - - def sign(self, path, expiration=100, **kwargs): - """Create a signed URL representing the given path - - Some implementations allow temporary URLs to be generated, as a - way of delegating credentials. - - Parameters - ---------- - path : str - The path on the filesystem - expiration : int - Number of seconds to enable the URL for (if supported) - - Returns - ------- - URL : str - The signed URL - - Raises - ------ - NotImplementedError : if method is not implemented for a filesystem - """ - raise NotImplementedError("Sign is not implemented for this filesystem") - - def _isfilestore(self): - # Originally inherited from pyarrow DaskFileSystem. Keeping this - # here for backwards compatibility as long as pyarrow uses its - # legacy fsspec-compatible filesystems and thus accepts fsspec - # filesystems as well - return False - - -class AbstractBufferedFile(io.IOBase): - """Convenient class to derive from to provide buffering - - In the case that the backend does not provide a pythonic file-like object - already, this class contains much of the logic to build one. The only - methods that need to be overridden are ``_upload_chunk``, - ``_initiate_upload`` and ``_fetch_range``. 
- """ - - DEFAULT_BLOCK_SIZE = 5 * 2**20 - _details = None - - def __init__( - self, - fs, - path, - mode="rb", - block_size="default", - autocommit=True, - cache_type="readahead", - cache_options=None, - size=None, - **kwargs, - ): - """ - Template for files with buffered reading and writing - - Parameters - ---------- - fs: instance of FileSystem - path: str - location in file-system - mode: str - Normal file modes. Currently only 'wb', 'ab' or 'rb'. Some file - systems may be read-only, and some may not support append. - block_size: int - Buffer size for reading or writing, 'default' for class default - autocommit: bool - Whether to write to final destination; may only impact what - happens when file is being closed. - cache_type: {"readahead", "none", "mmap", "bytes"}, default "readahead" - Caching policy in read mode. See the definitions in ``core``. - cache_options : dict - Additional options passed to the constructor for the cache specified - by `cache_type`. - size: int - If given and in read mode, suppressed having to look up the file size - kwargs: - Gets stored as self.kwargs - """ - from .core import caches - - self.path = path - self.fs = fs - self.mode = mode - self.blocksize = ( - self.DEFAULT_BLOCK_SIZE if block_size in ["default", None] else block_size - ) - self.loc = 0 - self.autocommit = autocommit - self.end = None - self.start = None - self.closed = False - - if cache_options is None: - cache_options = {} - - if "trim" in kwargs: - warnings.warn( - "Passing 'trim' to control the cache behavior has been deprecated. " - "Specify it within the 'cache_options' argument instead.", - FutureWarning, - ) - cache_options["trim"] = kwargs.pop("trim") - - self.kwargs = kwargs - - if mode not in {"ab", "rb", "wb"}: - raise NotImplementedError("File mode not supported") - if mode == "rb": - if size is not None: - self.size = size - else: - self.size = self.details["size"] - self.cache = caches[cache_type]( - self.blocksize, self._fetch_range, self.size, **cache_options - ) - else: - self.buffer = io.BytesIO() - self.offset = None - self.forced = False - self.location = None - - @property - def details(self): - if self._details is None: - self._details = self.fs.info(self.path) - return self._details - - @details.setter - def details(self, value): - self._details = value - self.size = value["size"] - - @property - def full_name(self): - return _unstrip_protocol(self.path, self.fs) - - @property - def closed(self): - # get around this attr being read-only in IOBase - # use getattr here, since this can be called during del - return getattr(self, "_closed", True) - - @closed.setter - def closed(self, c): - self._closed = c - - def __hash__(self): - if "w" in self.mode: - return id(self) - else: - return int(tokenize(self.details), 16) - - def __eq__(self, other): - """Files are equal if they have the same checksum, only in read mode""" - return self.mode == "rb" and other.mode == "rb" and hash(self) == hash(other) - - def commit(self): - """Move from temp to final destination""" - - def discard(self): - """Throw away temporary file""" - - def info(self): - """File information about this path""" - if "r" in self.mode: - return self.details - else: - raise ValueError("Info not available while writing") - - def tell(self): - """Current file location""" - return self.loc - - def seek(self, loc, whence=0): - """Set current file location - - Parameters - ---------- - loc: int - byte location - whence: {0, 1, 2} - from start of file, current location or end of file, resp. 
- """ - loc = int(loc) - if not self.mode == "rb": - raise OSError(ESPIPE, "Seek only available in read mode") - if whence == 0: - nloc = loc - elif whence == 1: - nloc = self.loc + loc - elif whence == 2: - nloc = self.size + loc - else: - raise ValueError(f"invalid whence ({whence}, should be 0, 1 or 2)") - if nloc < 0: - raise ValueError("Seek before start of file") - self.loc = nloc - return self.loc - - def write(self, data): - """ - Write data to buffer. - - Buffer only sent on flush() or if buffer is greater than - or equal to blocksize. - - Parameters - ---------- - data: bytes - Set of bytes to be written. - """ - if self.mode not in {"wb", "ab"}: - raise ValueError("File not in write mode") - if self.closed: - raise ValueError("I/O operation on closed file.") - if self.forced: - raise ValueError("This file has been force-flushed, can only close") - out = self.buffer.write(data) - self.loc += out - if self.buffer.tell() >= self.blocksize: - self.flush() - return out - - def flush(self, force=False): - """ - Write buffered data to backend store. - - Writes the current buffer, if it is larger than the block-size, or if - the file is being closed. - - Parameters - ---------- - force: bool - When closing, write the last block even if it is smaller than - blocks are allowed to be. Disallows further writing to this file. - """ - - if self.closed: - raise ValueError("Flush on closed file") - if force and self.forced: - raise ValueError("Force flush cannot be called more than once") - if force: - self.forced = True - - if self.mode not in {"wb", "ab"}: - # no-op to flush on read-mode - return - - if not force and self.buffer.tell() < self.blocksize: - # Defer write on small block - return - - if self.offset is None: - # Initialize a multipart upload - self.offset = 0 - try: - self._initiate_upload() - except: # noqa: E722 - self.closed = True - raise - - if self._upload_chunk(final=force) is not False: - self.offset += self.buffer.seek(0, 2) - self.buffer = io.BytesIO() - - def _upload_chunk(self, final=False): - """Write one part of a multi-block file upload - - Parameters - ========== - final: bool - This is the last block, so should complete file, if - self.autocommit is True. - """ - # may not yet have been initialized, may need to call _initialize_upload - - def _initiate_upload(self): - """Create remote file/upload""" - pass - - def _fetch_range(self, start, end): - """Get the specified set of bytes from remote""" - raise NotImplementedError - - def read(self, length=-1): - """ - Return data from cache, or fetch pieces as necessary - - Parameters - ---------- - length: int (-1) - Number of bytes to read; if <0, all remaining bytes. 
- """ - length = -1 if length is None else int(length) - if self.mode != "rb": - raise ValueError("File not in read mode") - if length < 0: - length = self.size - self.loc - if self.closed: - raise ValueError("I/O operation on closed file.") - logger.debug("%s read: %i - %i", self, self.loc, self.loc + length) - if length == 0: - # don't even bother calling fetch - return b"" - out = self.cache._fetch(self.loc, self.loc + length) - self.loc += len(out) - return out - - def readinto(self, b): - """mirrors builtin file's readinto method - - https://docs.python.org/3/library/io.html#io.RawIOBase.readinto - """ - out = memoryview(b).cast("B") - data = self.read(out.nbytes) - out[: len(data)] = data - return len(data) - - def readuntil(self, char=b"\n", blocks=None): - """Return data between current position and first occurrence of char - - char is included in the output, except if the end of the file is - encountered first. - - Parameters - ---------- - char: bytes - Thing to find - blocks: None or int - How much to read in each go. Defaults to file blocksize - which may - mean a new read on every call. - """ - out = [] - while True: - start = self.tell() - part = self.read(blocks or self.blocksize) - if len(part) == 0: - break - found = part.find(char) - if found > -1: - out.append(part[: found + len(char)]) - self.seek(start + found + len(char)) - break - out.append(part) - return b"".join(out) - - def readline(self): - """Read until first occurrence of newline character - - Note that, because of character encoding, this is not necessarily a - true line ending. - """ - return self.readuntil(b"\n") - - def __next__(self): - out = self.readline() - if out: - return out - raise StopIteration - - def __iter__(self): - return self - - def readlines(self): - """Return all data, split by the newline character""" - data = self.read() - lines = data.split(b"\n") - out = [l + b"\n" for l in lines[:-1]] - if data.endswith(b"\n"): - return out - else: - return out + [lines[-1]] - # return list(self) ??? 
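# --- Editor's illustrative sketch (not part of the original spec.py being removed above) ---
# The AbstractBufferedFile docstring earlier in this file states that a backend only
# needs to supply ``_upload_chunk``, ``_initiate_upload`` and ``_fetch_range``; the base
# class then provides read(), seek(), readline() and readuntil() on top of its block cache.
# The subclass below is a minimal, hypothetical read-only example over an in-memory blob;
# the name ``MemoryBackedFile`` is invented here, and the sketch assumes the standard
# ``fsspec`` package is importable.
from fsspec.spec import AbstractBufferedFile

class MemoryBackedFile(AbstractBufferedFile):
    """Read-only file whose bytes live in memory; only ``_fetch_range`` is overridden."""

    def __init__(self, data: bytes, path="memory://demo", **kwargs):
        self._data = data
        # Passing ``size`` up front avoids a call to fs.info(); fs=None suffices here
        # because nothing else touches the filesystem in read mode.
        super().__init__(fs=None, path=path, mode="rb", size=len(data), **kwargs)

    def _fetch_range(self, start, end):
        # Called by the readahead cache in block-sized chunks; read(), seek(),
        # readline() and readuntil() are all inherited from the base class.
        return self._data[start:end]

# Example usage of the sketch:
#   f = MemoryBackedFile(b"alpha\nbeta\n")
#   f.readline()  # -> b"alpha\n"
#   f.read()      # -> b"beta\n"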
- - def readinto1(self, b): - return self.readinto(b) - - def close(self): - """Close file - - Finalizes writes, discards cache - """ - if getattr(self, "_unclosable", False): - return - if self.closed: - return - if self.mode == "rb": - self.cache = None - else: - if not self.forced: - self.flush(force=True) - - if self.fs is not None: - self.fs.invalidate_cache(self.path) - self.fs.invalidate_cache(self.fs._parent(self.path)) - - self.closed = True - - def readable(self): - """Whether opened for reading""" - return self.mode == "rb" and not self.closed - - def seekable(self): - """Whether is seekable (only in read mode)""" - return self.readable() - - def writable(self): - """Whether opened for writing""" - return self.mode in {"wb", "ab"} and not self.closed - - def __del__(self): - if not self.closed: - self.close() - - def __str__(self): - return f"" - - __repr__ = __str__ - - def __enter__(self): - return self - - def __exit__(self, *args): - self.close() diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/node/dev/node_modules/esbuild-wasm/lib/browser.min.js b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/node/dev/node_modules/esbuild-wasm/lib/browser.min.js deleted file mode 100644 index 04f5cbccd9180a916d604c11bb4f8a5a0c666516..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/node/dev/node_modules/esbuild-wasm/lib/browser.min.js +++ /dev/null @@ -1,22 +0,0 @@ -(module=>{ -"use strict";var ve=Object.defineProperty;var Ke=Object.getOwnPropertyDescriptor;var _e=Object.getOwnPropertyNames;var Ve=Object.prototype.hasOwnProperty;var Ye=(e,t)=>{for(var n in t)ve(e,n,{get:t[n],enumerable:!0})},Je=(e,t,n,i)=>{if(t&&typeof t=="object"||typeof t=="function")for(let u of _e(t))!Ve.call(e,u)&&u!==n&&ve(e,u,{get:()=>t[u],enumerable:!(i=Ke(t,u))||i.enumerable});return e};var Qe=e=>Je(ve({},"__esModule",{value:!0}),e);var ne=(e,t,n)=>new Promise((i,u)=>{var l=p=>{try{s(n.next(p))}catch(b){u(b)}},f=p=>{try{s(n.throw(p))}catch(b){u(b)}},s=p=>p.done?i(p.value):Promise.resolve(p.value).then(l,f);s((n=n.apply(e,t)).next())});var we={};Ye(we,{analyzeMetafile:()=>pt,analyzeMetafileSync:()=>ht,build:()=>ut,buildSync:()=>gt,context:()=>ct,default:()=>vt,formatMessages:()=>dt,formatMessagesSync:()=>yt,initialize:()=>bt,transform:()=>ft,transformSync:()=>mt,version:()=>at});module.exports=Qe(we);function Oe(e){let t=i=>{if(i===null)n.write8(0);else if(typeof i=="boolean")n.write8(1),n.write8(+i);else if(typeof i=="number")n.write8(2),n.write32(i|0);else if(typeof i=="string")n.write8(3),n.write(Z(i));else if(i instanceof Uint8Array)n.write8(4),n.write(i);else if(i instanceof Array){n.write8(5),n.write32(i.length);for(let u of i)t(u)}else{let u=Object.keys(i);n.write8(6),n.write32(u.length);for(let l of u)n.write(Z(l)),t(i[l])}},n=new pe;return n.write32(0),n.write32(e.id<<1|+!e.isRequest),t(e.value),Re(n.buf,n.len-4,0),n.buf.subarray(0,n.len)}function ke(e){let t=()=>{switch(n.read8()){case 0:return null;case 1:return!!n.read8();case 2:return n.read32();case 3:return ie(n.read());case 4:return n.read();case 5:{let f=n.read32(),s=[];for(let p=0;p>>=1;let l=t();if(n.ptr!==e.length)throw new Error("Invalid packet");return{id:i,isRequest:u,value:l}}var pe=class{constructor(t=new Uint8Array(1024)){this.buf=t;this.len=0;this.ptr=0}_write(t){if(this.len+t>this.buf.length){let n=new Uint8Array((this.len+t)*2);n.set(this.buf),this.buf=n}return this.len+=t,this.len-t}write8(t){let 
n=this._write(1);this.buf[n]=t}write32(t){let n=this._write(4);Re(this.buf,t,n)}write(t){let n=this._write(4+t.length);Re(this.buf,t.length,n),this.buf.set(t,n+4)}_read(t){if(this.ptr+t>this.buf.length)throw new Error("Invalid packet");return this.ptr+=t,this.ptr-t}read8(){return this.buf[this._read(1)]}read32(){return Ee(this.buf,this._read(4))}read(){let t=this.read32(),n=new Uint8Array(t),i=this._read(n.length);return n.set(this.buf.subarray(i,i+t)),n}},Z,ie,xe;if(typeof TextEncoder!="undefined"&&typeof TextDecoder!="undefined"){let e=new TextEncoder,t=new TextDecoder;Z=n=>e.encode(n),ie=n=>t.decode(n),xe='new TextEncoder().encode("")'}else if(typeof Buffer!="undefined")Z=e=>Buffer.from(e),ie=e=>{let{buffer:t,byteOffset:n,byteLength:i}=e;return Buffer.from(t,n,i).toString()},xe='Buffer.from("")';else throw new Error("No UTF-8 codec found");if(!(Z("")instanceof Uint8Array))throw new Error(`Invariant violation: "${xe} instanceof Uint8Array" is incorrectly false - -This indicates that your JavaScript environment is broken. You cannot use -esbuild in this environment because esbuild relies on this invariant. This -is not a problem with esbuild. You need to fix your environment instead. -`);function Ee(e,t){return e[t++]|e[t++]<<8|e[t++]<<16|e[t++]<<24}function Re(e,t,n){e[n++]=t,e[n++]=t>>8,e[n++]=t>>16,e[n++]=t>>24}var Y=JSON.stringify,$e="warning",Me="silent";function Ce(e){if(V(e,"target"),e.indexOf(",")>=0)throw new Error(`Invalid target: ${e}`);return e}var ye=()=>null,I=e=>typeof e=="boolean"?null:"a boolean",y=e=>typeof e=="string"?null:"a string",he=e=>e instanceof RegExp?null:"a RegExp object",se=e=>typeof e=="number"&&e===(e|0)?null:"an integer",Pe=e=>typeof e=="function"?null:"a function",W=e=>Array.isArray(e)?null:"an array",ee=e=>typeof e=="object"&&e!==null&&!Array.isArray(e)?null:"an object",Ge=e=>typeof e=="object"&&e!==null?null:"an array or an object",Xe=e=>e instanceof WebAssembly.Module?null:"a WebAssembly.Module",Ae=e=>typeof e=="object"&&!Array.isArray(e)?null:"an object or null",Fe=e=>typeof e=="string"||typeof e=="boolean"?null:"a string or a boolean",Ze=e=>typeof e=="string"||typeof e=="object"&&e!==null&&!Array.isArray(e)?null:"a string or an object",et=e=>typeof e=="string"||Array.isArray(e)?null:"a string or an array",Te=e=>typeof e=="string"||e instanceof Uint8Array?null:"a string or a Uint8Array",tt=e=>typeof e=="string"||e instanceof URL?null:"a string or a URL";function r(e,t,n,i){let u=e[n];if(t[n+""]=!0,u===void 0)return;let l=i(u);if(l!==null)throw new Error(`${Y(n)} must be ${l}`);return u}function K(e,t,n){for(let i in e)if(!(i in t))throw new Error(`Invalid option ${n}: ${Y(i)}`)}function Be(e){let t=Object.create(null),n=r(e,t,"wasmURL",tt),i=r(e,t,"wasmModule",Xe),u=r(e,t,"worker",I);return K(e,t,"in initialize() call"),{wasmURL:n,wasmModule:i,worker:u}}function Ue(e){let t;if(e!==void 0){t=Object.create(null);for(let n in e){let i=e[n];if(typeof i=="string"||i===!1)t[n]=i;else throw new Error(`Expected ${Y(n)} in mangle cache to map to either a string or false`)}}return t}function be(e,t,n,i,u){let l=r(t,n,"color",I),f=r(t,n,"logLevel",y),s=r(t,n,"logLimit",se);l!==void 0?e.push(`--color=${l}`):i&&e.push("--color=true"),e.push(`--log-level=${f||u}`),e.push(`--log-limit=${s||0}`)}function V(e,t,n){if(typeof e!="string")throw new Error(`Expected value for ${t}${n!==void 0?" 
"+Y(n):""} to be a string, got ${typeof e} instead`);return e}function je(e,t,n){let i=r(t,n,"legalComments",y),u=r(t,n,"sourceRoot",y),l=r(t,n,"sourcesContent",I),f=r(t,n,"target",et),s=r(t,n,"format",y),p=r(t,n,"globalName",y),b=r(t,n,"mangleProps",he),w=r(t,n,"reserveProps",he),T=r(t,n,"mangleQuoted",I),j=r(t,n,"minify",I),U=r(t,n,"minifySyntax",I),L=r(t,n,"minifyWhitespace",I),Q=r(t,n,"minifyIdentifiers",I),B=r(t,n,"lineLimit",se),z=r(t,n,"drop",W),J=r(t,n,"dropLabels",W),v=r(t,n,"charset",y),m=r(t,n,"treeShaking",I),d=r(t,n,"ignoreAnnotations",I),o=r(t,n,"jsx",y),x=r(t,n,"jsxFactory",y),$=r(t,n,"jsxFragment",y),M=r(t,n,"jsxImportSource",y),D=r(t,n,"jsxDev",I),a=r(t,n,"jsxSideEffects",I),c=r(t,n,"define",ee),h=r(t,n,"logOverride",ee),E=r(t,n,"supported",ee),C=r(t,n,"pure",W),S=r(t,n,"keepNames",I),O=r(t,n,"platform",y),A=r(t,n,"tsconfigRaw",Ze);if(i&&e.push(`--legal-comments=${i}`),u!==void 0&&e.push(`--source-root=${u}`),l!==void 0&&e.push(`--sources-content=${l}`),f&&(Array.isArray(f)?e.push(`--target=${Array.from(f).map(Ce).join(",")}`):e.push(`--target=${Ce(f)}`)),s&&e.push(`--format=${s}`),p&&e.push(`--global-name=${p}`),O&&e.push(`--platform=${O}`),A&&e.push(`--tsconfig-raw=${typeof A=="string"?A:JSON.stringify(A)}`),j&&e.push("--minify"),U&&e.push("--minify-syntax"),L&&e.push("--minify-whitespace"),Q&&e.push("--minify-identifiers"),B&&e.push(`--line-limit=${B}`),v&&e.push(`--charset=${v}`),m!==void 0&&e.push(`--tree-shaking=${m}`),d&&e.push("--ignore-annotations"),z)for(let R of z)e.push(`--drop:${V(R,"drop")}`);if(J&&e.push(`--drop-labels=${Array.from(J).map(R=>V(R,"dropLabels")).join(",")}`),b&&e.push(`--mangle-props=${b.source}`),w&&e.push(`--reserve-props=${w.source}`),T!==void 0&&e.push(`--mangle-quoted=${T}`),o&&e.push(`--jsx=${o}`),x&&e.push(`--jsx-factory=${x}`),$&&e.push(`--jsx-fragment=${$}`),M&&e.push(`--jsx-import-source=${M}`),D&&e.push("--jsx-dev"),a&&e.push("--jsx-side-effects"),c)for(let R in c){if(R.indexOf("=")>=0)throw new Error(`Invalid define: ${R}`);e.push(`--define:${R}=${V(c[R],"define",R)}`)}if(h)for(let R in h){if(R.indexOf("=")>=0)throw new Error(`Invalid log override: ${R}`);e.push(`--log-override:${R}=${V(h[R],"log override",R)}`)}if(E)for(let R in E){if(R.indexOf("=")>=0)throw new Error(`Invalid supported: ${R}`);let k=E[R];if(typeof k!="boolean")throw new Error(`Expected value for supported ${Y(R)} to be a boolean, got ${typeof k} instead`);e.push(`--supported:${R}=${k}`)}if(C)for(let R of C)e.push(`--pure:${V(R,"pure")}`);S&&e.push("--keep-names")}function nt(e,t,n,i,u){var oe;let l=[],f=[],s=Object.create(null),p=null,b=null;be(l,t,s,n,i),je(l,t,s);let w=r(t,s,"sourcemap",Fe),T=r(t,s,"bundle",I),j=r(t,s,"splitting",I),U=r(t,s,"preserveSymlinks",I),L=r(t,s,"metafile",I),Q=r(t,s,"outfile",y),B=r(t,s,"outdir",y),z=r(t,s,"outbase",y),J=r(t,s,"tsconfig",y),v=r(t,s,"resolveExtensions",W),m=r(t,s,"nodePaths",W),d=r(t,s,"mainFields",W),o=r(t,s,"conditions",W),x=r(t,s,"external",W),$=r(t,s,"packages",y),M=r(t,s,"alias",ee),D=r(t,s,"loader",ee),a=r(t,s,"outExtension",ee),c=r(t,s,"publicPath",y),h=r(t,s,"entryNames",y),E=r(t,s,"chunkNames",y),C=r(t,s,"assetNames",y),S=r(t,s,"inject",W),O=r(t,s,"banner",ee),A=r(t,s,"footer",ee),R=r(t,s,"entryPoints",Ge),k=r(t,s,"absWorkingDir",y),F=r(t,s,"stdin",ee),P=(oe=r(t,s,"write",I))!=null?oe:u,q=r(t,s,"allowOverwrite",I),_=r(t,s,"mangleCache",ee);if(s.plugins=!0,K(t,s,`in ${e}() 
call`),w&&l.push(`--sourcemap${w===!0?"":`=${w}`}`),T&&l.push("--bundle"),q&&l.push("--allow-overwrite"),j&&l.push("--splitting"),U&&l.push("--preserve-symlinks"),L&&l.push("--metafile"),Q&&l.push(`--outfile=${Q}`),B&&l.push(`--outdir=${B}`),z&&l.push(`--outbase=${z}`),J&&l.push(`--tsconfig=${J}`),$&&l.push(`--packages=${$}`),v){let g=[];for(let N of v){if(V(N,"resolve extension"),N.indexOf(",")>=0)throw new Error(`Invalid resolve extension: ${N}`);g.push(N)}l.push(`--resolve-extensions=${g.join(",")}`)}if(c&&l.push(`--public-path=${c}`),h&&l.push(`--entry-names=${h}`),E&&l.push(`--chunk-names=${E}`),C&&l.push(`--asset-names=${C}`),d){let g=[];for(let N of d){if(V(N,"main field"),N.indexOf(",")>=0)throw new Error(`Invalid main field: ${N}`);g.push(N)}l.push(`--main-fields=${g.join(",")}`)}if(o){let g=[];for(let N of o){if(V(N,"condition"),N.indexOf(",")>=0)throw new Error(`Invalid condition: ${N}`);g.push(N)}l.push(`--conditions=${g.join(",")}`)}if(x)for(let g of x)l.push(`--external:${V(g,"external")}`);if(M)for(let g in M){if(g.indexOf("=")>=0)throw new Error(`Invalid package name in alias: ${g}`);l.push(`--alias:${g}=${V(M[g],"alias",g)}`)}if(O)for(let g in O){if(g.indexOf("=")>=0)throw new Error(`Invalid banner file type: ${g}`);l.push(`--banner:${g}=${V(O[g],"banner",g)}`)}if(A)for(let g in A){if(g.indexOf("=")>=0)throw new Error(`Invalid footer file type: ${g}`);l.push(`--footer:${g}=${V(A[g],"footer",g)}`)}if(S)for(let g of S)l.push(`--inject:${V(g,"inject")}`);if(D)for(let g in D){if(g.indexOf("=")>=0)throw new Error(`Invalid loader extension: ${g}`);l.push(`--loader:${g}=${V(D[g],"loader",g)}`)}if(a)for(let g in a){if(g.indexOf("=")>=0)throw new Error(`Invalid out extension: ${g}`);l.push(`--out-extension:${g}=${V(a[g],"out extension",g)}`)}if(R)if(Array.isArray(R))for(let g=0,N=R.length;g{let m=s+v.length;if(m>f.length){let o=new Uint8Array(m*2);o.set(f),f=o}f.set(v,s),s+=v.length;let d=0;for(;d+4<=s;){let o=Ee(f,d);if(d+4+o>s)break;d+=4,L(f.subarray(d,d+o)),d+=o}d>0&&(f.copyWithin(0,d,s),s-=d)},b=v=>{n.didClose=!0,v&&(n.reason=": "+(v.message||v));let m="The service was stopped"+n.reason;for(let d in i)i[d](m,null);i={}},w=(v,m,d)=>{if(n.didClose)return d("The service is no longer running"+n.reason,null);let o=u++;i[o]=(x,$)=>{try{d(x,$)}finally{v&&v.unref()}},v&&v.ref(),e.writeToStdin(Oe({id:o,isRequest:!0,value:m}))},T=(v,m)=>{if(n.didClose)throw new Error("The service is no longer running"+n.reason);e.writeToStdin(Oe({id:v,isRequest:!1,value:m}))},j=(v,m)=>ne(this,null,function*(){try{if(m.command==="ping"){T(v,{});return}if(typeof m.key=="number"){let d=t[m.key];if(d){let o=d[m.command];if(o){yield o(v,m);return}}}throw new Error("Invalid command: "+m.command)}catch(d){let o=[le(d,e,null,void 0,"")];try{T(v,{errors:o})}catch(x){}}}),U=!0,L=v=>{if(U){U=!1;let d=String.fromCharCode(...v);if(d!=="0.19.0")throw new Error(`Cannot start service: Host version "0.19.0" does not match binary version ${Y(d)}`);return}let m=ke(v);if(m.isRequest)j(m.id,m.value);else{let d=i[m.id];delete i[m.id],m.value.error?d(m.value.error,{}):d(null,m.value)}};return{readFromStdout:p,afterClose:b,service:{buildOrContext:({callName:v,refs:m,options:d,isTTY:o,defaultWD:x,callback:$})=>{let M=0,D=l++,a={},c={ref(){++M===1&&m&&m.ref()},unref(){--M===0&&(delete t[D],m&&m.unref())}};t[D]=a,c.ref(),it(v,D,w,T,c,e,a,d,o,x,(h,E)=>{try{$(h,E)}finally{c.unref()}})},transform:({callName:v,refs:m,input:d,options:o,isTTY:x,fs:$,callback:M})=>{let D=qe(),a=c=>{try{if(typeof d!="string"&&!(d instanceof 
Uint8Array))throw new Error('The input to "transform" must be a string or a Uint8Array');let{flags:h,mangleCache:E}=rt(v,o,x,Me),C={command:"transform",flags:h,inputFS:c!==null,input:c!==null?Z(c):typeof d=="string"?Z(d):d};E&&(C.mangleCache=E),w(m,C,(S,O)=>{if(S)return M(new Error(S),null);let A=ae(O.errors,D),R=ae(O.warnings,D),k=1,F=()=>{if(--k===0){let P={warnings:R,code:O.code,map:O.map,mangleCache:void 0,legalComments:void 0};"legalComments"in O&&(P.legalComments=O==null?void 0:O.legalComments),O.mangleCache&&(P.mangleCache=O==null?void 0:O.mangleCache),M(null,P)}};if(A.length>0)return M(fe("Transform failed",A,R),null);O.codeFS&&(k++,$.readFile(O.code,(P,q)=>{P!==null?M(P,null):(O.code=q,F())})),O.mapFS&&(k++,$.readFile(O.map,(P,q)=>{P!==null?M(P,null):(O.map=q,F())})),F()})}catch(h){let E=[];try{be(E,o,{},x,Me)}catch(S){}let C=le(h,e,D,void 0,"");w(m,{command:"error",flags:E,error:C},()=>{C.detail=D.load(C.detail),M(fe("Transform failed",[C],[]),null)})}};if((typeof d=="string"||d instanceof Uint8Array)&&d.length>1024*1024){let c=a;a=()=>$.writeFile(d,c)}a(null)},formatMessages:({callName:v,refs:m,messages:d,options:o,callback:x})=>{let $=re(d,"messages",null,"");if(!o)throw new Error(`Missing second argument in ${v}() call`);let M={},D=r(o,M,"kind",y),a=r(o,M,"color",I),c=r(o,M,"terminalWidth",se);if(K(o,M,`in ${v}() call`),D===void 0)throw new Error(`Missing "kind" in ${v}() call`);if(D!=="error"&&D!=="warning")throw new Error(`Expected "kind" to be "error" or "warning" in ${v}() call`);let h={command:"format-msgs",messages:$,isWarning:D==="warning"};a!==void 0&&(h.color=a),c!==void 0&&(h.terminalWidth=c),w(m,h,(E,C)=>{if(E)return x(new Error(E),null);x(null,C.messages)})},analyzeMetafile:({callName:v,refs:m,metafile:d,options:o,callback:x})=>{o===void 0&&(o={});let $={},M=r(o,$,"color",I),D=r(o,$,"verbose",I);K(o,$,`in ${v}() call`);let a={command:"analyze-metafile",metafile:d};M!==void 0&&(a.color=M),D!==void 0&&(a.verbose=D),w(m,a,(c,h)=>{if(c)return x(new Error(c),null);x(null,h.result)})}}}}function it(e,t,n,i,u,l,f,s,p,b,w){let T=qe(),j=e==="context",U=(B,z)=>{let J=[];try{be(J,s,{},p,$e)}catch(m){}let v=le(B,l,T,void 0,z);n(u,{command:"error",flags:J,error:v},()=>{v.detail=T.load(v.detail),w(fe(j?"Context failed":"Build failed",[v],[]),null)})},L;if(typeof s=="object"){let B=s.plugins;if(B!==void 0){if(!Array.isArray(B))return U(new Error('"plugins" must be an array'),"");L=B}}if(L&&L.length>0){if(l.isSync)return U(new Error("Cannot use plugins in synchronous API calls"),"");lt(t,n,i,u,l,f,s,L,T).then(B=>{if(!B.ok)return U(B.error,B.pluginName);try{Q(B.requestPlugins,B.runOnEndCallbacks,B.scheduleOnDisposeCallbacks)}catch(z){U(z,"")}},B=>U(B,""));return}try{Q(null,(B,z)=>z([],[]),()=>{})}catch(B){U(B,"")}function Q(B,z,J){let v=l.hasFS,{entries:m,flags:d,write:o,stdinContents:x,stdinResolveDir:$,absWorkingDir:M,nodePaths:D,mangleCache:a}=nt(e,s,p,$e,v);if(o&&!l.hasFS)throw new Error('The "write" option is unavailable in this environment');let c={command:"build",key:t,entries:m,flags:d,write:o,stdinContents:x,stdinResolveDir:$,absWorkingDir:M||b,nodePaths:D,context:j};B&&(c.plugins=B),a&&(c.mangleCache=a);let h=(S,O)=>{let A={errors:ae(S.errors,T),warnings:ae(S.warnings,T),outputFiles:void 0,metafile:void 0,mangleCache:void 0},R=A.errors.slice(),k=A.warnings.slice();S.outputFiles&&(A.outputFiles=S.outputFiles.map(st)),S.metafile&&(A.metafile=JSON.parse(S.metafile)),S.mangleCache&&(A.mangleCache=S.mangleCache),S.writeToStdout!==void 
0&&console.log(ie(S.writeToStdout).replace(/\n$/,"")),z(A,(F,P)=>{if(R.length>0||F.length>0){let q=fe("Build failed",R.concat(F),k.concat(P));return O(q,null,F,P)}O(null,A,F,P)})},E,C;j&&(f["on-end"]=(S,O)=>new Promise(A=>{h(O,(R,k,F,P)=>{let q={errors:F,warnings:P};C&&C(R,k),E=void 0,C=void 0,i(S,q),A()})})),n(u,c,(S,O)=>{if(S)return w(new Error(S),null);if(!j)return h(O,(k,F)=>(J(),w(k,F)));if(O.errors.length>0)return w(fe("Context failed",O.errors,O.warnings),null);let A=!1,R={rebuild:()=>(E||(E=new Promise((k,F)=>{let P;C=(_,H)=>{P||(P=()=>_?F(_):k(H))};let q=()=>{n(u,{command:"rebuild",key:t},(H,oe)=>{H?F(new Error(H)):P?P():q()})};q()})),E),watch:(k={})=>new Promise((F,P)=>{if(!l.hasFS)throw new Error('Cannot use the "watch" API in this environment');K(k,{},"in watch() call"),n(u,{command:"watch",key:t},H=>{H?P(new Error(H)):F(void 0)})}),serve:(k={})=>new Promise((F,P)=>{if(!l.hasFS)throw new Error('Cannot use the "serve" API in this environment');let q={},_=r(k,q,"port",se),H=r(k,q,"host",y),oe=r(k,q,"servedir",y),g=r(k,q,"keyfile",y),N=r(k,q,"certfile",y),X=r(k,q,"fallback",y),te=r(k,q,"onRequest",Pe);K(k,q,"in serve() call");let G={command:"serve",key:t,onRequest:!!te};_!==void 0&&(G.port=_),H!==void 0&&(G.host=H),oe!==void 0&&(G.servedir=oe),g!==void 0&&(G.keyfile=g),N!==void 0&&(G.certfile=N),X!==void 0&&(G.fallback=X),n(u,G,(ce,Ie)=>{if(ce)return P(new Error(ce));te&&(f["serve-request"]=(We,ze)=>{te(ze.args),i(We,{})}),F(Ie)})}),cancel:()=>new Promise(k=>{if(A)return k();n(u,{command:"cancel",key:t},()=>{k()})}),dispose:()=>new Promise(k=>{if(A)return k();A=!0,n(u,{command:"dispose",key:t},()=>{k(),J(),u.unref()})})};u.ref(),w(null,R)})}}var lt=(e,t,n,i,u,l,f,s,p)=>ne(void 0,null,function*(){let b=[],w=[],T={},j={},U=[],L=0,Q=0,B=[],z=!1;s=[...s];for(let m of s){let d={};if(typeof m!="object")throw new Error(`Plugin at index ${Q} must be an object`);let o=r(m,d,"name",y);if(typeof o!="string"||o==="")throw new Error(`Plugin at index ${Q} is missing a name`);try{let x=r(m,d,"setup",Pe);if(typeof x!="function")throw new Error("Plugin is missing a setup function");K(m,d,`on plugin ${Y(o)}`);let $={name:o,onStart:!1,onEnd:!1,onResolve:[],onLoad:[]};Q++;let D=x({initialOptions:f,resolve:(a,c={})=>{if(!z)throw new Error('Cannot call "resolve" before plugin setup has completed');if(typeof a!="string")throw new Error("The path to resolve must be a string");let h=Object.create(null),E=r(c,h,"pluginName",y),C=r(c,h,"importer",y),S=r(c,h,"namespace",y),O=r(c,h,"resolveDir",y),A=r(c,h,"kind",y),R=r(c,h,"pluginData",ye);return K(c,h,"in resolve() call"),new Promise((k,F)=>{let P={command:"resolve",path:a,key:e,pluginName:o};if(E!=null&&(P.pluginName=E),C!=null&&(P.importer=C),S!=null&&(P.namespace=S),O!=null&&(P.resolveDir=O),A!=null)P.kind=A;else throw new Error('Must specify "kind" when calling "resolve"');R!=null&&(P.pluginData=p.store(R)),t(i,P,(q,_)=>{q!==null?F(new Error(q)):k({errors:ae(_.errors,p),warnings:ae(_.warnings,p),path:_.path,external:_.external,sideEffects:_.sideEffects,namespace:_.namespace,suffix:_.suffix,pluginData:p.load(_.pluginData)})})})},onStart(a){let c='This error came from the "onStart" callback registered here:',h=ge(new Error(c),u,"onStart");b.push({name:o,callback:a,note:h}),$.onStart=!0},onEnd(a){let c='This error came from the "onEnd" callback registered here:',h=ge(new Error(c),u,"onEnd");w.push({name:o,callback:a,note:h}),$.onEnd=!0},onResolve(a,c){let h='This error came from the "onResolve" callback registered here:',E=ge(new 
Error(h),u,"onResolve"),C={},S=r(a,C,"filter",he),O=r(a,C,"namespace",y);if(K(a,C,`in onResolve() call for plugin ${Y(o)}`),S==null)throw new Error("onResolve() call is missing a filter");let A=L++;T[A]={name:o,callback:c,note:E},$.onResolve.push({id:A,filter:S.source,namespace:O||""})},onLoad(a,c){let h='This error came from the "onLoad" callback registered here:',E=ge(new Error(h),u,"onLoad"),C={},S=r(a,C,"filter",he),O=r(a,C,"namespace",y);if(K(a,C,`in onLoad() call for plugin ${Y(o)}`),S==null)throw new Error("onLoad() call is missing a filter");let A=L++;j[A]={name:o,callback:c,note:E},$.onLoad.push({id:A,filter:S.source,namespace:O||""})},onDispose(a){U.push(a)},esbuild:u.esbuild});D&&(yield D),B.push($)}catch(x){return{ok:!1,error:x,pluginName:o}}}l["on-start"]=(m,d)=>ne(void 0,null,function*(){let o={errors:[],warnings:[]};yield Promise.all(b.map(D=>ne(void 0,[D],function*({name:x,callback:$,note:M}){try{let a=yield $();if(a!=null){if(typeof a!="object")throw new Error(`Expected onStart() callback in plugin ${Y(x)} to return an object`);let c={},h=r(a,c,"errors",W),E=r(a,c,"warnings",W);K(a,c,`from onStart() callback in plugin ${Y(x)}`),h!=null&&o.errors.push(...re(h,"errors",p,x)),E!=null&&o.warnings.push(...re(E,"warnings",p,x))}}catch(a){o.errors.push(le(a,u,p,M&&M(),x))}}))),n(m,o)}),l["on-resolve"]=(m,d)=>ne(void 0,null,function*(){let o={},x="",$,M;for(let D of d.ids)try{({name:x,callback:$,note:M}=T[D]);let a=yield $({path:d.path,importer:d.importer,namespace:d.namespace,resolveDir:d.resolveDir,kind:d.kind,pluginData:p.load(d.pluginData)});if(a!=null){if(typeof a!="object")throw new Error(`Expected onResolve() callback in plugin ${Y(x)} to return an object`);let c={},h=r(a,c,"pluginName",y),E=r(a,c,"path",y),C=r(a,c,"namespace",y),S=r(a,c,"suffix",y),O=r(a,c,"external",I),A=r(a,c,"sideEffects",I),R=r(a,c,"pluginData",ye),k=r(a,c,"errors",W),F=r(a,c,"warnings",W),P=r(a,c,"watchFiles",W),q=r(a,c,"watchDirs",W);K(a,c,`from onResolve() callback in plugin ${Y(x)}`),o.id=D,h!=null&&(o.pluginName=h),E!=null&&(o.path=E),C!=null&&(o.namespace=C),S!=null&&(o.suffix=S),O!=null&&(o.external=O),A!=null&&(o.sideEffects=A),R!=null&&(o.pluginData=p.store(R)),k!=null&&(o.errors=re(k,"errors",p,x)),F!=null&&(o.warnings=re(F,"warnings",p,x)),P!=null&&(o.watchFiles=me(P,"watchFiles")),q!=null&&(o.watchDirs=me(q,"watchDirs"));break}}catch(a){o={id:D,errors:[le(a,u,p,M&&M(),x)]};break}n(m,o)}),l["on-load"]=(m,d)=>ne(void 0,null,function*(){let o={},x="",$,M;for(let D of d.ids)try{({name:x,callback:$,note:M}=j[D]);let a=yield $({path:d.path,namespace:d.namespace,suffix:d.suffix,pluginData:p.load(d.pluginData)});if(a!=null){if(typeof a!="object")throw new Error(`Expected onLoad() callback in plugin ${Y(x)} to return an object`);let c={},h=r(a,c,"pluginName",y),E=r(a,c,"contents",Te),C=r(a,c,"resolveDir",y),S=r(a,c,"pluginData",ye),O=r(a,c,"loader",y),A=r(a,c,"errors",W),R=r(a,c,"warnings",W),k=r(a,c,"watchFiles",W),F=r(a,c,"watchDirs",W);K(a,c,`from onLoad() callback in plugin ${Y(x)}`),o.id=D,h!=null&&(o.pluginName=h),E instanceof Uint8Array?o.contents=E:E!=null&&(o.contents=Z(E)),C!=null&&(o.resolveDir=C),S!=null&&(o.pluginData=p.store(S)),O!=null&&(o.loader=O),A!=null&&(o.errors=re(A,"errors",p,x)),R!=null&&(o.warnings=re(R,"warnings",p,x)),k!=null&&(o.watchFiles=me(k,"watchFiles")),F!=null&&(o.watchDirs=me(F,"watchDirs"));break}}catch(a){o={id:D,errors:[le(a,u,p,M&&M(),x)]};break}n(m,o)});let J=(m,d)=>d([],[]);w.length>0&&(J=(m,d)=>{ne(void 0,null,function*(){let 
o=[],x=[];for(let{name:$,callback:M,note:D}of w){let a,c;try{let h=yield M(m);if(h!=null){if(typeof h!="object")throw new Error(`Expected onEnd() callback in plugin ${Y($)} to return an object`);let E={},C=r(h,E,"errors",W),S=r(h,E,"warnings",W);K(h,E,`from onEnd() callback in plugin ${Y($)}`),C!=null&&(a=re(C,"errors",p,$)),S!=null&&(c=re(S,"warnings",p,$))}}catch(h){a=[le(h,u,p,D&&D(),$)]}if(a){o.push(...a);try{m.errors.push(...a)}catch(h){}}if(c){x.push(...c);try{m.warnings.push(...c)}catch(h){}}}d(o,x)})});let v=()=>{for(let m of U)setTimeout(()=>m(),0)};return z=!0,{ok:!0,requestPlugins:B,runOnEndCallbacks:J,scheduleOnDisposeCallbacks:v}});function qe(){let e=new Map,t=0;return{load(n){return e.get(n)},store(n){if(n===void 0)return-1;let i=t++;return e.set(i,n),i}}}function ge(e,t,n){let i,u=!1;return()=>{if(u)return i;u=!0;try{let l=(e.stack+"").split(` -`);l.splice(1,1);let f=Ne(t,l,n);if(f)return i={text:e.message,location:f},i}catch(l){}}}function le(e,t,n,i,u){let l="Internal error",f=null;try{l=(e&&e.message||e)+""}catch(s){}try{f=Ne(t,(e.stack+"").split(` -`),"")}catch(s){}return{id:"",pluginName:u,text:l,location:f,notes:i?[i]:[],detail:n?n.store(e):-1}}function Ne(e,t,n){let i=" at ";if(e.readFileSync&&!t[0].startsWith(i)&&t[1].startsWith(i))for(let u=1;u{if(s===i)return` -...`;if(!f.location)return` -error: ${f.text}`;let{file:p,line:b,column:w}=f.location,T=f.pluginName?`[plugin: ${f.pluginName}] `:"";return` -${p}:${b}:${w}: ERROR: ${T}${f.text}`}).join(""),l=new Error(`${e}${u}`);return l.errors=t,l.warnings=n,l}function ae(e,t){for(let n of e)n.detail=t.load(n.detail);return e}function De(e,t){if(e==null)return null;let n={},i=r(e,n,"file",y),u=r(e,n,"namespace",y),l=r(e,n,"line",se),f=r(e,n,"column",se),s=r(e,n,"length",se),p=r(e,n,"lineText",y),b=r(e,n,"suggestion",y);return K(e,n,t),{file:i||"",namespace:u||"",line:l||0,column:f||0,length:s||0,lineText:p||"",suggestion:b||""}}function re(e,t,n,i){let u=[],l=0;for(let f of e){let s={},p=r(f,s,"id",y),b=r(f,s,"pluginName",y),w=r(f,s,"text",y),T=r(f,s,"location",Ae),j=r(f,s,"notes",W),U=r(f,s,"detail",ye),L=`in element ${l} of "${t}"`;K(f,s,L);let Q=[];if(j)for(let B of j){let z={},J=r(B,z,"text",y),v=r(B,z,"location",Ae);K(B,z,L),Q.push({text:J||"",location:De(v,L)})}u.push({id:p||"",pluginName:b||i,text:w||"",location:De(T,L),notes:Q,detail:n?n.store(U):-1}),l++}return u}function me(e,t){let n=[];for(let i of e){if(typeof i!="string")throw new Error(`${Y(t)} must be an array of strings`);n.push(i)}return n}function st({path:e,contents:t,hash:n}){let i=null;return{path:e,contents:t,hash:n,get text(){let u=this.contents;return(i===null||u!==t)&&(t=u,i=ie(u)),i}}}var at="0.19.0",ut=e=>de().build(e),ct=e=>de().context(e),ft=(e,t)=>de().transform(e,t),dt=(e,t)=>de().formatMessages(e,t),pt=(e,t)=>de().analyzeMetafile(e,t),gt=()=>{throw new Error('The "buildSync" API only works in node')},mt=()=>{throw new Error('The "transformSync" API only works in node')},yt=()=>{throw new Error('The "formatMessagesSync" API only works in node')},ht=()=>{throw new Error('The "analyzeMetafileSync" API only works in node')},ue,Se,de=()=>{if(Se)return Se;throw ue?new Error('You need to wait for the promise returned from "initialize" to be resolved before calling this'):new Error('You need to call "initialize" before calling this')},bt=e=>{e=Be(e||{});let t=e.wasmURL,n=e.wasmModule,i=e.worker!==!1;if(!t&&!n)throw new Error('Must provide either the "wasmURL" option or the "wasmModule" option');if(ue)throw new Error('Cannot call "initialize" 
more than once');return ue=wt(t||"",n,i),ue.catch(()=>{ue=void 0}),ue},wt=(e,t,n)=>ne(void 0,null,function*(){let i;if(n){let b=new Blob(['onmessage=(postMessage=>{\n// Copyright 2018 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\nvar y=(r,f,m)=>new Promise((c,n)=>{var s=h=>{try{l(m.next(h))}catch(u){n(u)}},i=h=>{try{l(m.throw(h))}catch(u){n(u)}},l=h=>h.done?c(h.value):Promise.resolve(h.value).then(s,i);l((m=m.apply(r,f)).next())});let onmessage,globalThis={};for(let r=self;r;r=Object.getPrototypeOf(r))for(let f of Object.getOwnPropertyNames(r))f in globalThis||Object.defineProperty(globalThis,f,{get:()=>self[f]});(()=>{const r=()=>{const c=new Error("not implemented");return c.code="ENOSYS",c};if(!globalThis.fs){let c="";globalThis.fs={constants:{O_WRONLY:-1,O_RDWR:-1,O_CREAT:-1,O_TRUNC:-1,O_APPEND:-1,O_EXCL:-1},writeSync(n,s){c+=m.decode(s);const i=c.lastIndexOf(`\n`);return i!=-1&&(console.log(c.substring(0,i)),c=c.substring(i+1)),s.length},write(n,s,i,l,h,u){if(i!==0||l!==s.length||h!==null){u(r());return}const g=this.writeSync(n,s);u(null,g)},chmod(n,s,i){i(r())},chown(n,s,i,l){l(r())},close(n,s){s(r())},fchmod(n,s,i){i(r())},fchown(n,s,i,l){l(r())},fstat(n,s){s(r())},fsync(n,s){s(null)},ftruncate(n,s,i){i(r())},lchown(n,s,i,l){l(r())},link(n,s,i){i(r())},lstat(n,s){s(r())},mkdir(n,s,i){i(r())},open(n,s,i,l){l(r())},read(n,s,i,l,h,u){u(r())},readdir(n,s){s(r())},readlink(n,s){s(r())},rename(n,s,i){i(r())},rmdir(n,s){s(r())},stat(n,s){s(r())},symlink(n,s,i){i(r())},truncate(n,s,i){i(r())},unlink(n,s){s(r())},utimes(n,s,i,l){l(r())}}}if(globalThis.process||(globalThis.process={getuid(){return-1},getgid(){return-1},geteuid(){return-1},getegid(){return-1},getgroups(){throw r()},pid:-1,ppid:-1,umask(){throw r()},cwd(){throw r()},chdir(){throw r()}}),!globalThis.crypto)throw new Error("globalThis.crypto is not available, polyfill required (crypto.getRandomValues only)");if(!globalThis.performance)throw new Error("globalThis.performance is not available, polyfill required (performance.now only)");if(!globalThis.TextEncoder)throw new Error("globalThis.TextEncoder is not available, polyfill required");if(!globalThis.TextDecoder)throw new Error("globalThis.TextDecoder is not available, polyfill required");const f=new TextEncoder("utf-8"),m=new TextDecoder("utf-8");globalThis.Go=class{constructor(){this.argv=["js"],this.env={},this.exit=e=>{e!==0&&console.warn("exit code:",e)},this._exitPromise=new Promise(e=>{this._resolveExitPromise=e}),this._pendingEvent=null,this._scheduledTimeouts=new Map,this._nextCallbackTimeoutID=1;const c=(e,t)=>{this.mem.setUint32(e+0,t,!0),this.mem.setUint32(e+4,Math.floor(t/4294967296),!0)},n=e=>{const t=this.mem.getUint32(e+0,!0),o=this.mem.getInt32(e+4,!0);return t+o*4294967296},s=e=>{const t=this.mem.getFloat64(e,!0);if(t===0)return;if(!isNaN(t))return t;const o=this.mem.getUint32(e,!0);return this._values[o]},i=(e,t)=>{if(typeof t=="number"&&t!==0){if(isNaN(t)){this.mem.setUint32(e+4,2146959360,!0),this.mem.setUint32(e,0,!0);return}this.mem.setFloat64(e,t,!0);return}if(t===void 0){this.mem.setFloat64(e,0,!0);return}let a=this._ids.get(t);a===void 0&&(a=this._idPool.pop(),a===void 0&&(a=this._values.length),this._values[a]=t,this._goRefCounts[a]=0,this._ids.set(t,a)),this._goRefCounts[a]++;let d=0;switch(typeof 
t){case"object":t!==null&&(d=1);break;case"string":d=2;break;case"symbol":d=3;break;case"function":d=4;break}this.mem.setUint32(e+4,2146959360|d,!0),this.mem.setUint32(e,a,!0)},l=e=>{const t=n(e+0),o=n(e+8);return new Uint8Array(this._inst.exports.mem.buffer,t,o)},h=e=>{const t=n(e+0),o=n(e+8),a=new Array(o);for(let d=0;d{const t=n(e+0),o=n(e+8);return m.decode(new DataView(this._inst.exports.mem.buffer,t,o))},g=Date.now()-performance.now();this.importObject={go:{"runtime.wasmExit":e=>{e>>>=0;const t=this.mem.getInt32(e+8,!0);this.exited=!0,delete this._inst,delete this._values,delete this._goRefCounts,delete this._ids,delete this._idPool,this.exit(t)},"runtime.wasmWrite":e=>{e>>>=0;const t=n(e+8),o=n(e+16),a=this.mem.getInt32(e+24,!0);globalThis.fs.writeSync(t,new Uint8Array(this._inst.exports.mem.buffer,o,a))},"runtime.resetMemoryDataView":e=>{e>>>=0,this.mem=new DataView(this._inst.exports.mem.buffer)},"runtime.nanotime1":e=>{e>>>=0,c(e+8,(g+performance.now())*1e6)},"runtime.walltime":e=>{e>>>=0;const t=new Date().getTime();c(e+8,t/1e3),this.mem.setInt32(e+16,t%1e3*1e6,!0)},"runtime.scheduleTimeoutEvent":e=>{e>>>=0;const t=this._nextCallbackTimeoutID;this._nextCallbackTimeoutID++,this._scheduledTimeouts.set(t,setTimeout(()=>{for(this._resume();this._scheduledTimeouts.has(t);)console.warn("scheduleTimeoutEvent: missed timeout event"),this._resume()},n(e+8)+1)),this.mem.setInt32(e+16,t,!0)},"runtime.clearTimeoutEvent":e=>{e>>>=0;const t=this.mem.getInt32(e+8,!0);clearTimeout(this._scheduledTimeouts.get(t)),this._scheduledTimeouts.delete(t)},"runtime.getRandomData":e=>{e>>>=0,crypto.getRandomValues(l(e+8))},"syscall/js.finalizeRef":e=>{e>>>=0;const t=this.mem.getUint32(e+8,!0);if(this._goRefCounts[t]--,this._goRefCounts[t]===0){const o=this._values[t];this._values[t]=null,this._ids.delete(o),this._idPool.push(t)}},"syscall/js.stringVal":e=>{e>>>=0,i(e+24,u(e+8))},"syscall/js.valueGet":e=>{e>>>=0;const t=Reflect.get(s(e+8),u(e+16));e=this._inst.exports.getsp()>>>0,i(e+32,t)},"syscall/js.valueSet":e=>{e>>>=0,Reflect.set(s(e+8),u(e+16),s(e+32))},"syscall/js.valueDelete":e=>{e>>>=0,Reflect.deleteProperty(s(e+8),u(e+16))},"syscall/js.valueIndex":e=>{e>>>=0,i(e+24,Reflect.get(s(e+8),n(e+16)))},"syscall/js.valueSetIndex":e=>{e>>>=0,Reflect.set(s(e+8),n(e+16),s(e+24))},"syscall/js.valueCall":e=>{e>>>=0;try{const t=s(e+8),o=Reflect.get(t,u(e+16)),a=h(e+32),d=Reflect.apply(o,t,a);e=this._inst.exports.getsp()>>>0,i(e+56,d),this.mem.setUint8(e+64,1)}catch(t){e=this._inst.exports.getsp()>>>0,i(e+56,t),this.mem.setUint8(e+64,0)}},"syscall/js.valueInvoke":e=>{e>>>=0;try{const t=s(e+8),o=h(e+16),a=Reflect.apply(t,void 0,o);e=this._inst.exports.getsp()>>>0,i(e+40,a),this.mem.setUint8(e+48,1)}catch(t){e=this._inst.exports.getsp()>>>0,i(e+40,t),this.mem.setUint8(e+48,0)}},"syscall/js.valueNew":e=>{e>>>=0;try{const t=s(e+8),o=h(e+16),a=Reflect.construct(t,o);e=this._inst.exports.getsp()>>>0,i(e+40,a),this.mem.setUint8(e+48,1)}catch(t){e=this._inst.exports.getsp()>>>0,i(e+40,t),this.mem.setUint8(e+48,0)}},"syscall/js.valueLength":e=>{e>>>=0,c(e+16,parseInt(s(e+8).length))},"syscall/js.valuePrepareString":e=>{e>>>=0;const t=f.encode(String(s(e+8)));i(e+16,t),c(e+24,t.length)},"syscall/js.valueLoadString":e=>{e>>>=0;const t=s(e+8);l(e+16).set(t)},"syscall/js.valueInstanceOf":e=>{e>>>=0,this.mem.setUint8(e+24,s(e+8)instanceof s(e+16)?1:0)},"syscall/js.copyBytesToGo":e=>{e>>>=0;const t=l(e+8),o=s(e+32);if(!(o instanceof Uint8Array||o instanceof Uint8ClampedArray)){this.mem.setUint8(e+48,0);return}const 
a=o.subarray(0,t.length);t.set(a),c(e+40,a.length),this.mem.setUint8(e+48,1)},"syscall/js.copyBytesToJS":e=>{e>>>=0;const t=s(e+8),o=l(e+16);if(!(t instanceof Uint8Array||t instanceof Uint8ClampedArray)){this.mem.setUint8(e+48,0);return}const a=o.subarray(0,t.length);t.set(a),c(e+40,a.length),this.mem.setUint8(e+48,1)},debug:e=>{console.log(e)}}}}run(c){return y(this,null,function*(){if(!(c instanceof WebAssembly.Instance))throw new Error("Go.run: WebAssembly.Instance expected");this._inst=c,this.mem=new DataView(this._inst.exports.mem.buffer),this._values=[NaN,0,null,!0,!1,globalThis,this],this._goRefCounts=new Array(this._values.length).fill(1/0),this._ids=new Map([[0,1],[null,2],[!0,3],[!1,4],[globalThis,5],[this,6]]),this._idPool=[],this.exited=!1;let n=4096;const s=e=>{const t=n,o=f.encode(e+"\\0");return new Uint8Array(this.mem.buffer,n,o.length).set(o),n+=o.length,n%8!==0&&(n+=8-n%8),t},i=this.argv.length,l=[];this.argv.forEach(e=>{l.push(s(e))}),l.push(0),Object.keys(this.env).sort().forEach(e=>{l.push(s(`${e}=${this.env[e]}`))}),l.push(0);const u=n;l.forEach(e=>{this.mem.setUint32(n,e,!0),this.mem.setUint32(n+4,0,!0),n+=8});const g=4096+8192;if(n>=g)throw new Error("total length of command line and environment variables exceeds limit");this._inst.exports.run(i,u),this.exited&&this._resolveExitPromise(),yield this._exitPromise})}_resume(){if(this.exited)throw new Error("Go program has already exited");this._inst.exports.resume(),this.exited&&this._resolveExitPromise()}_makeFuncWrapper(c){const n=this;return function(){const s={id:c,this:this,args:arguments};return n._pendingEvent=s,n._resume(),s.result}}}})(),onmessage=({data:r})=>{let f=new TextDecoder,m=globalThis.fs,c="";m.writeSync=(h,u)=>{if(h===1)postMessage(u);else if(h===2){c+=f.decode(u);let g=c.split(`\n`);g.length>1&&console.log(g.slice(0,-1).join(`\n`)),c=g[g.length-1]}else throw new Error("Bad write");return u.length};let n=[],s,i=0;onmessage=({data:h})=>{h.length>0&&(n.push(h),s&&s())},m.read=(h,u,g,e,t,o)=>{if(h!==0||g!==0||e!==u.length||t!==null)throw new Error("Bad read");if(n.length===0){s=()=>m.read(h,u,g,e,t,o);return}let a=n[0],d=Math.max(0,Math.min(e,a.length-i));u.set(a.subarray(i,i+d),g),i+=d,i===a.length&&(n.shift(),i=0),o(null,d)};let l=new globalThis.Go;l.argv=["","--service=0.19.0"],tryToInstantiateModule(r,l).then(h=>{postMessage(null),l.run(h)},h=>{postMessage(h)})};function tryToInstantiateModule(r,f){return y(this,null,function*(){if(r instanceof WebAssembly.Module)return WebAssembly.instantiate(r,f.importObject);const m=yield fetch(r);if(!m.ok)throw new Error(`Failed to download ${JSON.stringify(r)}`);if("instantiateStreaming"in WebAssembly&&/^application\\/wasm($|;)/i.test(m.headers.get("Content-Type")||""))return(yield WebAssembly.instantiateStreaming(m,f.importObject)).instance;const c=yield m.arrayBuffer();return(yield WebAssembly.instantiate(c,f.importObject)).instance})}return r=>onmessage(r);})(postMessage)'],{type:"text/javascript"});i=new Worker(URL.createObjectURL(b))}else{let b=(postMessage=>{ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
-var y=(r,f,m)=>new Promise((c,n)=>{var s=h=>{try{l(m.next(h))}catch(u){n(u)}},i=h=>{try{l(m.throw(h))}catch(u){n(u)}},l=h=>h.done?c(h.value):Promise.resolve(h.value).then(s,i);l((m=m.apply(r,f)).next())});let onmessage,globalThis={};for(let r=self;r;r=Object.getPrototypeOf(r))for(let f of Object.getOwnPropertyNames(r))f in globalThis||Object.defineProperty(globalThis,f,{get:()=>self[f]});(()=>{const r=()=>{const c=new Error("not implemented");return c.code="ENOSYS",c};if(!globalThis.fs){let c="";globalThis.fs={constants:{O_WRONLY:-1,O_RDWR:-1,O_CREAT:-1,O_TRUNC:-1,O_APPEND:-1,O_EXCL:-1},writeSync(n,s){c+=m.decode(s);const i=c.lastIndexOf(` -`);return i!=-1&&(console.log(c.substring(0,i)),c=c.substring(i+1)),s.length},write(n,s,i,l,h,u){if(i!==0||l!==s.length||h!==null){u(r());return}const g=this.writeSync(n,s);u(null,g)},chmod(n,s,i){i(r())},chown(n,s,i,l){l(r())},close(n,s){s(r())},fchmod(n,s,i){i(r())},fchown(n,s,i,l){l(r())},fstat(n,s){s(r())},fsync(n,s){s(null)},ftruncate(n,s,i){i(r())},lchown(n,s,i,l){l(r())},link(n,s,i){i(r())},lstat(n,s){s(r())},mkdir(n,s,i){i(r())},open(n,s,i,l){l(r())},read(n,s,i,l,h,u){u(r())},readdir(n,s){s(r())},readlink(n,s){s(r())},rename(n,s,i){i(r())},rmdir(n,s){s(r())},stat(n,s){s(r())},symlink(n,s,i){i(r())},truncate(n,s,i){i(r())},unlink(n,s){s(r())},utimes(n,s,i,l){l(r())}}}if(globalThis.process||(globalThis.process={getuid(){return-1},getgid(){return-1},geteuid(){return-1},getegid(){return-1},getgroups(){throw r()},pid:-1,ppid:-1,umask(){throw r()},cwd(){throw r()},chdir(){throw r()}}),!globalThis.crypto)throw new Error("globalThis.crypto is not available, polyfill required (crypto.getRandomValues only)");if(!globalThis.performance)throw new Error("globalThis.performance is not available, polyfill required (performance.now only)");if(!globalThis.TextEncoder)throw new Error("globalThis.TextEncoder is not available, polyfill required");if(!globalThis.TextDecoder)throw new Error("globalThis.TextDecoder is not available, polyfill required");const f=new TextEncoder("utf-8"),m=new TextDecoder("utf-8");globalThis.Go=class{constructor(){this.argv=["js"],this.env={},this.exit=e=>{e!==0&&console.warn("exit code:",e)},this._exitPromise=new Promise(e=>{this._resolveExitPromise=e}),this._pendingEvent=null,this._scheduledTimeouts=new Map,this._nextCallbackTimeoutID=1;const c=(e,t)=>{this.mem.setUint32(e+0,t,!0),this.mem.setUint32(e+4,Math.floor(t/4294967296),!0)},n=e=>{const t=this.mem.getUint32(e+0,!0),o=this.mem.getInt32(e+4,!0);return t+o*4294967296},s=e=>{const t=this.mem.getFloat64(e,!0);if(t===0)return;if(!isNaN(t))return t;const o=this.mem.getUint32(e,!0);return this._values[o]},i=(e,t)=>{if(typeof t=="number"&&t!==0){if(isNaN(t)){this.mem.setUint32(e+4,2146959360,!0),this.mem.setUint32(e,0,!0);return}this.mem.setFloat64(e,t,!0);return}if(t===void 0){this.mem.setFloat64(e,0,!0);return}let a=this._ids.get(t);a===void 0&&(a=this._idPool.pop(),a===void 0&&(a=this._values.length),this._values[a]=t,this._goRefCounts[a]=0,this._ids.set(t,a)),this._goRefCounts[a]++;let d=0;switch(typeof t){case"object":t!==null&&(d=1);break;case"string":d=2;break;case"symbol":d=3;break;case"function":d=4;break}this.mem.setUint32(e+4,2146959360|d,!0),this.mem.setUint32(e,a,!0)},l=e=>{const t=n(e+0),o=n(e+8);return new Uint8Array(this._inst.exports.mem.buffer,t,o)},h=e=>{const t=n(e+0),o=n(e+8),a=new Array(o);for(let d=0;d{const t=n(e+0),o=n(e+8);return m.decode(new 
DataView(this._inst.exports.mem.buffer,t,o))},g=Date.now()-performance.now();this.importObject={go:{"runtime.wasmExit":e=>{e>>>=0;const t=this.mem.getInt32(e+8,!0);this.exited=!0,delete this._inst,delete this._values,delete this._goRefCounts,delete this._ids,delete this._idPool,this.exit(t)},"runtime.wasmWrite":e=>{e>>>=0;const t=n(e+8),o=n(e+16),a=this.mem.getInt32(e+24,!0);globalThis.fs.writeSync(t,new Uint8Array(this._inst.exports.mem.buffer,o,a))},"runtime.resetMemoryDataView":e=>{e>>>=0,this.mem=new DataView(this._inst.exports.mem.buffer)},"runtime.nanotime1":e=>{e>>>=0,c(e+8,(g+performance.now())*1e6)},"runtime.walltime":e=>{e>>>=0;const t=new Date().getTime();c(e+8,t/1e3),this.mem.setInt32(e+16,t%1e3*1e6,!0)},"runtime.scheduleTimeoutEvent":e=>{e>>>=0;const t=this._nextCallbackTimeoutID;this._nextCallbackTimeoutID++,this._scheduledTimeouts.set(t,setTimeout(()=>{for(this._resume();this._scheduledTimeouts.has(t);)console.warn("scheduleTimeoutEvent: missed timeout event"),this._resume()},n(e+8)+1)),this.mem.setInt32(e+16,t,!0)},"runtime.clearTimeoutEvent":e=>{e>>>=0;const t=this.mem.getInt32(e+8,!0);clearTimeout(this._scheduledTimeouts.get(t)),this._scheduledTimeouts.delete(t)},"runtime.getRandomData":e=>{e>>>=0,crypto.getRandomValues(l(e+8))},"syscall/js.finalizeRef":e=>{e>>>=0;const t=this.mem.getUint32(e+8,!0);if(this._goRefCounts[t]--,this._goRefCounts[t]===0){const o=this._values[t];this._values[t]=null,this._ids.delete(o),this._idPool.push(t)}},"syscall/js.stringVal":e=>{e>>>=0,i(e+24,u(e+8))},"syscall/js.valueGet":e=>{e>>>=0;const t=Reflect.get(s(e+8),u(e+16));e=this._inst.exports.getsp()>>>0,i(e+32,t)},"syscall/js.valueSet":e=>{e>>>=0,Reflect.set(s(e+8),u(e+16),s(e+32))},"syscall/js.valueDelete":e=>{e>>>=0,Reflect.deleteProperty(s(e+8),u(e+16))},"syscall/js.valueIndex":e=>{e>>>=0,i(e+24,Reflect.get(s(e+8),n(e+16)))},"syscall/js.valueSetIndex":e=>{e>>>=0,Reflect.set(s(e+8),n(e+16),s(e+24))},"syscall/js.valueCall":e=>{e>>>=0;try{const t=s(e+8),o=Reflect.get(t,u(e+16)),a=h(e+32),d=Reflect.apply(o,t,a);e=this._inst.exports.getsp()>>>0,i(e+56,d),this.mem.setUint8(e+64,1)}catch(t){e=this._inst.exports.getsp()>>>0,i(e+56,t),this.mem.setUint8(e+64,0)}},"syscall/js.valueInvoke":e=>{e>>>=0;try{const t=s(e+8),o=h(e+16),a=Reflect.apply(t,void 0,o);e=this._inst.exports.getsp()>>>0,i(e+40,a),this.mem.setUint8(e+48,1)}catch(t){e=this._inst.exports.getsp()>>>0,i(e+40,t),this.mem.setUint8(e+48,0)}},"syscall/js.valueNew":e=>{e>>>=0;try{const t=s(e+8),o=h(e+16),a=Reflect.construct(t,o);e=this._inst.exports.getsp()>>>0,i(e+40,a),this.mem.setUint8(e+48,1)}catch(t){e=this._inst.exports.getsp()>>>0,i(e+40,t),this.mem.setUint8(e+48,0)}},"syscall/js.valueLength":e=>{e>>>=0,c(e+16,parseInt(s(e+8).length))},"syscall/js.valuePrepareString":e=>{e>>>=0;const t=f.encode(String(s(e+8)));i(e+16,t),c(e+24,t.length)},"syscall/js.valueLoadString":e=>{e>>>=0;const t=s(e+8);l(e+16).set(t)},"syscall/js.valueInstanceOf":e=>{e>>>=0,this.mem.setUint8(e+24,s(e+8)instanceof s(e+16)?1:0)},"syscall/js.copyBytesToGo":e=>{e>>>=0;const t=l(e+8),o=s(e+32);if(!(o instanceof Uint8Array||o instanceof Uint8ClampedArray)){this.mem.setUint8(e+48,0);return}const a=o.subarray(0,t.length);t.set(a),c(e+40,a.length),this.mem.setUint8(e+48,1)},"syscall/js.copyBytesToJS":e=>{e>>>=0;const t=s(e+8),o=l(e+16);if(!(t instanceof Uint8Array||t instanceof Uint8ClampedArray)){this.mem.setUint8(e+48,0);return}const a=o.subarray(0,t.length);t.set(a),c(e+40,a.length),this.mem.setUint8(e+48,1)},debug:e=>{console.log(e)}}}}run(c){return 
y(this,null,function*(){if(!(c instanceof WebAssembly.Instance))throw new Error("Go.run: WebAssembly.Instance expected");this._inst=c,this.mem=new DataView(this._inst.exports.mem.buffer),this._values=[NaN,0,null,!0,!1,globalThis,this],this._goRefCounts=new Array(this._values.length).fill(1/0),this._ids=new Map([[0,1],[null,2],[!0,3],[!1,4],[globalThis,5],[this,6]]),this._idPool=[],this.exited=!1;let n=4096;const s=e=>{const t=n,o=f.encode(e+"\0");return new Uint8Array(this.mem.buffer,n,o.length).set(o),n+=o.length,n%8!==0&&(n+=8-n%8),t},i=this.argv.length,l=[];this.argv.forEach(e=>{l.push(s(e))}),l.push(0),Object.keys(this.env).sort().forEach(e=>{l.push(s(`${e}=${this.env[e]}`))}),l.push(0);const u=n;l.forEach(e=>{this.mem.setUint32(n,e,!0),this.mem.setUint32(n+4,0,!0),n+=8});const g=4096+8192;if(n>=g)throw new Error("total length of command line and environment variables exceeds limit");this._inst.exports.run(i,u),this.exited&&this._resolveExitPromise(),yield this._exitPromise})}_resume(){if(this.exited)throw new Error("Go program has already exited");this._inst.exports.resume(),this.exited&&this._resolveExitPromise()}_makeFuncWrapper(c){const n=this;return function(){const s={id:c,this:this,args:arguments};return n._pendingEvent=s,n._resume(),s.result}}}})(),onmessage=({data:r})=>{let f=new TextDecoder,m=globalThis.fs,c="";m.writeSync=(h,u)=>{if(h===1)postMessage(u);else if(h===2){c+=f.decode(u);let g=c.split(` -`);g.length>1&&console.log(g.slice(0,-1).join(` -`)),c=g[g.length-1]}else throw new Error("Bad write");return u.length};let n=[],s,i=0;onmessage=({data:h})=>{h.length>0&&(n.push(h),s&&s())},m.read=(h,u,g,e,t,o)=>{if(h!==0||g!==0||e!==u.length||t!==null)throw new Error("Bad read");if(n.length===0){s=()=>m.read(h,u,g,e,t,o);return}let a=n[0],d=Math.max(0,Math.min(e,a.length-i));u.set(a.subarray(i,i+d),g),i+=d,i===a.length&&(n.shift(),i=0),o(null,d)};let l=new globalThis.Go;l.argv=["","--service=0.19.0"],tryToInstantiateModule(r,l).then(h=>{postMessage(null),l.run(h)},h=>{postMessage(h)})};function tryToInstantiateModule(r,f){return y(this,null,function*(){if(r instanceof WebAssembly.Module)return WebAssembly.instantiate(r,f.importObject);const m=yield fetch(r);if(!m.ok)throw new Error(`Failed to download ${JSON.stringify(r)}`);if("instantiateStreaming"in WebAssembly&&/^application\/wasm($|;)/i.test(m.headers.get("Content-Type")||""))return(yield WebAssembly.instantiateStreaming(m,f.importObject)).instance;const c=yield m.arrayBuffer();return(yield WebAssembly.instantiate(c,f.importObject)).instance})}return r=>onmessage(r);})(w=>i.onmessage({data:w}));i={onmessage:null,postMessage:w=>setTimeout(()=>b({data:w})),terminate(){}}}let u,l,f=new Promise((b,w)=>{u=b,l=w});i.onmessage=({data:b})=>{i.onmessage=({data:w})=>s(w),b?l(b):u()},i.postMessage(t||new URL(e,location.href).toString());let{readFromStdout:s,service:p}=Le({writeToStdin(b){i.postMessage(b)},isSync:!1,hasFS:!1,esbuild:we});yield f,Se={build:b=>new Promise((w,T)=>p.buildOrContext({callName:"build",refs:null,options:b,isTTY:!1,defaultWD:"/",callback:(j,U)=>j?T(j):w(U)})),context:b=>new Promise((w,T)=>p.buildOrContext({callName:"context",refs:null,options:b,isTTY:!1,defaultWD:"/",callback:(j,U)=>j?T(j):w(U)})),transform:(b,w)=>new Promise((T,j)=>p.transform({callName:"transform",refs:null,input:b,options:w||{},isTTY:!1,fs:{readFile(U,L){L(new Error("Internal error"),null)},writeFile(U,L){L(null)}},callback:(U,L)=>U?j(U):T(L)})),formatMessages:(b,w)=>new 
Promise((T,j)=>p.formatMessages({callName:"formatMessages",refs:null,messages:b,options:w,callback:(U,L)=>U?j(U):T(L)})),analyzeMetafile:(b,w)=>new Promise((T,j)=>p.analyzeMetafile({callName:"analyzeMetafile",refs:null,metafile:typeof b=="string"?b:JSON.stringify(b),options:w,callback:(U,L)=>U?j(U):T(L)}))}}),vt=we; -})(typeof module==="object"?module:{set exports(x){(typeof self!=="undefined"?self:this).esbuild=x}}); diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/core/function_base.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/core/function_base.py deleted file mode 100644 index 00e4e6b0ea843fc5fde7a82f16a1e4c31bb65959..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/core/function_base.py +++ /dev/null @@ -1,551 +0,0 @@ -import functools -import warnings -import operator -import types - -import numpy as np -from . import numeric as _nx -from .numeric import result_type, NaN, asanyarray, ndim -from numpy.core.multiarray import add_docstring -from numpy.core import overrides - -__all__ = ['logspace', 'linspace', 'geomspace'] - - -array_function_dispatch = functools.partial( - overrides.array_function_dispatch, module='numpy') - - -def _linspace_dispatcher(start, stop, num=None, endpoint=None, retstep=None, - dtype=None, axis=None): - return (start, stop) - - -@array_function_dispatch(_linspace_dispatcher) -def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, - axis=0): - """ - Return evenly spaced numbers over a specified interval. - - Returns `num` evenly spaced samples, calculated over the - interval [`start`, `stop`]. - - The endpoint of the interval can optionally be excluded. - - .. versionchanged:: 1.16.0 - Non-scalar `start` and `stop` are now supported. - - .. versionchanged:: 1.20.0 - Values are rounded towards ``-inf`` instead of ``0`` when an - integer ``dtype`` is specified. The old behavior can - still be obtained with ``np.linspace(start, stop, num).astype(int)`` - - Parameters - ---------- - start : array_like - The starting value of the sequence. - stop : array_like - The end value of the sequence, unless `endpoint` is set to False. - In that case, the sequence consists of all but the last of ``num + 1`` - evenly spaced samples, so that `stop` is excluded. Note that the step - size changes when `endpoint` is False. - num : int, optional - Number of samples to generate. Default is 50. Must be non-negative. - endpoint : bool, optional - If True, `stop` is the last sample. Otherwise, it is not included. - Default is True. - retstep : bool, optional - If True, return (`samples`, `step`), where `step` is the spacing - between samples. - dtype : dtype, optional - The type of the output array. If `dtype` is not given, the data type - is inferred from `start` and `stop`. The inferred dtype will never be - an integer; `float` is chosen even if the arguments would produce an - array of integers. - - .. versionadded:: 1.9.0 - - axis : int, optional - The axis in the result to store the samples. Relevant only if start - or stop are array-like. By default (0), the samples will be along a - new axis inserted at the beginning. Use -1 to get an axis at the end. - - .. versionadded:: 1.16.0 - - Returns - ------- - samples : ndarray - There are `num` equally spaced samples in the closed interval - ``[start, stop]`` or the half-open interval ``[start, stop)`` - (depending on whether `endpoint` is True or False). 
- step : float, optional - Only returned if `retstep` is True - - Size of spacing between samples. - - - See Also - -------- - arange : Similar to `linspace`, but uses a step size (instead of the - number of samples). - geomspace : Similar to `linspace`, but with numbers spaced evenly on a log - scale (a geometric progression). - logspace : Similar to `geomspace`, but with the end points specified as - logarithms. - :ref:`how-to-partition` - - Examples - -------- - >>> np.linspace(2.0, 3.0, num=5) - array([2. , 2.25, 2.5 , 2.75, 3. ]) - >>> np.linspace(2.0, 3.0, num=5, endpoint=False) - array([2. , 2.2, 2.4, 2.6, 2.8]) - >>> np.linspace(2.0, 3.0, num=5, retstep=True) - (array([2. , 2.25, 2.5 , 2.75, 3. ]), 0.25) - - Graphical illustration: - - >>> import matplotlib.pyplot as plt - >>> N = 8 - >>> y = np.zeros(N) - >>> x1 = np.linspace(0, 10, N, endpoint=True) - >>> x2 = np.linspace(0, 10, N, endpoint=False) - >>> plt.plot(x1, y, 'o') - [] - >>> plt.plot(x2, y + 0.5, 'o') - [] - >>> plt.ylim([-0.5, 1]) - (-0.5, 1) - >>> plt.show() - - """ - num = operator.index(num) - if num < 0: - raise ValueError("Number of samples, %s, must be non-negative." % num) - div = (num - 1) if endpoint else num - - # Convert float/complex array scalars to float, gh-3504 - # and make sure one can use variables that have an __array_interface__, gh-6634 - start = asanyarray(start) * 1.0 - stop = asanyarray(stop) * 1.0 - - dt = result_type(start, stop, float(num)) - if dtype is None: - dtype = dt - integer_dtype = False - else: - integer_dtype = _nx.issubdtype(dtype, _nx.integer) - - delta = stop - start - y = _nx.arange(0, num, dtype=dt).reshape((-1,) + (1,) * ndim(delta)) - # In-place multiplication y *= delta/div is faster, but prevents the multiplicant - # from overriding what class is produced, and thus prevents, e.g. use of Quantities, - # see gh-7142. Hence, we multiply in place only for standard scalar types. - if div > 0: - _mult_inplace = _nx.isscalar(delta) - step = delta / div - any_step_zero = ( - step == 0 if _mult_inplace else _nx.asanyarray(step == 0).any()) - if any_step_zero: - # Special handling for denormal numbers, gh-5437 - y /= div - if _mult_inplace: - y *= delta - else: - y = y * delta - else: - if _mult_inplace: - y *= step - else: - y = y * step - else: - # sequences with 0 items or 1 item with endpoint=True (i.e. div <= 0) - # have an undefined step - step = NaN - # Multiply with delta to allow possible override of output class. - y = y * delta - - y += start - - if endpoint and num > 1: - y[-1, ...] = stop - - if axis != 0: - y = _nx.moveaxis(y, 0, axis) - - if integer_dtype: - _nx.floor(y, out=y) - - if retstep: - return y.astype(dtype, copy=False), step - else: - return y.astype(dtype, copy=False) - - -def _logspace_dispatcher(start, stop, num=None, endpoint=None, base=None, - dtype=None, axis=None): - return (start, stop, base) - - -@array_function_dispatch(_logspace_dispatcher) -def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, - axis=0): - """ - Return numbers spaced evenly on a log scale. - - In linear space, the sequence starts at ``base ** start`` - (`base` to the power of `start`) and ends with ``base ** stop`` - (see `endpoint` below). - - .. versionchanged:: 1.16.0 - Non-scalar `start` and `stop` are now supported. - - .. versionchanged:: 1.25.0 - Non-scalar 'base` is now supported - - Parameters - ---------- - start : array_like - ``base ** start`` is the starting value of the sequence. 
- stop : array_like - ``base ** stop`` is the final value of the sequence, unless `endpoint` - is False. In that case, ``num + 1`` values are spaced over the - interval in log-space, of which all but the last (a sequence of - length `num`) are returned. - num : integer, optional - Number of samples to generate. Default is 50. - endpoint : boolean, optional - If true, `stop` is the last sample. Otherwise, it is not included. - Default is True. - base : array_like, optional - The base of the log space. The step size between the elements in - ``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform. - Default is 10.0. - dtype : dtype - The type of the output array. If `dtype` is not given, the data type - is inferred from `start` and `stop`. The inferred type will never be - an integer; `float` is chosen even if the arguments would produce an - array of integers. - axis : int, optional - The axis in the result to store the samples. Relevant only if start, - stop, or base are array-like. By default (0), the samples will be - along a new axis inserted at the beginning. Use -1 to get an axis at - the end. - - .. versionadded:: 1.16.0 - - - Returns - ------- - samples : ndarray - `num` samples, equally spaced on a log scale. - - See Also - -------- - arange : Similar to linspace, with the step size specified instead of the - number of samples. Note that, when used with a float endpoint, the - endpoint may or may not be included. - linspace : Similar to logspace, but with the samples uniformly distributed - in linear space, instead of log space. - geomspace : Similar to logspace, but with endpoints specified directly. - :ref:`how-to-partition` - - Notes - ----- - If base is a scalar, logspace is equivalent to the code - - >>> y = np.linspace(start, stop, num=num, endpoint=endpoint) - ... # doctest: +SKIP - >>> power(base, y).astype(dtype) - ... # doctest: +SKIP - - Examples - -------- - >>> np.logspace(2.0, 3.0, num=4) - array([ 100. , 215.443469 , 464.15888336, 1000. ]) - >>> np.logspace(2.0, 3.0, num=4, endpoint=False) - array([100. , 177.827941 , 316.22776602, 562.34132519]) - >>> np.logspace(2.0, 3.0, num=4, base=2.0) - array([4. , 5.0396842 , 6.34960421, 8. ]) - >>> np.logspace(2.0, 3.0, num=4, base=[2.0, 3.0], axis=-1) - array([[ 4. , 5.0396842 , 6.34960421, 8. ], - [ 9. , 12.98024613, 18.72075441, 27. ]]) - - Graphical illustration: - - >>> import matplotlib.pyplot as plt - >>> N = 10 - >>> x1 = np.logspace(0.1, 1, N, endpoint=True) - >>> x2 = np.logspace(0.1, 1, N, endpoint=False) - >>> y = np.zeros(N) - >>> plt.plot(x1, y, 'o') - [] - >>> plt.plot(x2, y + 0.5, 'o') - [] - >>> plt.ylim([-0.5, 1]) - (-0.5, 1) - >>> plt.show() - - """ - ndmax = np.broadcast(start, stop, base).ndim - start, stop, base = ( - np.array(a, copy=False, subok=True, ndmin=ndmax) - for a in (start, stop, base) - ) - y = linspace(start, stop, num=num, endpoint=endpoint, axis=axis) - base = np.expand_dims(base, axis=axis) - if dtype is None: - return _nx.power(base, y) - return _nx.power(base, y).astype(dtype, copy=False) - - -def _geomspace_dispatcher(start, stop, num=None, endpoint=None, dtype=None, - axis=None): - return (start, stop) - - -@array_function_dispatch(_geomspace_dispatcher) -def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0): - """ - Return numbers spaced evenly on a log scale (a geometric progression). - - This is similar to `logspace`, but with endpoints specified directly. - Each output sample is a constant multiple of the previous. - - .. 
versionchanged:: 1.16.0 - Non-scalar `start` and `stop` are now supported. - - Parameters - ---------- - start : array_like - The starting value of the sequence. - stop : array_like - The final value of the sequence, unless `endpoint` is False. - In that case, ``num + 1`` values are spaced over the - interval in log-space, of which all but the last (a sequence of - length `num`) are returned. - num : integer, optional - Number of samples to generate. Default is 50. - endpoint : boolean, optional - If true, `stop` is the last sample. Otherwise, it is not included. - Default is True. - dtype : dtype - The type of the output array. If `dtype` is not given, the data type - is inferred from `start` and `stop`. The inferred dtype will never be - an integer; `float` is chosen even if the arguments would produce an - array of integers. - axis : int, optional - The axis in the result to store the samples. Relevant only if start - or stop are array-like. By default (0), the samples will be along a - new axis inserted at the beginning. Use -1 to get an axis at the end. - - .. versionadded:: 1.16.0 - - Returns - ------- - samples : ndarray - `num` samples, equally spaced on a log scale. - - See Also - -------- - logspace : Similar to geomspace, but with endpoints specified using log - and base. - linspace : Similar to geomspace, but with arithmetic instead of geometric - progression. - arange : Similar to linspace, with the step size specified instead of the - number of samples. - :ref:`how-to-partition` - - Notes - ----- - If the inputs or dtype are complex, the output will follow a logarithmic - spiral in the complex plane. (There are an infinite number of spirals - passing through two points; the output will follow the shortest such path.) - - Examples - -------- - >>> np.geomspace(1, 1000, num=4) - array([ 1., 10., 100., 1000.]) - >>> np.geomspace(1, 1000, num=3, endpoint=False) - array([ 1., 10., 100.]) - >>> np.geomspace(1, 1000, num=4, endpoint=False) - array([ 1. , 5.62341325, 31.6227766 , 177.827941 ]) - >>> np.geomspace(1, 256, num=9) - array([ 1., 2., 4., 8., 16., 32., 64., 128., 256.]) - - Note that the above may not produce exact integers: - - >>> np.geomspace(1, 256, num=9, dtype=int) - array([ 1, 2, 4, 7, 16, 32, 63, 127, 256]) - >>> np.around(np.geomspace(1, 256, num=9)).astype(int) - array([ 1, 2, 4, 8, 16, 32, 64, 128, 256]) - - Negative, decreasing, and complex inputs are allowed: - - >>> np.geomspace(1000, 1, num=4) - array([1000., 100., 10., 1.]) - >>> np.geomspace(-1000, -1, num=4) - array([-1000., -100., -10., -1.]) - >>> np.geomspace(1j, 1000j, num=4) # Straight line - array([0. +1.j, 0. +10.j, 0. 
+100.j, 0.+1000.j]) - >>> np.geomspace(-1+0j, 1+0j, num=5) # Circle - array([-1.00000000e+00+1.22464680e-16j, -7.07106781e-01+7.07106781e-01j, - 6.12323400e-17+1.00000000e+00j, 7.07106781e-01+7.07106781e-01j, - 1.00000000e+00+0.00000000e+00j]) - - Graphical illustration of `endpoint` parameter: - - >>> import matplotlib.pyplot as plt - >>> N = 10 - >>> y = np.zeros(N) - >>> plt.semilogx(np.geomspace(1, 1000, N, endpoint=True), y + 1, 'o') - [] - >>> plt.semilogx(np.geomspace(1, 1000, N, endpoint=False), y + 2, 'o') - [] - >>> plt.axis([0.5, 2000, 0, 3]) - [0.5, 2000, 0, 3] - >>> plt.grid(True, color='0.7', linestyle='-', which='both', axis='both') - >>> plt.show() - - """ - start = asanyarray(start) - stop = asanyarray(stop) - if _nx.any(start == 0) or _nx.any(stop == 0): - raise ValueError('Geometric sequence cannot include zero') - - dt = result_type(start, stop, float(num), _nx.zeros((), dtype)) - if dtype is None: - dtype = dt - else: - # complex to dtype('complex128'), for instance - dtype = _nx.dtype(dtype) - - # Promote both arguments to the same dtype in case, for instance, one is - # complex and another is negative and log would produce NaN otherwise. - # Copy since we may change things in-place further down. - start = start.astype(dt, copy=True) - stop = stop.astype(dt, copy=True) - - out_sign = _nx.ones(_nx.broadcast(start, stop).shape, dt) - # Avoid negligible real or imaginary parts in output by rotating to - # positive real, calculating, then undoing rotation - if _nx.issubdtype(dt, _nx.complexfloating): - all_imag = (start.real == 0.) & (stop.real == 0.) - if _nx.any(all_imag): - start[all_imag] = start[all_imag].imag - stop[all_imag] = stop[all_imag].imag - out_sign[all_imag] = 1j - - both_negative = (_nx.sign(start) == -1) & (_nx.sign(stop) == -1) - if _nx.any(both_negative): - _nx.negative(start, out=start, where=both_negative) - _nx.negative(stop, out=stop, where=both_negative) - _nx.negative(out_sign, out=out_sign, where=both_negative) - - log_start = _nx.log10(start) - log_stop = _nx.log10(stop) - result = logspace(log_start, log_stop, num=num, - endpoint=endpoint, base=10.0, dtype=dtype) - - # Make sure the endpoints match the start and stop arguments. This is - # necessary because np.exp(np.log(x)) is not necessarily equal to x. - if num > 0: - result[0] = start - if num > 1 and endpoint: - result[-1] = stop - - result = out_sign * result - - if axis != 0: - result = _nx.moveaxis(result, 0, axis) - - return result.astype(dtype, copy=False) - - -def _needs_add_docstring(obj): - """ - Returns true if the only way to set the docstring of `obj` from python is - via add_docstring. - - This function errs on the side of being overly conservative. - """ - Py_TPFLAGS_HEAPTYPE = 1 << 9 - - if isinstance(obj, (types.FunctionType, types.MethodType, property)): - return False - - if isinstance(obj, type) and obj.__flags__ & Py_TPFLAGS_HEAPTYPE: - return False - - return True - - -def _add_docstring(obj, doc, warn_on_python): - if warn_on_python and not _needs_add_docstring(obj): - warnings.warn( - "add_newdoc was used on a pure-python object {}. " - "Prefer to attach it directly to the source." - .format(obj), - UserWarning, - stacklevel=3) - try: - add_docstring(obj, doc) - except Exception: - pass - - -def add_newdoc(place, obj, doc, warn_on_python=True): - """ - Add documentation to an existing object, typically one defined in C - - The purpose is to allow easier editing of the docstrings without requiring - a re-compile. 
This exists primarily for internal use within numpy itself. - - Parameters - ---------- - place : str - The absolute name of the module to import from - obj : str - The name of the object to add documentation to, typically a class or - function name - doc : {str, Tuple[str, str], List[Tuple[str, str]]} - If a string, the documentation to apply to `obj` - - If a tuple, then the first element is interpreted as an attribute of - `obj` and the second as the docstring to apply - ``(method, docstring)`` - - If a list, then each element of the list should be a tuple of length - two - ``[(method1, docstring1), (method2, docstring2), ...]`` - warn_on_python : bool - If True, the default, emit `UserWarning` if this is used to attach - documentation to a pure-python object. - - Notes - ----- - This routine never raises an error if the docstring can't be written, but - will raise an error if the object being documented does not exist. - - This routine cannot modify read-only docstrings, as appear - in new-style classes or built-in functions. Because this - routine never raises an error the caller must check manually - that the docstrings were changed. - - Since this function grabs the ``char *`` from a c-level str object and puts - it into the ``tp_doc`` slot of the type of `obj`, it violates a number of - C-API best-practices, by: - - - modifying a `PyTypeObject` after calling `PyType_Ready` - - calling `Py_INCREF` on the str and losing the reference, so the str - will never be released - - If possible it should be avoided. - """ - new = getattr(__import__(place, globals(), {}, [obj]), obj) - if isinstance(doc, str): - _add_docstring(new, doc.strip(), warn_on_python) - elif isinstance(doc, tuple): - attr, docstring = doc - _add_docstring(getattr(new, attr), docstring.strip(), warn_on_python) - elif isinstance(doc, list): - for attr, docstring in doc: - _add_docstring(getattr(new, attr), docstring.strip(), warn_on_python) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/f2py/tests/src/return_real/foo77.f b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/f2py/tests/src/return_real/foo77.f deleted file mode 100644 index bf43dbf11773d8282f3b9a7d7c4ba9da23ee6f27..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/f2py/tests/src/return_real/foo77.f +++ /dev/null @@ -1,45 +0,0 @@ - function t0(value) - real value - real t0 - t0 = value - end - function t4(value) - real*4 value - real*4 t4 - t4 = value - end - function t8(value) - real*8 value - real*8 t8 - t8 = value - end - function td(value) - double precision value - double precision td - td = value - end - - subroutine s0(t0,value) - real value - real t0 -cf2py intent(out) t0 - t0 = value - end - subroutine s4(t4,value) - real*4 value - real*4 t4 -cf2py intent(out) t4 - t4 = value - end - subroutine s8(t8,value) - real*8 value - real*8 t8 -cf2py intent(out) t8 - t8 = value - end - subroutine sd(td,value) - double precision value - double precision td -cf2py intent(out) td - td = value - end diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/compat/numpy/__init__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/compat/numpy/__init__.py deleted file mode 100644 index 51c9892b64a0862213807e03e7edc9f803609817..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/compat/numpy/__init__.py +++ /dev/null @@ -1,51 +0,0 @@ 
-""" support numpy compatibility across versions """ -import warnings - -import numpy as np - -from pandas.util.version import Version - -# numpy versioning -_np_version = np.__version__ -_nlv = Version(_np_version) -np_version_gte1p24 = _nlv >= Version("1.24") -np_version_gte1p24p3 = _nlv >= Version("1.24.3") -np_version_gte1p25 = _nlv >= Version("1.25") -is_numpy_dev = _nlv.dev is not None -_min_numpy_ver = "1.22.4" - - -if _nlv < Version(_min_numpy_ver): - raise ImportError( - f"this version of pandas is incompatible with numpy < {_min_numpy_ver}\n" - f"your numpy version is {_np_version}.\n" - f"Please upgrade numpy to >= {_min_numpy_ver} to use this pandas version" - ) - - -np_long: type -np_ulong: type - -if _nlv >= Version("2.0.0.dev0"): - try: - with warnings.catch_warnings(): - warnings.filterwarnings( - "ignore", - r".*In the future `np\.long` will be defined as.*", - FutureWarning, - ) - np_long = np.long # type: ignore[attr-defined] - np_ulong = np.ulong # type: ignore[attr-defined] - except AttributeError: - np_long = np.int_ - np_ulong = np.uint -else: - np_long = np.int_ - np_ulong = np.uint - - -__all__ = [ - "np", - "_np_version", - "is_numpy_dev", -] diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/base/test_fillna.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/base/test_fillna.py deleted file mode 100644 index 7300d3013305a7ca08312ae85cc42ae8950acf23..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/base/test_fillna.py +++ /dev/null @@ -1,60 +0,0 @@ -""" -Though Index.fillna and Series.fillna has separate impl, -test here to confirm these works as the same -""" - -import numpy as np -import pytest - -from pandas import MultiIndex -import pandas._testing as tm -from pandas.tests.base.common import allow_na_ops - - -def test_fillna(index_or_series_obj): - # GH 11343 - obj = index_or_series_obj - - if isinstance(obj, MultiIndex): - msg = "isna is not defined for MultiIndex" - with pytest.raises(NotImplementedError, match=msg): - obj.fillna(0) - return - - # values will not be changed - fill_value = obj.values[0] if len(obj) > 0 else 0 - result = obj.fillna(fill_value) - - tm.assert_equal(obj, result) - - # check shallow_copied - assert obj is not result - - -@pytest.mark.parametrize("null_obj", [np.nan, None]) -def test_fillna_null(null_obj, index_or_series_obj): - # GH 11343 - obj = index_or_series_obj - klass = type(obj) - - if not allow_na_ops(obj): - pytest.skip(f"{klass} doesn't allow for NA operations") - elif len(obj) < 1: - pytest.skip("Test doesn't make sense on empty data") - elif isinstance(obj, MultiIndex): - pytest.skip(f"MultiIndex can't hold '{null_obj}'") - - values = obj._values - fill_value = values[0] - expected = values.copy() - values[0:2] = null_obj - expected[0:2] = fill_value - - expected = klass(expected) - obj = klass(values) - - result = obj.fillna(fill_value) - tm.assert_equal(result, expected) - - # check shallow_copied - assert obj is not result diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/base_class/test_pickle.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/base_class/test_pickle.py deleted file mode 100644 index c670921decb78808fa54a35c45e3d2d15ab57a67..0000000000000000000000000000000000000000 --- 
a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/base_class/test_pickle.py +++ /dev/null @@ -1,11 +0,0 @@ -from pandas import Index -import pandas._testing as tm - - -def test_pickle_preserves_object_dtype(): - # GH#43188, GH#43155 don't infer numeric dtype - index = Index([1, 2, 3], dtype=object) - - result = tm.round_trip_pickle(index) - assert result.dtype == object - tm.assert_index_equal(index, result) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/series/methods/test_astype.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/series/methods/test_astype.py deleted file mode 100644 index 03fc6cba2902ae9de115bee7320ed61ece691937..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/series/methods/test_astype.py +++ /dev/null @@ -1,642 +0,0 @@ -from datetime import ( - datetime, - timedelta, -) -from importlib import reload -import string -import sys - -import numpy as np -import pytest - -from pandas._libs.tslibs import iNaT -import pandas.util._test_decorators as td - -from pandas import ( - NA, - Categorical, - CategoricalDtype, - DatetimeTZDtype, - Index, - Interval, - NaT, - Series, - Timedelta, - Timestamp, - cut, - date_range, -) -import pandas._testing as tm - - -def rand_str(nchars: int) -> str: - """ - Generate one random byte string. - """ - RANDS_CHARS = np.array( - list(string.ascii_letters + string.digits), dtype=(np.str_, 1) - ) - return "".join(np.random.default_rng(2).choice(RANDS_CHARS, nchars)) - - -class TestAstypeAPI: - def test_astype_unitless_dt64_raises(self): - # GH#47844 - ser = Series(["1970-01-01", "1970-01-01", "1970-01-01"], dtype="datetime64[ns]") - df = ser.to_frame() - - msg = "Casting to unit-less dtype 'datetime64' is not supported" - with pytest.raises(TypeError, match=msg): - ser.astype(np.datetime64) - with pytest.raises(TypeError, match=msg): - df.astype(np.datetime64) - with pytest.raises(TypeError, match=msg): - ser.astype("datetime64") - with pytest.raises(TypeError, match=msg): - df.astype("datetime64") - - def test_arg_for_errors_in_astype(self): - # see GH#14878 - ser = Series([1, 2, 3]) - - msg = ( - r"Expected value of kwarg 'errors' to be one of \['raise', " - r"'ignore'\]\. Supplied value is 'False'" - ) - with pytest.raises(ValueError, match=msg): - ser.astype(np.float64, errors=False) - - ser.astype(np.int8, errors="raise") - - @pytest.mark.parametrize("dtype_class", [dict, Series]) - def test_astype_dict_like(self, dtype_class): - # see GH#7271 - ser = Series(range(0, 10, 2), name="abc") - - dt1 = dtype_class({"abc": str}) - result = ser.astype(dt1) - expected = Series(["0", "2", "4", "6", "8"], name="abc") - tm.assert_series_equal(result, expected) - - dt2 = dtype_class({"abc": "float64"}) - result = ser.astype(dt2) - expected = Series([0.0, 2.0, 4.0, 6.0, 8.0], dtype="float64", name="abc") - tm.assert_series_equal(result, expected) - - dt3 = dtype_class({"abc": str, "def": str}) - msg = ( - "Only the Series name can be used for the key in Series dtype " - r"mappings\." 
- ) - with pytest.raises(KeyError, match=msg): - ser.astype(dt3) - - dt4 = dtype_class({0: str}) - with pytest.raises(KeyError, match=msg): - ser.astype(dt4) - - # GH#16717 - # if dtypes provided is empty, it should error - if dtype_class is Series: - dt5 = dtype_class({}, dtype=object) - else: - dt5 = dtype_class({}) - - with pytest.raises(KeyError, match=msg): - ser.astype(dt5) - - -class TestAstype: - def test_astype_mixed_object_to_dt64tz(self): - # pre-2.0 this raised ValueError bc of tz mismatch - # xref GH#32581 - ts = Timestamp("2016-01-04 05:06:07", tz="US/Pacific") - ts2 = ts.tz_convert("Asia/Tokyo") - - ser = Series([ts, ts2], dtype=object) - res = ser.astype("datetime64[ns, Europe/Brussels]") - expected = Series( - [ts.tz_convert("Europe/Brussels"), ts2.tz_convert("Europe/Brussels")], - dtype="datetime64[ns, Europe/Brussels]", - ) - tm.assert_series_equal(res, expected) - - @pytest.mark.parametrize("dtype", np.typecodes["All"]) - def test_astype_empty_constructor_equality(self, dtype): - # see GH#15524 - - if dtype not in ( - "S", - "V", # poor support (if any) currently - "M", - "m", # Generic timestamps raise a ValueError. Already tested. - ): - init_empty = Series([], dtype=dtype) - as_type_empty = Series([]).astype(dtype) - tm.assert_series_equal(init_empty, as_type_empty) - - @pytest.mark.parametrize("dtype", [str, np.str_]) - @pytest.mark.parametrize( - "series", - [ - Series([string.digits * 10, rand_str(63), rand_str(64), rand_str(1000)]), - Series([string.digits * 10, rand_str(63), rand_str(64), np.nan, 1.0]), - ], - ) - def test_astype_str_map(self, dtype, series): - # see GH#4405 - result = series.astype(dtype) - expected = series.map(str) - tm.assert_series_equal(result, expected) - - def test_astype_float_to_period(self): - result = Series([np.nan]).astype("period[D]") - expected = Series([NaT], dtype="period[D]") - tm.assert_series_equal(result, expected) - - def test_astype_no_pandas_dtype(self): - # https://github.com/pandas-dev/pandas/pull/24866 - ser = Series([1, 2], dtype="int64") - # Don't have NumpyEADtype in the public API, so we use `.array.dtype`, - # which is a NumpyEADtype. - result = ser.astype(ser.array.dtype) - tm.assert_series_equal(result, ser) - - @pytest.mark.parametrize("dtype", [np.datetime64, np.timedelta64]) - def test_astype_generic_timestamp_no_frequency(self, dtype, request): - # see GH#15524, GH#15987 - data = [1] - ser = Series(data) - - if np.dtype(dtype).name not in ["timedelta64", "datetime64"]: - mark = pytest.mark.xfail(reason="GH#33890 Is assigned ns unit") - request.node.add_marker(mark) - - msg = ( - rf"The '{dtype.__name__}' dtype has no unit\. " - rf"Please pass in '{dtype.__name__}\[ns\]' instead." 
- ) - with pytest.raises(ValueError, match=msg): - ser.astype(dtype) - - def test_astype_dt64_to_str(self): - # GH#10442 : testing astype(str) is correct for Series/DatetimeIndex - dti = date_range("2012-01-01", periods=3) - result = Series(dti).astype(str) - expected = Series(["2012-01-01", "2012-01-02", "2012-01-03"], dtype=object) - tm.assert_series_equal(result, expected) - - def test_astype_dt64tz_to_str(self): - # GH#10442 : testing astype(str) is correct for Series/DatetimeIndex - dti_tz = date_range("2012-01-01", periods=3, tz="US/Eastern") - result = Series(dti_tz).astype(str) - expected = Series( - [ - "2012-01-01 00:00:00-05:00", - "2012-01-02 00:00:00-05:00", - "2012-01-03 00:00:00-05:00", - ], - dtype=object, - ) - tm.assert_series_equal(result, expected) - - def test_astype_datetime(self): - ser = Series(iNaT, dtype="M8[ns]", index=range(5)) - - ser = ser.astype("O") - assert ser.dtype == np.object_ - - ser = Series([datetime(2001, 1, 2, 0, 0)]) - - ser = ser.astype("O") - assert ser.dtype == np.object_ - - ser = Series([datetime(2001, 1, 2, 0, 0) for i in range(3)]) - - ser[1] = np.nan - assert ser.dtype == "M8[ns]" - - ser = ser.astype("O") - assert ser.dtype == np.object_ - - def test_astype_datetime64tz(self): - ser = Series(date_range("20130101", periods=3, tz="US/Eastern")) - - # astype - result = ser.astype(object) - expected = Series(ser.astype(object), dtype=object) - tm.assert_series_equal(result, expected) - - result = Series(ser.values).dt.tz_localize("UTC").dt.tz_convert(ser.dt.tz) - tm.assert_series_equal(result, ser) - - # astype - object, preserves on construction - result = Series(ser.astype(object)) - expected = ser.astype(object) - tm.assert_series_equal(result, expected) - - # astype - datetime64[ns, tz] - msg = "Cannot use .astype to convert from timezone-naive" - with pytest.raises(TypeError, match=msg): - # dt64->dt64tz astype deprecated - Series(ser.values).astype("datetime64[ns, US/Eastern]") - - with pytest.raises(TypeError, match=msg): - # dt64->dt64tz astype deprecated - Series(ser.values).astype(ser.dtype) - - result = ser.astype("datetime64[ns, CET]") - expected = Series(date_range("20130101 06:00:00", periods=3, tz="CET")) - tm.assert_series_equal(result, expected) - - def test_astype_str_cast_dt64(self): - # see GH#9757 - ts = Series([Timestamp("2010-01-04 00:00:00")]) - res = ts.astype(str) - - expected = Series(["2010-01-04"]) - tm.assert_series_equal(res, expected) - - ts = Series([Timestamp("2010-01-04 00:00:00", tz="US/Eastern")]) - res = ts.astype(str) - - expected = Series(["2010-01-04 00:00:00-05:00"]) - tm.assert_series_equal(res, expected) - - def test_astype_str_cast_td64(self): - # see GH#9757 - - td = Series([Timedelta(1, unit="d")]) - ser = td.astype(str) - - expected = Series(["1 days"]) - tm.assert_series_equal(ser, expected) - - def test_dt64_series_astype_object(self): - dt64ser = Series(date_range("20130101", periods=3)) - result = dt64ser.astype(object) - assert isinstance(result.iloc[0], datetime) - assert result.dtype == np.object_ - - def test_td64_series_astype_object(self): - tdser = Series(["59 Days", "59 Days", "NaT"], dtype="timedelta64[ns]") - result = tdser.astype(object) - assert isinstance(result.iloc[0], timedelta) - assert result.dtype == np.object_ - - @pytest.mark.parametrize( - "data, dtype", - [ - (["x", "y", "z"], "string[python]"), - pytest.param( - ["x", "y", "z"], - "string[pyarrow]", - marks=td.skip_if_no("pyarrow"), - ), - (["x", "y", "z"], "category"), - (3 * [Timestamp("2020-01-01", tz="UTC")], 
None), - (3 * [Interval(0, 1)], None), - ], - ) - @pytest.mark.parametrize("errors", ["raise", "ignore"]) - def test_astype_ignores_errors_for_extension_dtypes(self, data, dtype, errors): - # https://github.com/pandas-dev/pandas/issues/35471 - ser = Series(data, dtype=dtype) - if errors == "ignore": - expected = ser - result = ser.astype(float, errors="ignore") - tm.assert_series_equal(result, expected) - else: - msg = "(Cannot cast)|(could not convert)" - with pytest.raises((ValueError, TypeError), match=msg): - ser.astype(float, errors=errors) - - @pytest.mark.parametrize("dtype", [np.float16, np.float32, np.float64]) - def test_astype_from_float_to_str(self, dtype): - # https://github.com/pandas-dev/pandas/issues/36451 - ser = Series([0.1], dtype=dtype) - result = ser.astype(str) - expected = Series(["0.1"]) - tm.assert_series_equal(result, expected) - - @pytest.mark.parametrize( - "value, string_value", - [ - (None, "None"), - (np.nan, "nan"), - (NA, ""), - ], - ) - def test_astype_to_str_preserves_na(self, value, string_value): - # https://github.com/pandas-dev/pandas/issues/36904 - ser = Series(["a", "b", value], dtype=object) - result = ser.astype(str) - expected = Series(["a", "b", string_value], dtype=object) - tm.assert_series_equal(result, expected) - - @pytest.mark.parametrize("dtype", ["float32", "float64", "int64", "int32"]) - def test_astype(self, dtype): - ser = Series(np.random.default_rng(2).standard_normal(5), name="foo") - as_typed = ser.astype(dtype) - - assert as_typed.dtype == dtype - assert as_typed.name == ser.name - - @pytest.mark.parametrize("value", [np.nan, np.inf]) - @pytest.mark.parametrize("dtype", [np.int32, np.int64]) - def test_astype_cast_nan_inf_int(self, dtype, value): - # gh-14265: check NaN and inf raise error when converting to int - msg = "Cannot convert non-finite values \\(NA or inf\\) to integer" - ser = Series([value]) - - with pytest.raises(ValueError, match=msg): - ser.astype(dtype) - - @pytest.mark.parametrize("dtype", [int, np.int8, np.int64]) - def test_astype_cast_object_int_fail(self, dtype): - arr = Series(["car", "house", "tree", "1"]) - msg = r"invalid literal for int\(\) with base 10: 'car'" - with pytest.raises(ValueError, match=msg): - arr.astype(dtype) - - def test_astype_float_to_uint_negatives_raise( - self, float_numpy_dtype, any_unsigned_int_numpy_dtype - ): - # GH#45151 We don't cast negative numbers to nonsense values - # TODO: same for EA float/uint dtypes, signed integers? 
- arr = np.arange(5).astype(float_numpy_dtype) - 3 # includes negatives - ser = Series(arr) - - msg = "Cannot losslessly cast from .* to .*" - with pytest.raises(ValueError, match=msg): - ser.astype(any_unsigned_int_numpy_dtype) - - with pytest.raises(ValueError, match=msg): - ser.to_frame().astype(any_unsigned_int_numpy_dtype) - - with pytest.raises(ValueError, match=msg): - # We currently catch and re-raise in Index.astype - Index(ser).astype(any_unsigned_int_numpy_dtype) - - with pytest.raises(ValueError, match=msg): - ser.array.astype(any_unsigned_int_numpy_dtype) - - def test_astype_cast_object_int(self): - arr = Series(["1", "2", "3", "4"], dtype=object) - result = arr.astype(int) - - tm.assert_series_equal(result, Series(np.arange(1, 5))) - - def test_astype_unicode(self): - # see GH#7758: A bit of magic is required to set - # default encoding to utf-8 - digits = string.digits - test_series = [ - Series([digits * 10, rand_str(63), rand_str(64), rand_str(1000)]), - Series(["データーサイエンス、お前はもう死んでいる"]), - ] - - former_encoding = None - - if sys.getdefaultencoding() == "utf-8": - # GH#45326 as of 2.0 Series.astype matches Index.astype by handling - # bytes with obj.decode() instead of str(obj) - item = "野菜食べないとやばい" - ser = Series([item.encode()]) - result = ser.astype(np.str_) - expected = Series([item]) - tm.assert_series_equal(result, expected) - - for ser in test_series: - res = ser.astype(np.str_) - expec = ser.map(str) - tm.assert_series_equal(res, expec) - - # Restore the former encoding - if former_encoding is not None and former_encoding != "utf-8": - reload(sys) - sys.setdefaultencoding(former_encoding) - - def test_astype_bytes(self): - # GH#39474 - result = Series(["foo", "bar", "baz"]).astype(bytes) - assert result.dtypes == np.dtype("S3") - - def test_astype_nan_to_bool(self): - # GH#43018 - ser = Series(np.nan, dtype="object") - result = ser.astype("bool") - expected = Series(True, dtype="bool") - tm.assert_series_equal(result, expected) - - @pytest.mark.parametrize( - "dtype", - tm.ALL_INT_EA_DTYPES + tm.FLOAT_EA_DTYPES, - ) - def test_astype_ea_to_datetimetzdtype(self, dtype): - # GH37553 - ser = Series([4, 0, 9], dtype=dtype) - result = ser.astype(DatetimeTZDtype(tz="US/Pacific")) - - expected = Series( - { - 0: Timestamp("1969-12-31 16:00:00.000000004-08:00", tz="US/Pacific"), - 1: Timestamp("1969-12-31 16:00:00.000000000-08:00", tz="US/Pacific"), - 2: Timestamp("1969-12-31 16:00:00.000000009-08:00", tz="US/Pacific"), - } - ) - - tm.assert_series_equal(result, expected) - - def test_astype_retain_attrs(self, any_numpy_dtype): - # GH#44414 - ser = Series([0, 1, 2, 3]) - ser.attrs["Location"] = "Michigan" - - result = ser.astype(any_numpy_dtype).attrs - expected = ser.attrs - - tm.assert_dict_equal(expected, result) - - -class TestAstypeString: - @pytest.mark.parametrize( - "data, dtype", - [ - ([True, NA], "boolean"), - (["A", NA], "category"), - (["2020-10-10", "2020-10-10"], "datetime64[ns]"), - (["2020-10-10", "2020-10-10", NaT], "datetime64[ns]"), - ( - ["2012-01-01 00:00:00-05:00", NaT], - "datetime64[ns, US/Eastern]", - ), - ([1, None], "UInt16"), - (["1/1/2021", "2/1/2021"], "period[M]"), - (["1/1/2021", "2/1/2021", NaT], "period[M]"), - (["1 Day", "59 Days", NaT], "timedelta64[ns]"), - # currently no way to parse IntervalArray from a list of strings - ], - ) - def test_astype_string_to_extension_dtype_roundtrip( - self, data, dtype, request, nullable_string_dtype - ): - if dtype == "boolean": - mark = pytest.mark.xfail( - reason="TODO StringArray.astype() with 
missing values #GH40566" - ) - request.node.add_marker(mark) - # GH-40351 - ser = Series(data, dtype=dtype) - - # Note: just passing .astype(dtype) fails for dtype="category" - # with bc ser.dtype.categories will be object dtype whereas - # result.dtype.categories will have string dtype - result = ser.astype(nullable_string_dtype).astype(ser.dtype) - tm.assert_series_equal(result, ser) - - -class TestAstypeCategorical: - def test_astype_categorical_to_other(self): - cat = Categorical([f"{i} - {i + 499}" for i in range(0, 10000, 500)]) - ser = Series(np.random.default_rng(2).integers(0, 10000, 100)).sort_values() - ser = cut(ser, range(0, 10500, 500), right=False, labels=cat) - - expected = ser - tm.assert_series_equal(ser.astype("category"), expected) - tm.assert_series_equal(ser.astype(CategoricalDtype()), expected) - msg = r"Cannot cast object dtype to float64" - with pytest.raises(ValueError, match=msg): - ser.astype("float64") - - cat = Series(Categorical(["a", "b", "b", "a", "a", "c", "c", "c"])) - exp = Series(["a", "b", "b", "a", "a", "c", "c", "c"]) - tm.assert_series_equal(cat.astype("str"), exp) - s2 = Series(Categorical(["1", "2", "3", "4"])) - exp2 = Series([1, 2, 3, 4]).astype("int") - tm.assert_series_equal(s2.astype("int"), exp2) - - # object don't sort correctly, so just compare that we have the same - # values - def cmp(a, b): - tm.assert_almost_equal(np.sort(np.unique(a)), np.sort(np.unique(b))) - - expected = Series(np.array(ser.values), name="value_group") - cmp(ser.astype("object"), expected) - cmp(ser.astype(np.object_), expected) - - # array conversion - tm.assert_almost_equal(np.array(ser), np.array(ser.values)) - - tm.assert_series_equal(ser.astype("category"), ser) - tm.assert_series_equal(ser.astype(CategoricalDtype()), ser) - - roundtrip_expected = ser.cat.set_categories( - ser.cat.categories.sort_values() - ).cat.remove_unused_categories() - result = ser.astype("object").astype("category") - tm.assert_series_equal(result, roundtrip_expected) - result = ser.astype("object").astype(CategoricalDtype()) - tm.assert_series_equal(result, roundtrip_expected) - - def test_astype_categorical_invalid_conversions(self): - # invalid conversion (these are NOT a dtype) - cat = Categorical([f"{i} - {i + 499}" for i in range(0, 10000, 500)]) - ser = Series(np.random.default_rng(2).integers(0, 10000, 100)).sort_values() - ser = cut(ser, range(0, 10500, 500), right=False, labels=cat) - - msg = ( - "dtype '' " - "not understood" - ) - with pytest.raises(TypeError, match=msg): - ser.astype(Categorical) - with pytest.raises(TypeError, match=msg): - ser.astype("object").astype(Categorical) - - def test_astype_categoricaldtype(self): - ser = Series(["a", "b", "a"]) - result = ser.astype(CategoricalDtype(["a", "b"], ordered=True)) - expected = Series(Categorical(["a", "b", "a"], ordered=True)) - tm.assert_series_equal(result, expected) - - result = ser.astype(CategoricalDtype(["a", "b"], ordered=False)) - expected = Series(Categorical(["a", "b", "a"], ordered=False)) - tm.assert_series_equal(result, expected) - - result = ser.astype(CategoricalDtype(["a", "b", "c"], ordered=False)) - expected = Series( - Categorical(["a", "b", "a"], categories=["a", "b", "c"], ordered=False) - ) - tm.assert_series_equal(result, expected) - tm.assert_index_equal(result.cat.categories, Index(["a", "b", "c"])) - - @pytest.mark.parametrize("name", [None, "foo"]) - @pytest.mark.parametrize("dtype_ordered", [True, False]) - @pytest.mark.parametrize("series_ordered", [True, False]) - def 
test_astype_categorical_to_categorical( - self, name, dtype_ordered, series_ordered - ): - # GH#10696, GH#18593 - s_data = list("abcaacbab") - s_dtype = CategoricalDtype(list("bac"), ordered=series_ordered) - ser = Series(s_data, dtype=s_dtype, name=name) - - # unspecified categories - dtype = CategoricalDtype(ordered=dtype_ordered) - result = ser.astype(dtype) - exp_dtype = CategoricalDtype(s_dtype.categories, dtype_ordered) - expected = Series(s_data, name=name, dtype=exp_dtype) - tm.assert_series_equal(result, expected) - - # different categories - dtype = CategoricalDtype(list("adc"), dtype_ordered) - result = ser.astype(dtype) - expected = Series(s_data, name=name, dtype=dtype) - tm.assert_series_equal(result, expected) - - if dtype_ordered is False: - # not specifying ordered, so only test once - expected = ser - result = ser.astype("category") - tm.assert_series_equal(result, expected) - - def test_astype_bool_missing_to_categorical(self): - # GH-19182 - ser = Series([True, False, np.nan]) - assert ser.dtypes == np.object_ - - result = ser.astype(CategoricalDtype(categories=[True, False])) - expected = Series(Categorical([True, False, np.nan], categories=[True, False])) - tm.assert_series_equal(result, expected) - - def test_astype_categories_raises(self): - # deprecated GH#17636, removed in GH#27141 - ser = Series(["a", "b", "a"]) - with pytest.raises(TypeError, match="got an unexpected"): - ser.astype("category", categories=["a", "b"], ordered=True) - - @pytest.mark.parametrize("items", [["a", "b", "c", "a"], [1, 2, 3, 1]]) - def test_astype_from_categorical(self, items): - ser = Series(items) - exp = Series(Categorical(items)) - res = ser.astype("category") - tm.assert_series_equal(res, exp) - - def test_astype_from_categorical_with_keywords(self): - # with keywords - lst = ["a", "b", "c", "a"] - ser = Series(lst) - exp = Series(Categorical(lst, ordered=True)) - res = ser.astype(CategoricalDtype(None, ordered=True)) - tm.assert_series_equal(res, exp) - - exp = Series(Categorical(lst, categories=list("abcdef"), ordered=True)) - res = ser.astype(CategoricalDtype(list("abcdef"), ordered=True)) - tm.assert_series_equal(res, exp) - - def test_astype_timedelta64_with_np_nan(self): - # GH45798 - result = Series([Timedelta(1), np.nan], dtype="timedelta64[ns]") - expected = Series([Timedelta(1), NaT], dtype="timedelta64[ns]") - tm.assert_series_equal(result, expected) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/series/methods/test_rename.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/series/methods/test_rename.py deleted file mode 100644 index 93c4fbb7f3c46c4cdcd969c1eb4840805e9307d8..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/series/methods/test_rename.py +++ /dev/null @@ -1,176 +0,0 @@ -from datetime import datetime -import re - -import numpy as np -import pytest - -from pandas import ( - Index, - MultiIndex, - Series, -) -import pandas._testing as tm - - -class TestRename: - def test_rename(self, datetime_series): - ts = datetime_series - renamer = lambda x: x.strftime("%Y%m%d") - renamed = ts.rename(renamer) - assert renamed.index[0] == renamer(ts.index[0]) - - # dict - rename_dict = dict(zip(ts.index, renamed.index)) - renamed2 = ts.rename(rename_dict) - tm.assert_series_equal(renamed, renamed2) - - def test_rename_partial_dict(self): - # partial dict - ser = Series(np.arange(4), index=["a", "b", "c", "d"], dtype="int64") - 
renamed = ser.rename({"b": "foo", "d": "bar"}) - tm.assert_index_equal(renamed.index, Index(["a", "foo", "c", "bar"])) - - def test_rename_retain_index_name(self): - # index with name - renamer = Series( - np.arange(4), index=Index(["a", "b", "c", "d"], name="name"), dtype="int64" - ) - renamed = renamer.rename({}) - assert renamed.index.name == renamer.index.name - - def test_rename_by_series(self): - ser = Series(range(5), name="foo") - renamer = Series({1: 10, 2: 20}) - result = ser.rename(renamer) - expected = Series(range(5), index=[0, 10, 20, 3, 4], name="foo") - tm.assert_series_equal(result, expected) - - def test_rename_set_name(self): - ser = Series(range(4), index=list("abcd")) - for name in ["foo", 123, 123.0, datetime(2001, 11, 11), ("foo",)]: - result = ser.rename(name) - assert result.name == name - tm.assert_numpy_array_equal(result.index.values, ser.index.values) - assert ser.name is None - - def test_rename_set_name_inplace(self): - ser = Series(range(3), index=list("abc")) - for name in ["foo", 123, 123.0, datetime(2001, 11, 11), ("foo",)]: - ser.rename(name, inplace=True) - assert ser.name == name - - exp = np.array(["a", "b", "c"], dtype=np.object_) - tm.assert_numpy_array_equal(ser.index.values, exp) - - def test_rename_axis_supported(self): - # Supporting axis for compatibility, detailed in GH-18589 - ser = Series(range(5)) - ser.rename({}, axis=0) - ser.rename({}, axis="index") - - with pytest.raises(ValueError, match="No axis named 5"): - ser.rename({}, axis=5) - - def test_rename_inplace(self, datetime_series): - renamer = lambda x: x.strftime("%Y%m%d") - expected = renamer(datetime_series.index[0]) - - datetime_series.rename(renamer, inplace=True) - assert datetime_series.index[0] == expected - - def test_rename_with_custom_indexer(self): - # GH 27814 - class MyIndexer: - pass - - ix = MyIndexer() - ser = Series([1, 2, 3]).rename(ix) - assert ser.name is ix - - def test_rename_with_custom_indexer_inplace(self): - # GH 27814 - class MyIndexer: - pass - - ix = MyIndexer() - ser = Series([1, 2, 3]) - ser.rename(ix, inplace=True) - assert ser.name is ix - - def test_rename_callable(self): - # GH 17407 - ser = Series(range(1, 6), index=Index(range(2, 7), name="IntIndex")) - result = ser.rename(str) - expected = ser.rename(lambda i: str(i)) - tm.assert_series_equal(result, expected) - - assert result.name == expected.name - - def test_rename_none(self): - # GH 40977 - ser = Series([1, 2], name="foo") - result = ser.rename(None) - expected = Series([1, 2]) - tm.assert_series_equal(result, expected) - - def test_rename_series_with_multiindex(self): - # issue #43659 - arrays = [ - ["bar", "baz", "baz", "foo", "qux"], - ["one", "one", "two", "two", "one"], - ] - - index = MultiIndex.from_arrays(arrays, names=["first", "second"]) - ser = Series(np.ones(5), index=index) - result = ser.rename(index={"one": "yes"}, level="second", errors="raise") - - arrays_expected = [ - ["bar", "baz", "baz", "foo", "qux"], - ["yes", "yes", "two", "two", "yes"], - ] - - index_expected = MultiIndex.from_arrays( - arrays_expected, names=["first", "second"] - ) - series_expected = Series(np.ones(5), index=index_expected) - - tm.assert_series_equal(result, series_expected) - - def test_rename_series_with_multiindex_keeps_ea_dtypes(self): - # GH21055 - arrays = [ - Index([1, 2, 3], dtype="Int64").astype("category"), - Index([1, 2, 3], dtype="Int64"), - ] - mi = MultiIndex.from_arrays(arrays, names=["A", "B"]) - ser = Series(1, index=mi) - result = ser.rename({1: 4}, level=1) - - arrays_expected = 
[ - Index([1, 2, 3], dtype="Int64").astype("category"), - Index([4, 2, 3], dtype="Int64"), - ] - mi_expected = MultiIndex.from_arrays(arrays_expected, names=["A", "B"]) - expected = Series(1, index=mi_expected) - - tm.assert_series_equal(result, expected) - - def test_rename_error_arg(self): - # GH 46889 - ser = Series(["foo", "bar"]) - match = re.escape("[2] not found in axis") - with pytest.raises(KeyError, match=match): - ser.rename({2: 9}, errors="raise") - - def test_rename_copy_false(self, using_copy_on_write): - # GH 46889 - ser = Series(["foo", "bar"]) - ser_orig = ser.copy() - shallow_copy = ser.rename({1: 9}, copy=False) - ser[0] = "foobar" - if using_copy_on_write: - assert ser_orig[0] == shallow_copy[0] - assert ser_orig[1] == shallow_copy[9] - else: - assert ser[0] == shallow_copy[0] - assert ser[1] == shallow_copy[9] diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/tslibs/__init__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/tslibs/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/chardet/metadata/__init__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/chardet/metadata/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/qingxu98/gpt-academic/crazy_functions/test_project/cpp/cppipc/prod_cons.h b/spaces/qingxu98/gpt-academic/crazy_functions/test_project/cpp/cppipc/prod_cons.h deleted file mode 100644 index c9004bb8043a12e32814436baa6262a00c8ef68e..0000000000000000000000000000000000000000 --- a/spaces/qingxu98/gpt-academic/crazy_functions/test_project/cpp/cppipc/prod_cons.h +++ /dev/null @@ -1,433 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include - -#include "libipc/def.h" - -#include "libipc/platform/detail.h" -#include "libipc/circ/elem_def.h" -#include "libipc/utility/log.h" -#include "libipc/utility/utility.h" - -namespace ipc { - -//////////////////////////////////////////////////////////////// -/// producer-consumer implementation -//////////////////////////////////////////////////////////////// - -template -struct prod_cons_impl; - -template <> -struct prod_cons_impl> { - - template - struct elem_t { - std::aligned_storage_t data_ {}; - }; - - alignas(cache_line_size) std::atomic rd_; // read index - alignas(cache_line_size) std::atomic wt_; // write index - - constexpr circ::u2_t cursor() const noexcept { - return 0; - } - - template - bool push(W* /*wrapper*/, F&& f, E* elems) { - auto cur_wt = circ::index_of(wt_.load(std::memory_order_relaxed)); - if (cur_wt == circ::index_of(rd_.load(std::memory_order_acquire) - 1)) { - return false; // full - } - std::forward(f)(&(elems[cur_wt].data_)); - wt_.fetch_add(1, std::memory_order_release); - return true; - } - - /** - * In single-single-unicast, 'force_push' means 'no reader' or 'the only one reader is dead'. - * So we could just disconnect all connections of receiver, and return false. 
- */ - template - bool force_push(W* wrapper, F&&, E*) { - wrapper->elems()->disconnect_receiver(~static_cast(0u)); - return false; - } - - template - bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E* elems) { - auto cur_rd = circ::index_of(rd_.load(std::memory_order_relaxed)); - if (cur_rd == circ::index_of(wt_.load(std::memory_order_acquire))) { - return false; // empty - } - std::forward(f)(&(elems[cur_rd].data_)); - std::forward(out)(true); - rd_.fetch_add(1, std::memory_order_release); - return true; - } -}; - -template <> -struct prod_cons_impl> - : prod_cons_impl> { - - template - bool force_push(W* wrapper, F&&, E*) { - wrapper->elems()->disconnect_receiver(1); - return false; - } - - template class E, std::size_t DS, std::size_t AS> - bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E* elems) { - byte_t buff[DS]; - for (unsigned k = 0;;) { - auto cur_rd = rd_.load(std::memory_order_relaxed); - if (circ::index_of(cur_rd) == - circ::index_of(wt_.load(std::memory_order_acquire))) { - return false; // empty - } - std::memcpy(buff, &(elems[circ::index_of(cur_rd)].data_), sizeof(buff)); - if (rd_.compare_exchange_weak(cur_rd, cur_rd + 1, std::memory_order_release)) { - std::forward(f)(buff); - std::forward(out)(true); - return true; - } - ipc::yield(k); - } - } -}; - -template <> -struct prod_cons_impl> - : prod_cons_impl> { - - using flag_t = std::uint64_t; - - template - struct elem_t { - std::aligned_storage_t data_ {}; - std::atomic f_ct_ { 0 }; // commit flag - }; - - alignas(cache_line_size) std::atomic ct_; // commit index - - template - bool push(W* /*wrapper*/, F&& f, E* elems) { - circ::u2_t cur_ct, nxt_ct; - for (unsigned k = 0;;) { - cur_ct = ct_.load(std::memory_order_relaxed); - if (circ::index_of(nxt_ct = cur_ct + 1) == - circ::index_of(rd_.load(std::memory_order_acquire))) { - return false; // full - } - if (ct_.compare_exchange_weak(cur_ct, nxt_ct, std::memory_order_acq_rel)) { - break; - } - ipc::yield(k); - } - auto* el = elems + circ::index_of(cur_ct); - std::forward(f)(&(el->data_)); - // set flag & try update wt - el->f_ct_.store(~static_cast(cur_ct), std::memory_order_release); - while (1) { - auto cac_ct = el->f_ct_.load(std::memory_order_acquire); - if (cur_ct != wt_.load(std::memory_order_relaxed)) { - return true; - } - if ((~cac_ct) != cur_ct) { - return true; - } - if (!el->f_ct_.compare_exchange_strong(cac_ct, 0, std::memory_order_relaxed)) { - return true; - } - wt_.store(nxt_ct, std::memory_order_release); - cur_ct = nxt_ct; - nxt_ct = cur_ct + 1; - el = elems + circ::index_of(cur_ct); - } - return true; - } - - template - bool force_push(W* wrapper, F&&, E*) { - wrapper->elems()->disconnect_receiver(1); - return false; - } - - template class E, std::size_t DS, std::size_t AS> - bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E* elems) { - byte_t buff[DS]; - for (unsigned k = 0;;) { - auto cur_rd = rd_.load(std::memory_order_relaxed); - auto cur_wt = wt_.load(std::memory_order_acquire); - auto id_rd = circ::index_of(cur_rd); - auto id_wt = circ::index_of(cur_wt); - if (id_rd == id_wt) { - auto* el = elems + id_wt; - auto cac_ct = el->f_ct_.load(std::memory_order_acquire); - if ((~cac_ct) != cur_wt) { - return false; // empty - } - if (el->f_ct_.compare_exchange_weak(cac_ct, 0, std::memory_order_relaxed)) { - wt_.store(cur_wt + 1, std::memory_order_release); - } - k = 0; - } - else { - std::memcpy(buff, &(elems[circ::index_of(cur_rd)].data_), sizeof(buff)); - if (rd_.compare_exchange_weak(cur_rd, cur_rd + 1, 
std::memory_order_release)) { - std::forward(f)(buff); - std::forward(out)(true); - return true; - } - ipc::yield(k); - } - } - } -}; - -template <> -struct prod_cons_impl> { - - using rc_t = std::uint64_t; - - enum : rc_t { - ep_mask = 0x00000000ffffffffull, - ep_incr = 0x0000000100000000ull - }; - - template - struct elem_t { - std::aligned_storage_t data_ {}; - std::atomic rc_ { 0 }; // read-counter - }; - - alignas(cache_line_size) std::atomic wt_; // write index - alignas(cache_line_size) rc_t epoch_ { 0 }; // only one writer - - circ::u2_t cursor() const noexcept { - return wt_.load(std::memory_order_acquire); - } - - template - bool push(W* wrapper, F&& f, E* elems) { - E* el; - for (unsigned k = 0;;) { - circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed); - if (cc == 0) return false; // no reader - el = elems + circ::index_of(wt_.load(std::memory_order_relaxed)); - // check all consumers have finished reading this element - auto cur_rc = el->rc_.load(std::memory_order_acquire); - circ::cc_t rem_cc = cur_rc & ep_mask; - if ((cc & rem_cc) && ((cur_rc & ~ep_mask) == epoch_)) { - return false; // has not finished yet - } - // consider rem_cc to be 0 here - if (el->rc_.compare_exchange_weak( - cur_rc, epoch_ | static_cast(cc), std::memory_order_release)) { - break; - } - ipc::yield(k); - } - std::forward(f)(&(el->data_)); - wt_.fetch_add(1, std::memory_order_release); - return true; - } - - template - bool force_push(W* wrapper, F&& f, E* elems) { - E* el; - epoch_ += ep_incr; - for (unsigned k = 0;;) { - circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed); - if (cc == 0) return false; // no reader - el = elems + circ::index_of(wt_.load(std::memory_order_relaxed)); - // check all consumers have finished reading this element - auto cur_rc = el->rc_.load(std::memory_order_acquire); - circ::cc_t rem_cc = cur_rc & ep_mask; - if (cc & rem_cc) { - ipc::log("force_push: k = %u, cc = %u, rem_cc = %u\n", k, cc, rem_cc); - cc = wrapper->elems()->disconnect_receiver(rem_cc); // disconnect all invalid readers - if (cc == 0) return false; // no reader - } - // just compare & exchange - if (el->rc_.compare_exchange_weak( - cur_rc, epoch_ | static_cast(cc), std::memory_order_release)) { - break; - } - ipc::yield(k); - } - std::forward(f)(&(el->data_)); - wt_.fetch_add(1, std::memory_order_release); - return true; - } - - template - bool pop(W* wrapper, circ::u2_t& cur, F&& f, R&& out, E* elems) { - if (cur == cursor()) return false; // acquire - auto* el = elems + circ::index_of(cur++); - std::forward(f)(&(el->data_)); - for (unsigned k = 0;;) { - auto cur_rc = el->rc_.load(std::memory_order_acquire); - if ((cur_rc & ep_mask) == 0) { - std::forward(out)(true); - return true; - } - auto nxt_rc = cur_rc & ~static_cast(wrapper->connected_id()); - if (el->rc_.compare_exchange_weak(cur_rc, nxt_rc, std::memory_order_release)) { - std::forward(out)((nxt_rc & ep_mask) == 0); - return true; - } - ipc::yield(k); - } - } -}; - -template <> -struct prod_cons_impl> { - - using rc_t = std::uint64_t; - using flag_t = std::uint64_t; - - enum : rc_t { - rc_mask = 0x00000000ffffffffull, - ep_mask = 0x00ffffffffffffffull, - ep_incr = 0x0100000000000000ull, - ic_mask = 0xff000000ffffffffull, - ic_incr = 0x0000000100000000ull - }; - - template - struct elem_t { - std::aligned_storage_t data_ {}; - std::atomic rc_ { 0 }; // read-counter - std::atomic f_ct_ { 0 }; // commit flag - }; - - alignas(cache_line_size) std::atomic ct_; // commit index - alignas(cache_line_size) 
std::atomic epoch_ { 0 }; - - circ::u2_t cursor() const noexcept { - return ct_.load(std::memory_order_acquire); - } - - constexpr static rc_t inc_rc(rc_t rc) noexcept { - return (rc & ic_mask) | ((rc + ic_incr) & ~ic_mask); - } - - constexpr static rc_t inc_mask(rc_t rc) noexcept { - return inc_rc(rc) & ~rc_mask; - } - - template - bool push(W* wrapper, F&& f, E* elems) { - E* el; - circ::u2_t cur_ct; - rc_t epoch = epoch_.load(std::memory_order_acquire); - for (unsigned k = 0;;) { - circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed); - if (cc == 0) return false; // no reader - el = elems + circ::index_of(cur_ct = ct_.load(std::memory_order_relaxed)); - // check all consumers have finished reading this element - auto cur_rc = el->rc_.load(std::memory_order_relaxed); - circ::cc_t rem_cc = cur_rc & rc_mask; - if ((cc & rem_cc) && ((cur_rc & ~ep_mask) == epoch)) { - return false; // has not finished yet - } - else if (!rem_cc) { - auto cur_fl = el->f_ct_.load(std::memory_order_acquire); - if ((cur_fl != cur_ct) && cur_fl) { - return false; // full - } - } - // consider rem_cc to be 0 here - if (el->rc_.compare_exchange_weak( - cur_rc, inc_mask(epoch | (cur_rc & ep_mask)) | static_cast(cc), std::memory_order_relaxed) && - epoch_.compare_exchange_weak(epoch, epoch, std::memory_order_acq_rel)) { - break; - } - ipc::yield(k); - } - // only one thread/process would touch here at one time - ct_.store(cur_ct + 1, std::memory_order_release); - std::forward(f)(&(el->data_)); - // set flag & try update wt - el->f_ct_.store(~static_cast(cur_ct), std::memory_order_release); - return true; - } - - template - bool force_push(W* wrapper, F&& f, E* elems) { - E* el; - circ::u2_t cur_ct; - rc_t epoch = epoch_.fetch_add(ep_incr, std::memory_order_release) + ep_incr; - for (unsigned k = 0;;) { - circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed); - if (cc == 0) return false; // no reader - el = elems + circ::index_of(cur_ct = ct_.load(std::memory_order_relaxed)); - // check all consumers have finished reading this element - auto cur_rc = el->rc_.load(std::memory_order_acquire); - circ::cc_t rem_cc = cur_rc & rc_mask; - if (cc & rem_cc) { - ipc::log("force_push: k = %u, cc = %u, rem_cc = %u\n", k, cc, rem_cc); - cc = wrapper->elems()->disconnect_receiver(rem_cc); // disconnect all invalid readers - if (cc == 0) return false; // no reader - } - // just compare & exchange - if (el->rc_.compare_exchange_weak( - cur_rc, inc_mask(epoch | (cur_rc & ep_mask)) | static_cast(cc), std::memory_order_relaxed)) { - if (epoch == epoch_.load(std::memory_order_acquire)) { - break; - } - else if (push(wrapper, std::forward(f), elems)) { - return true; - } - epoch = epoch_.fetch_add(ep_incr, std::memory_order_release) + ep_incr; - } - ipc::yield(k); - } - // only one thread/process would touch here at one time - ct_.store(cur_ct + 1, std::memory_order_release); - std::forward(f)(&(el->data_)); - // set flag & try update wt - el->f_ct_.store(~static_cast(cur_ct), std::memory_order_release); - return true; - } - - template - bool pop(W* wrapper, circ::u2_t& cur, F&& f, R&& out, E(& elems)[N]) { - auto* el = elems + circ::index_of(cur); - auto cur_fl = el->f_ct_.load(std::memory_order_acquire); - if (cur_fl != ~static_cast(cur)) { - return false; // empty - } - ++cur; - std::forward(f)(&(el->data_)); - for (unsigned k = 0;;) { - auto cur_rc = el->rc_.load(std::memory_order_acquire); - if ((cur_rc & rc_mask) == 0) { - std::forward(out)(true); - el->f_ct_.store(cur + N - 1, 
std::memory_order_release); - return true; - } - auto nxt_rc = inc_rc(cur_rc) & ~static_cast(wrapper->connected_id()); - bool last_one = false; - if ((last_one = (nxt_rc & rc_mask) == 0)) { - el->f_ct_.store(cur + N - 1, std::memory_order_release); - } - if (el->rc_.compare_exchange_weak(cur_rc, nxt_rc, std::memory_order_release)) { - std::forward(out)(last_one); - return true; - } - ipc::yield(k); - } - } -}; - -} // namespace ipc diff --git a/spaces/quantumiracle-git/OpenBiDexHand/README.md b/spaces/quantumiracle-git/OpenBiDexHand/README.md deleted file mode 100644 index 2577a003120bfa88c6178f7826b920bb972e18d4..0000000000000000000000000000000000000000 --- a/spaces/quantumiracle-git/OpenBiDexHand/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Robotinder -emoji: 🚀 -colorFrom: indigo -colorTo: purple -sdk: gradio -sdk_version: 3.12.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Alien Skin Image Doctor 2.1.1.1116 (Photoshop Plugin) [PATCHED].md b/spaces/quidiaMuxgu/Expedit-SAM/Alien Skin Image Doctor 2.1.1.1116 (Photoshop Plugin) [PATCHED].md deleted file mode 100644 index 19e2da299b13321632e56c2c44c3a425d060abea..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Alien Skin Image Doctor 2.1.1.1116 (Photoshop Plugin) [PATCHED].md +++ /dev/null @@ -1,12 +0,0 @@ -

        Alien Skin Image Doctor 2.1.1.1116 (Photoshop Plugin)


        Download Zip 🌟 https://geags.com/2uCqs5



- -Developer:alienskin.com; Downloads: 199; Size: 19.44 Mb; Price: - ... Image Doctor is Alien's brand new suite of powerful Photoshop graphics plugins ... Customizable and powerful, Image Doctor's tools allow you to: * Fix ... -Adobe Photoshop Elements 9.0 free download in Russian can be ...
        -
        -
        -

        diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Copy ? Text On Screen Pro V2.4.5 Build 119 [Premium] Crack TOPed [Latest].md b/spaces/quidiaMuxgu/Expedit-SAM/Copy ? Text On Screen Pro V2.4.5 Build 119 [Premium] Crack TOPed [Latest].md deleted file mode 100644 index e2d65ea2221c22e41b301345e61fd1b02787543e..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Copy ? Text On Screen Pro V2.4.5 Build 119 [Premium] Crack TOPed [Latest].md +++ /dev/null @@ -1,6 +0,0 @@ -

        Copy – Text On Screen pro v2.4.5 build 119 [Premium] Cracked [Latest]


Download Zip: https://geags.com/2uCrsn



        -
        -
        -

        diff --git a/spaces/quidiaMuxgu/Expedit-SAM/DeepL Pro 1.11.0 Portable !!INSTALL!!.md b/spaces/quidiaMuxgu/Expedit-SAM/DeepL Pro 1.11.0 Portable !!INSTALL!!.md deleted file mode 100644 index b37a1b12ecbe3af281ef5701ee5d165ae974b4a8..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/DeepL Pro 1.11.0 Portable !!INSTALL!!.md +++ /dev/null @@ -1,29 +0,0 @@ - -

        How to Use DeepL Pro 1.11.0 Portable for Fast and Accurate Translations

        -

        DeepL Pro is a powerful online translator that uses neural networks to capture even the slightest nuances and reproduce them in translation unlike any other service. It can translate text in any and all applications on your computer, as well as whole documents with one click. It also offers maximum data security, enhanced customization options, and API access for developers.
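
For developers, DeepL also offers an official REST API. That API is a separate service with its own API key and is not bundled with the portable desktop build described here, so treat the snippet below as a rough sketch that assumes you have registered your own key at deepl.com; the endpoint and parameter names follow DeepL's public v2 API:

```python
import requests

# Rough sketch of a DeepL API v2 request.
# Assumes you have your own API key from deepl.com; this is not part of the
# portable desktop app. Paid plans use api.deepl.com instead of api-free.deepl.com.
API_KEY = "your-deepl-api-key"  # placeholder

resp = requests.post(
    "https://api-free.deepl.com/v2/translate",
    headers={"Authorization": f"DeepL-Auth-Key {API_KEY}"},
    data={"text": "Hello, world!", "target_lang": "ES"},
)
resp.raise_for_status()
# The response contains a "translations" list with the translated text.
print(resp.json()["translations"][0]["text"])
```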

        -

        DeepL Pro 1.11.0 Portable


Download: https://geags.com/2uCsr9



        -

        In this article, we will show you how to use DeepL Pro 1.11.0 Portable, which is a version of the software that does not require installation and can be run from any USB drive or cloud storage. This way, you can enjoy the benefits of DeepL Pro without having to install it on every computer you use.

        -

        Step 1: Download DeepL Pro 1.11.0 Portable

        -

        To download DeepL Pro 1.11.0 Portable, you can use one of the following links[^3^]:

        -
          -
        • Installer: https://www.upload.ee/files/11133854/DPL_1.11.0_.rar.html
        • -
        • Portable: https://filefox.cc/2h3xa4etedoe
        • -
        • Crack: https://www.upload.ee/files/11135215/DeepL_Pro_1.11.0_Crack.rar.html
        • -
        -

        You will need to extract the files from the compressed archive using a program like WinRAR or 7-Zip. The password for the crack file is OMCracked@Nsane.

        -

        Step 2: Run DeepL Pro 1.11.0 Portable

        -

        To run DeepL Pro 1.11.0 Portable, you just need to double-click on the DeepL.exe file in the folder where you extracted the files. You will see a window like this:

Screenshot: DeepL Pro window

        You can drag and drop any text or document you want to translate into the left pane, or type it directly. You can also copy and paste text from any application on your computer.

        -

        You can choose the source and target languages from the drop-down menus at the top of the window, or let DeepL detect them automatically.

        -

        -

        You can also access more options by clicking on the gear icon at the bottom right of the window. Here you can change the font size, enable or disable spell checking, choose whether to show alternative translations or not, and customize your translation settings.

        -

        Step 3: Enjoy Fast and Accurate Translations

        -

        Once you have entered your text or document, you will see the translation appear in the right pane almost instantly. You can edit the translation if you want, or copy and paste it into another application.

        -

        If you want to translate a whole document with one click, you can click on the "Translate document" button at the bottom left of the window. You can choose from Microsoft Word (.docx), PowerPoint (.pptx), PDF (.pdf), and text (.txt) files.

        -

        You will see a preview of the translated document in a new window, where you can also save it or print it.

Screenshot: translated document preview

        As you can see, using DeepL Pro 1.11.0 Portable is very easy and convenient. You can take it with you anywhere and use it on any computer without installing anything.

        -

        If you want to learn more about DeepL Pro and its features, you can visit their official website[^2^] or read their blog[^2^]. You can also try out their free web translator[^2^] or sign up for a free trial[^2^] of their subscription plans.

        -
        -
        \ No newline at end of file diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Hauptwerk 4 Crack TOP.md b/spaces/quidiaMuxgu/Expedit-SAM/Hauptwerk 4 Crack TOP.md deleted file mode 100644 index 74fcc82a75d33dec853e752910926346160f3111..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Hauptwerk 4 Crack TOP.md +++ /dev/null @@ -1,6 +0,0 @@ -

        Hauptwerk 4 crack


        Download ✫✫✫ https://geags.com/2uCqTC



        -
-Adeko 9 Crack 2011 Dodge ... Hauptwerk 4 Crack. hauptwerk, hauptwerk organ console, hauptwerk console, hauptwerk forum, ...
        -
        -
        -

        diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Lectii De Pian Pentru Incepatori.pdf ((BETTER)).md b/spaces/quidiaMuxgu/Expedit-SAM/Lectii De Pian Pentru Incepatori.pdf ((BETTER)).md deleted file mode 100644 index 1f5e3a1f6a360aaa2b8ccd7c2633f5c279d78e2f..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Lectii De Pian Pentru Incepatori.pdf ((BETTER)).md +++ /dev/null @@ -1,9 +0,0 @@ -
        -

Raportaties 1.50-1.13 MB. Lectii pentru pian 2.0. Do you like this? The Portfolio. I heard this. I didn't like this. Category: Subcategories: Years: 2007, 2007, 2005, 2003, 2001. http://www.sportteachersofpeoria.org/lecuri-de-pian-pentru-incepatori.pdf Lectii De Pian Pentru Incepatori.pdf

About Lectii De Pian Pentru Incepatori: well categorized on Prezi along with http://prezi.com/mrdrw1xgx8bb/lectii-de-pian-pentru-incepatori/.

        -

        Lectii De Pian Pentru Incepatori.pdf


DOWNLOAD: https://geags.com/2uCrzh



        -

        Download lectii de pian pentru incepatori,lectii de pian pentru incepatori 3 nivele,lectii de piano incepatori 0 nivele,lectii de piano incepatori 0 nivele,lectii de piano 2 nivele,lectii de piano.

        -

        Download lectii de pian pentru incepatori,lectii de piano pentru incepatori 3 nivele,lectii de piano incepatori 0 nivele,lectii de piano incepatori 0 nivele,lectii de piano 2 nivele,lectii de piano.

        -

        3-Prdopte cand esti la pian cea mai importanta, sunt doua lucruri corecte. Incepatorul cu XVII secol (inainte de acest tutorial) sau Chordifys; module pentru iPad/iPhone. Uniun de Dezvoltare (Bend Orbe). Este una dintre numeroasele dezvoltate de Multumire. Categoria nu este desemnat cel mai tare de Instrumentalist. Acest mod ne-a ajutat la MANARMA deschisarea, Print magazine, Diagonal magazine etc. Cercetare pe YouTube (ca Alistel), http://youtube.com/resultat de cuvinte de cercetare care il vor afla la acest instrument? Deschide/inapoi. Compilerile corecte.
        4. Tuturor copiilor si fetelor inainte de acest tutorial: - Cine este vincent? (G) - Cine a fost marea si cea mai mare inginerie? (F C) - Cine este pamantul? (C) - Cine este copilul / rasuta mica? (G) - Cine este palata mare? (C# G) - Cine este vocea unui rapper numit Hot shot? (F C) - Cine este avionul? (C) - Cine este pinguinul? (A) - Cine este cârnatul? (A) - Cine este pinguinul (din Africa)? (A) 5. De ce ar trebui sa-ti faci un instrument pentru tine? (C) Pentru a fi într-un act (de cazare, de plangachit, de ambalare, etc.). De a reprezenta. De a se întoarce in societate. 6. Inspiratia de un bloc de fieries. 7. Sorin incarcate. 8. Ceva care ajunge acolo. 9. Daca. 10. Ceva. 11. Acesta este un act consemnat in Unchi-Zeu: Curoase-Ea. 12. Piata citrica de Paris. 13. Maestralele de folie. 14. Piata Grecia-Colonie. 15. Piata în America-Revolutii. 16. Piata cu familii. 17. Piata cu gloanți. 18. Fara îndoială. Aici puteți descărca manualele de instrument Pianul inițial Piano Video și tuturor instrumentelor disponibile pentru iPad / iPhone.
        14 Cântec pentru instrument Pian. Într-un tutorial piano si muzică de burduf. Tuturor copiilor, fetelor si infantilor sau pe orice copil sau fete de 30 ani care vor să-si invețeasca instrumentul.

        -

        899543212b
        -
        -
        \ No newline at end of file diff --git a/spaces/r3gm/Ultimate-Vocal-Remover-WebUI/demucs/repo.py b/spaces/r3gm/Ultimate-Vocal-Remover-WebUI/demucs/repo.py deleted file mode 100644 index 65ff6b33c7771b7743659d52151da67dc18082a8..0000000000000000000000000000000000000000 --- a/spaces/r3gm/Ultimate-Vocal-Remover-WebUI/demucs/repo.py +++ /dev/null @@ -1,148 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. -"""Represents a model repository, including pre-trained models and bags of models. -A repo can either be the main remote repository stored in AWS, or a local repository -with your own models. -""" - -from hashlib import sha256 -from pathlib import Path -import typing as tp - -import torch -import yaml - -from .apply import BagOfModels, Model -from .states import load_model - - -AnyModel = tp.Union[Model, BagOfModels] - - -class ModelLoadingError(RuntimeError): - pass - - -def check_checksum(path: Path, checksum: str): - sha = sha256() - with open(path, 'rb') as file: - while True: - buf = file.read(2**20) - if not buf: - break - sha.update(buf) - actual_checksum = sha.hexdigest()[:len(checksum)] - if actual_checksum != checksum: - raise ModelLoadingError(f'Invalid checksum for file {path}, ' - f'expected {checksum} but got {actual_checksum}') - -class ModelOnlyRepo: - """Base class for all model only repos. - """ - def has_model(self, sig: str) -> bool: - raise NotImplementedError() - - def get_model(self, sig: str) -> Model: - raise NotImplementedError() - - -class RemoteRepo(ModelOnlyRepo): - def __init__(self, models: tp.Dict[str, str]): - self._models = models - - def has_model(self, sig: str) -> bool: - return sig in self._models - - def get_model(self, sig: str) -> Model: - try: - url = self._models[sig] - except KeyError: - raise ModelLoadingError(f'Could not find a pre-trained model with signature {sig}.') - pkg = torch.hub.load_state_dict_from_url(url, map_location='cpu', check_hash=True) - return load_model(pkg) - - -class LocalRepo(ModelOnlyRepo): - def __init__(self, root: Path): - self.root = root - self.scan() - - def scan(self): - self._models = {} - self._checksums = {} - for file in self.root.iterdir(): - if file.suffix == '.th': - if '-' in file.stem: - xp_sig, checksum = file.stem.split('-') - self._checksums[xp_sig] = checksum - else: - xp_sig = file.stem - if xp_sig in self._models: - print('Whats xp? ', xp_sig) - raise ModelLoadingError( - f'Duplicate pre-trained model exist for signature {xp_sig}. ' - 'Please delete all but one.') - self._models[xp_sig] = file - - def has_model(self, sig: str) -> bool: - return sig in self._models - - def get_model(self, sig: str) -> Model: - try: - file = self._models[sig] - except KeyError: - raise ModelLoadingError(f'Could not find pre-trained model with signature {sig}.') - if sig in self._checksums: - check_checksum(file, self._checksums[sig]) - return load_model(file) - - -class BagOnlyRepo: - """Handles only YAML files containing bag of models, leaving the actual - model loading to some Repo. 
- """ - def __init__(self, root: Path, model_repo: ModelOnlyRepo): - self.root = root - self.model_repo = model_repo - self.scan() - - def scan(self): - self._bags = {} - for file in self.root.iterdir(): - if file.suffix == '.yaml': - self._bags[file.stem] = file - - def has_model(self, name: str) -> bool: - return name in self._bags - - def get_model(self, name: str) -> BagOfModels: - try: - yaml_file = self._bags[name] - except KeyError: - raise ModelLoadingError(f'{name} is neither a single pre-trained model or ' - 'a bag of models.') - bag = yaml.safe_load(open(yaml_file)) - signatures = bag['models'] - models = [self.model_repo.get_model(sig) for sig in signatures] - weights = bag.get('weights') - segment = bag.get('segment') - return BagOfModels(models, weights, segment) - - -class AnyModelRepo: - def __init__(self, model_repo: ModelOnlyRepo, bag_repo: BagOnlyRepo): - self.model_repo = model_repo - self.bag_repo = bag_repo - - def has_model(self, name_or_sig: str) -> bool: - return self.model_repo.has_model(name_or_sig) or self.bag_repo.has_model(name_or_sig) - - def get_model(self, name_or_sig: str) -> AnyModel: - print('name_or_sig: ', name_or_sig) - if self.model_repo.has_model(name_or_sig): - return self.model_repo.get_model(name_or_sig) - else: - return self.bag_repo.get_model(name_or_sig) diff --git a/spaces/rachittshah/doc-qa/README.md b/spaces/rachittshah/doc-qa/README.md deleted file mode 100644 index a6b9ff85ca6b9d808b6f20231dd9be36146238dd..0000000000000000000000000000000000000000 --- a/spaces/rachittshah/doc-qa/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Doc Qa -emoji: ⚡ -colorFrom: red -colorTo: gray -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/radames/NYTimes-homepage-rearranged/client/README.md b/spaces/radames/NYTimes-homepage-rearranged/client/README.md deleted file mode 100644 index 94a48f5ff868a92f77d5e2ca3b81ba6b20b9b719..0000000000000000000000000000000000000000 --- a/spaces/radames/NYTimes-homepage-rearranged/client/README.md +++ /dev/null @@ -1,40 +0,0 @@ -# create-svelte - -Everything you need to build a Svelte project, powered by [`create-svelte`](https://github.com/sveltejs/kit/tree/master/packages/create-svelte). - -## Creating a project - -If you're seeing this, you've probably already done this step. Congrats! - -```bash -# create a new project in the current directory -npm init svelte@next - -# create a new project in my-app -npm init svelte@next my-app -``` - -> Note: the `@next` is temporary - -## Developing - -Once you've created a project and installed dependencies with `npm install` (or `pnpm install` or `yarn`), start a development server: - -```bash -npm run dev - -# or start the server and open the app in a new browser tab -npm run dev -- --open -``` - -## Building - -To create a production version of your app: - -```bash -npm run build -``` - -You can preview the production build with `npm run preview`. - -> To deploy your app, you may need to install an [adapter](https://kit.svelte.dev/docs/adapters) for your target environment. diff --git a/spaces/radames/whisper.cpp-wasm/index.html b/spaces/radames/whisper.cpp-wasm/index.html deleted file mode 100644 index 99f679584f5fbff0db3de6aac40f1e36c8c85791..0000000000000000000000000000000000000000 --- a/spaces/radames/whisper.cpp-wasm/index.html +++ /dev/null @@ -1,886 +0,0 @@ - - - - whisper.cpp : WASM example - - - - - -
        - - Minimal - whisper.cpp - example running fully in the browser - -

        - - Usage instructions:
        -
          -
        • - Load a ggml model file (you can obtain one from - here, recommended: - tiny or base) -
        • -
        • - Select audio file to transcribe or record audio from the microphone - (sample: jfk.wav) -
        • -
        • Click on the "Transcribe" button to start the transcription
        • -
        - - Note that the computation is quite heavy and may take a few seconds to - complete.
        - The transcription results will be displayed in the text area below.

        - Important: -
          -
        • - your browser must support WASM SIMD instructions for this to work -
        • -
        • - Firefox cannot load files larger than 256 MB - use Chrome instead -
        • -
        - - More examples: - main | - bench | - stream | - command | - talk | - -
        - -
        - Whisper models:

        - - - - - - - -

        - Quantized models:

        - - - - - -
        - - - - -
        - -
        - - -
        - Input: - - - - -
        - -
        - -
        - Audio file: - -
        - - - -
        @GIT_SHA1@ - | Commit subject: @GIT_COMMIT_SUBJECT@ | - Source Code - | - -
        -
        - - - - - - diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Darulkitap V3 Indir The Best Way to Study Islam Online.md b/spaces/raedeXanto/academic-chatgpt-beta/Darulkitap V3 Indir The Best Way to Study Islam Online.md deleted file mode 100644 index 58eab60af55207cb860df3c350de0e05e778a195..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Darulkitap V3 Indir The Best Way to Study Islam Online.md +++ /dev/null @@ -1,166 +0,0 @@ -
        -

        What is Darulkitap V3?

        -

        Darulkitap V3 is a digital Islamic encyclopedia that contains more than 900 Islamic books from various fields and disciplines. It is a valuable resource for anyone who wants to learn more about Islam and its teachings. Darulkitap V3 is the latest version of the encyclopedia that has been updated with new books, corrections, and improvements. It has a user-friendly interface that allows you to browse, search, read, and bookmark the books you want. You can also customize the font size, color, and background of the text according to your preference.

        -

        Why should you download Darulkitap V3?

        -

        If you are interested in Islamic studies, Darulkitap V3 is a must-have for you. Here are some of the benefits of using Darulkitap V3:

        -

        Darulkitap V3 Indir


        Download ————— https://tinourl.com/2uL0Ec



        -
          -
        • It provides you with a comprehensive and authentic collection of Islamic books from various sources and perspectives.
        • -
        • It covers a wide range of topics such as tafsir, hadith, fiqh, siyer, akaid, kalam, tasawwuf, history, literature, and more.
        • -
        • It helps you to deepen your knowledge and understanding of Islam and its principles.
        • -
        • It enables you to compare and contrast different opinions and interpretations on various issues.
        • -
        • It saves you time and money by giving you access to hundreds of books in one place.
        • -
        • It enhances your reading experience by offering you various features and tools such as bookmarks, notes, highlights, zoom, etc.
        • -
        -

        How to download Darulkitap V3?

        -

        Downloading Darulkitap V3 is easy and simple. Just follow these steps:

        -
          -
        1. Go to the official website of Darulkitap at https://www.darulkitap.com/
        2. -
        3. Click on the "Download" button on the homepage.
        4. -
        5. Select the version that suits your operating system (Windows or Mac).
        6. -
        7. Wait for the download to complete.
        8. -
        9. Open the downloaded file and follow the instructions to install Darulkitap V3 on your computer.
        10. -
        -

        How to use Darulkitap V3?

        -

        Using Darulkitap V3 is easy and intuitive. Here are some of the main functions and tools that you can use:

        -
          -
        • To browse the books by category, click on the "Kitaplar" button on the top menu bar. You will see a list of categories such as tafsir, hadith, fiqh, etc. Click on any category to see the subcategories and the books under them.
        • -
        • To search for a specific book or keyword, click on the "Ara" button on the top menu bar. You will see a search box where you can type your query. You can also filter your search by category, author, publisher, or language.
        • -
        • To read a book, click on its title or cover image. You will see the book's details such as author, publisher, year, pages, etc. Click on the "Oku" button to open the book. You can use the arrows on the bottom right corner to navigate through the pages.
        • -
        • To bookmark a page or a book, click on the "İşaretle" button on the top right corner. You will see a list of your bookmarks under the "İşaretlerim" button on the top menu bar. You can also delete or edit your bookmarks from there.
        • -
        • To add notes or highlights to a page or a book, click on the "Notlar" button on the top right corner. You will see a list of your notes and highlights under the "Notlarım" button on the top menu bar. You can also delete or edit your notes and highlights from there.
        • -
        • To customize the font size, color, or background of the text, click on the "Ayarlar" button on the top right corner. You will see a panel where you can adjust these settings according to your preference.
        • -
        -

        What are the contents of Darulkitap V3?

        -

        Darulkitap V3 contains more than 900 Islamic books from various fields and disciplines. Here are some of the main categories and subcategories that you can find in Darulkitap V3:

        -

        Tafsir

        -

        Tafsir is the explanation and interpretation of the Quran. It helps us to understand the meaning, context, and implications of Allah's words. Tafsir can be based on different sources such as linguistic analysis, historical context, hadith reports, rational arguments, etc. Tafsir can also be classified into different types such as thematic tafsir, chronological tafsir, comparative tafsir, etc.

        -

        Darulkitap V3 download free
        -Darulkitap V3 full version
        -Darulkitap V3 crack
        -Darulkitap V3 serial key
        -Darulkitap V3 activation code
        -Darulkitap V3 online
        -Darulkitap V3 for PC
        -Darulkitap V3 for Windows
        -Darulkitap V3 for Mac
        -Darulkitap V3 for Android
        -Darulkitap V3 for iOS
        -Darulkitap V3 for Linux
        -Darulkitap V3 APK
        -Darulkitap V3 PDF
        -Darulkitap V3 e-book
        -Darulkitap V3 audio book
        -Darulkitap V3 video course
        -Darulkitap V3 review
        -Darulkitap V3 testimonials
        -Darulkitap V3 features
        -Darulkitap V3 benefits
        -Darulkitap V3 pros and cons
        -Darulkitap V3 comparison
        -Darulkitap V3 alternatives
        -Darulkitap V3 competitors
        -Darulkitap V3 discount
        -Darulkitap V3 coupon code
        -Darulkitap V3 promo code
        -Darulkitap V3 offer
        -Darulkitap V3 deal
        -Darulkitap V3 bonus
        -Darulkitap V3 refund policy
        -Darulkitap V3 guarantee
        -Darulkitap V3 support
        -Darulkitap V3 contact
        -Darulkitap V3 FAQ
        -Darulkitap V3 how to use
        -Darulkitap V3 tutorial
        -Darulkitap V3 guide
        -Darulkitap V3 tips and tricks
        -Darulkitap V3 best practices
        -Darulkitap V3 case studies
        -Darulkitap V3 success stories
        -Darulkitap V3 examples
        -Darulkitap V3 templates
        -Darulkitap V3 tools and resources
        -Darulkitap V3 updates and news
        -Darulkitap V3 blog posts and articles
        -Darulkitap V3 forum and community

        -

        Examples of tafsir books in Darulkitap V3

        - - - - - - - - - -
Title | Author | Description
Tefsir-i Kebir | Fahreddin Razi | A classical tafsir that covers various aspects such as linguistic analysis, theological debates, philosophical arguments, scientific facts, etc.
Tefsir-i Taberi | Muhammad ibn Jarir al-Tabari | A classical tafsir that relies mainly on hadith reports from Prophet Muhammad (PBUH) and his companions.
Tefsir-i İbn Kesir | Ismail ibn Kathir | A classical tafsir that summarizes al-Tabari's tafsir with additional comments from other sources.
Tefsir-i Kurtubi | Muhammad ibn Ahmad al-Qurtubi | A classical tafsir that focuses on fiqh issues derived from Quranic verses.
Tefsir-i Zemahşeri | Mahmud ibn Umar al-Zamakhshari | A classical tafsir that emphasizes linguistic analysis and rhetorical devices used in Quranic verses.
Tefsir-i Beydavi | Nasr al-Din al-Baydawi | A classical tafsir that simplifies al-Zamakhshari's tafsir with additional comments from other sources.
Tefsiri Kuran'dan Öğrenmek | Muhammed Esed | A modern tafsir that tries to understand Quranic verses based on their own context rather than external sources.
        Kur'an

        Hadith

        -

        Hadith is the sayings and actions of Prophet Muhammad (PBUH) and his companions. It is a source of guidance and legislation for Muslims after the Quran. Hadith can be classified into different types such as sahih (authentic), hasan (good), da'if (weak), etc. based on the reliability of the narrators and the chain of transmission. Hadith can also be categorized into different topics such as faith, worship, manners, ethics, etc.

        -

        Examples of hadith books in Darulkitap V3

        - - - - - - - - - - - - -
Title | Author | Description
Sahih al-Bukhari | Muhammad ibn Isma'il al-Bukhari | The most authentic and widely accepted collection of hadith that covers various aspects of Islam.
Sahih Muslim | Muslim ibn al-Hajjaj | The second most authentic and widely accepted collection of hadith that covers various aspects of Islam.
Sunan Abu Dawud | Abu Dawud al-Sijistani | A collection of hadith that focuses mainly on fiqh issues derived from hadith.
Sunan al-Tirmidhi | Muhammad ibn Isa al-Tirmidhi | A collection of hadith that includes comments and evaluations on the authenticity and value of hadith.
Sunan al-Nasa'i | Ahmad ibn Shu'ayb al-Nasa'i | A collection of hadith that focuses mainly on rituals and worship.
Sunan ibn Majah | Muhammad ibn Yazid ibn Majah | A collection of hadith that covers various topics such as fiqh, history, ethics, etc.
Muwatta Imam Malik | Malik ibn Anas | A collection of hadith and fiqh opinions that represents the practice of the people of Madinah.
Musnad Ahmad ibn Hanbal | Ahmad ibn Hanbal | A large collection of hadith arranged according to the names of the companions who narrated them.
Shama'il al-Muhammadiyya | Muhammad ibn Isa al-Tirmidhi | A collection of hadith that describes the physical and moral characteristics of Prophet Muhammad (PBUH).
Al-Adab al-Mufrad | Muhammad ibn Isma'il al-Bukhari | A collection of hadith that deals with manners and ethics in Islam.
        -

        Fiqh

        -

        Fiqh is the Islamic jurisprudence and law. It is derived from the Quran, the hadith, the consensus (ijma') of the scholars, and the analogy (qiyas) based on these sources. Fiqh covers various aspects of life such as worship, transactions, family, inheritance, criminal law, etc. Fiqh can also be divided into different schools of thought (madhhab) such as Hanafi, Maliki, Shafi'i, Hanbali, etc. based on different methodologies and interpretations.

        -

        Examples of fiqh books in Darulkitap V3

        - - -
Title | Author | Description
Hidayat al-Nahw (The Guidance to Arabic Grammar)
Siyer
-

        Siyer is the biography and history of Prophet Muhammad (PBUH) and his companions. It is a source of inspiration and guidance for Muslims to follow their example and learn from their achievements and challenges. Siyer can be based on different sources such as hadith, historical records, oral traditions, etc. Siyer can also be divided into different phases such as the pre-Islamic period, the Meccan period, the Medinan period, the conquests, etc.

        -

        Examples of siyer books in Darulkitap V3

        - - - - - - - - -
Title | Author | Description
Siyer-i Nebi | Ibn Hisham | A classical siyer book that is based on the earlier work of Ibn Ishaq. It covers the life of Prophet Muhammad (PBUH) from his birth to his death.
Ar-Raheeq Al-Makhtum (The Sealed Nectar) | Safiur Rahman al-Mubarakpuri | A modern siyer book that won the first prize in a competition held by the Muslim World League. It covers the life of Prophet Muhammad (PBUH) in a comprehensive and authentic way.
Hayatu's Sahabe (The Lives of the Companions) | Muhammad Yusuf Kandhlawi | A collection of stories and anecdotes about the companions of Prophet Muhammad (PBUH) and their virtues and sacrifices.
Al-Sira al-Nabawiyya (The Life of the Prophet) | Ibn Kathir | A classical siyer book that is based on various sources such as hadith, history, poetry, etc. It covers the life of Prophet Muhammad (PBUH) and his companions in detail.
Asr-ı Saadet (The Era of Bliss) | Mehmed Emin Yıldırım | A modern siyer book that is written in a simple and engaging style. It covers the life of Prophet Muhammad (PBUH) and his companions in chronological order.
Hz. Muhammed (SAV) İçin Bülbülün Kırk Şarkısı (The Nightingale's Forty Songs for Prophet Muhammad) | İskender Pala | A poetic siyer book that expresses love and admiration for Prophet Muhammad (PBUH) through forty songs.
        -

        Conclusion

        -

        Darulkitap V3 is a digital Islamic encyclopedia that contains more than 900 Islamic books from various fields and disciplines. It is a valuable resource for anyone who wants to learn more about Islam and its teachings. Darulkitap V3 is easy to download and use, and it offers various features and tools to enhance your reading experience. Darulkitap V3 covers a wide range of topics such as tafsir, hadith, fiqh, siyer, and more. You can browse, search, read, and bookmark the books you want. You can also compare and contrast different opinions and interpretations on various issues. Darulkitap V3 is a must-have for anyone who is interested in Islamic studies.

        -

        If you want to download Darulkitap V3, you can visit their official website at https://www.darulkitap.com/ and follow the instructions there. You can also contact them for any questions or feedback at info@darulkitap.com.

        -

        We hope that this article has given you some useful information about Darulkitap V3 and its contents. We encourage you to download Darulkitap V3 and explore its rich and authentic collection of Islamic books. May Allah guide us all to the truth and grant us knowledge and wisdom.

        -

        FAQs

        -
          -
        • Q: What is Darulkitap V3?
        • -
        • A: Darulkitap V3 is a digital Islamic encyclopedia that contains more than 900 Islamic books from various fields and disciplines.
        • -
        • Q: How can I download Darulkitap V3?
        • -
        • A: You can download Darulkitap V3 from their official website at https://www.darulkitap.com/ by clicking on the "Download" button and selecting the version that suits your operating system.
        • -
        • Q: What are some of the main categories of books in Darulkitap V3?
        • -
        • A: Some of the main categories of books in Darulkitap V3 are tafsir, hadith, fiqh, siyer, akaid, kalam, tasawwuf, history, literature, etc.
        • -
        • Q: What are some of the features and tools that Darulkitap V3 offers?
        • -
        • A: Some of the features and tools that Darulkitap V3 offers are bookmarks, notes, highlights, zoom, font size, color, background, etc.
        • -
        • Q: What are some of the benefits of using Darulkitap V3?
        • -
        • A: Some of the benefits of using Darulkitap V3 are:
        • -
            -
          • It provides you with a comprehensive and authentic collection of Islamic books from various sources and perspectives.
          • -
• It covers a wide range of topics such as tafsir, hadith, fiqh, siyer, akaid, and more.

            Akaid

            -

            Akaid is the Islamic creed and belief. It is based on the Quran, the hadith, and the rational proofs that establish the existence and attributes of Allah, the angels, the prophets, the scriptures, the day of judgment, the destiny, etc. Akaid can also be divided into different schools of thought (madhhab) such as Ash'ari, Maturidi, Athari, etc. based on different methodologies and interpretations.

            -

            Examples of akaid books in Darulkitap V3

            - - - - - - - - - - - - -
Title | Author | Description
Akaid-i Nasafi | Umar al-Nasafi | A classical akaid book that summarizes the Maturidi creed and belief.
Sharh al-Aqaid al-Nasafiyya | Sa'd al-Din al-Taftazani | A classical akaid book that explains and defends the Maturidi creed and belief.
Al-Aqida al-Tahawiyya | Abu Ja'far al-Tahawi | A classical akaid book that summarizes the Sunni creed and belief.
Sharh al-Aqida al-Tahawiyya | Ibn Abi al-Izz al-Hanafi | A classical akaid book that explains and defends the Sunni creed and belief.
Al-Fiqh al-Akbar | Abu Hanifa | A classical akaid book that discusses the basic principles of Islamic belief.
Al-Aqida al-Wasitiyya | Ibn Taymiyya | A classical akaid book that summarizes the Athari creed and belief.
Al-Aqida al-Safadiyya | Ibn Qudama al-Maqdisi | A classical akaid book that summarizes the Athari creed and belief.
Kitab al-Tawhid | Muhammad ibn Abd al-Wahhab | A modern akaid book that emphasizes the concept of monotheism and warns against polytheism.
Islam Kay Bunyadi Aqaid (The Basic Beliefs of Islam) | Muhammad Tahir-ul-Qadri | A modern akaid book that explains the basic beliefs of Islam in a simple and comprehensive way.
Tüm Rasullerin Ortak Daveti (The Common Call of All Prophets) | Mehmed Emin Yıldırım | A modern akaid book that shows the unity and harmony of all prophets in their message of monotheism and submission to Allah.
            -

            0a6ba089eb
            -
            -
            \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Download Windows 7 Loader Vista Slic Loader 2.4.8 X86.and.x64 .rar Indows 7 Loader Vista Sl 5 What You Need to Know Before You Download and Install.md b/spaces/raedeXanto/academic-chatgpt-beta/Download Windows 7 Loader Vista Slic Loader 2.4.8 X86.and.x64 .rar Indows 7 Loader Vista Sl 5 What You Need to Know Before You Download and Install.md deleted file mode 100644 index 5c501a65ad7be6ac5ff65427936e79a486b36225..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Download Windows 7 Loader Vista Slic Loader 2.4.8 X86.and.x64 .rar Indows 7 Loader Vista Sl 5 What You Need to Know Before You Download and Install.md +++ /dev/null @@ -1,117 +0,0 @@ -
            -

            What is Windows 7 Loader Vista Slic Loader 2.4.8?

            -

            If you are looking for a way to activate your Windows 7 or Vista operating system without paying for a license key, you may have come across a tool called Windows 7 Loader Vista Slic Loader 2.4.8. But what is this tool and how does it work?

            -

            Download Windows 7 Loader Vista Slic Loader 2.4.8 X86.and.x64 .rar Indows 7 Loader Vista Sl 5


            DOWNLOADhttps://tinourl.com/2uL5iB



            -

Windows 7 Loader Vista Slic Loader 2.4.8 is a software tool that can bypass the activation process of Windows 7 and Vista by injecting a System Licensed Internal Code (SLIC) into your system's BIOS. This way, your system will be recognized as a genuine and licensed one by Microsoft's servers.

            -

            The tool has several features that make it a popular choice among users who want to activate their Windows 7 or Vista for free. Some of these features are:

            -
              -
            • It can activate any version of Windows 7 and Vista, including Starter, Home Basic, Home Premium, Professional, Ultimate, Enterprise, etc.
            • -
            • It can activate both 32-bit and 64-bit versions of Windows 7 and Vista.
            • -
            • It can activate Windows 7 and Vista offline or online.
            • -
            • It can activate Windows 7 and Vista without modifying any system files or registry entries.
            • -
            • It can activate Windows 7 and Vista without affecting your system's performance or stability.
            • -
            -

            Why do you need Windows 7 Loader Vista Slic Loader 2.4.8?

            -

            You may be wondering why you need to use Windows 7 Loader Vista Slic Loader 2.4.8 to activate your Windows 7 or Vista operating system. After all, you can always buy a license key from Microsoft or use another activator that is available online.

            -

            However, there are some benefits of using Windows 7 Loader Vista Slic Loader 2.4.8 that you may not get from other methods. These benefits are:

            -
              -
            • You can save money by not buying a license key from Microsoft.
            • -
            • You can enjoy all the features and functions of Windows 7 and Vista without any limitations or restrictions.
            • -
            • You can avoid annoying notifications and reminders that ask you to activate your Windows 7 or Vista.
            • -
            • You can prevent your system from being blacklisted or blocked by Microsoft's servers.
            • -
            • You can update your system with the latest security patches and bug fixes from Microsoft without any problems.
            • -
            -

            How to download Windows 7 Loader Vista Slic Loader 2.4.8?

            -

            If you are interested in using Windows 7 Loader Vista Slic Loader 2.4.8 to activate your Windows 7 or Vista operating system, you need to download the tool from a reliable source first.

            -

            There are many websites that claim to offer the download link for Windows 7 Loader Vista Slic Loader 2.4.8, but not all of them are trustworthy or safe. Some of them may contain viruses, malware, spyware, or other harmful programs that can damage your system or steal your personal information.

            -

            To avoid such risks, you should only download Windows 7 Loader Vista Slic Loader 2.4.8 from a reputable website that has positive reviews and feedback from other users.

            -

            One such website is https://github.com/Dir3ctr1x/EzWindSLIC, which is the official GitHub repository for the tool. Here, you can find the latest version of the tool along with detailed instructions on how to use it.

            -

            How to download Windows 7 Loader Vista Slic Loader 2.4.8 for free
            -Windows 7 Loader Vista Slic Loader 2.4.8 activation crack download
            -Windows 7 Loader Vista Slic Loader 2.4.8 X86 and X64 compatible
            -Download Windows 7 Loader Vista Slic Loader 2.4.8 rar file
            -Windows 7 Loader Vista Slic Loader 2.4.8 latest version download
            -Windows 7 Loader Vista Slic Loader 2.4.8 tutorial and guide
            -Windows 7 Loader Vista Slic Loader 2.4.8 reviews and ratings
            -Windows 7 Loader Vista Slic Loader 2.4.8 features and benefits
            -Windows 7 Loader Vista Slic Loader 2.4.8 system requirements and compatibility
            -Windows 7 Loader Vista Slic Loader 2.4.8 download link and password
            -Windows 7 Loader Vista Slic Loader 2.4.8 virus scan and safety
            -Windows 7 Loader Vista Slic Loader 2.4.8 alternatives and comparisons
            -Windows 7 Loader Vista Slic Loader 2.4.8 support and customer service
            -Windows 7 Loader Vista Slic Loader 2.4.8 license key and serial number
            -Windows 7 Loader Vista Slic Loader 2.4.8 update and patch download
            -Windows 7 Loader Vista Slic Loader 2.4.8 error and problem fix
            -Windows 7 Loader Vista Slic Loader 2.4.8 backup and restore
            -Windows 7 Loader Vista Slic Loader 2.4.8 uninstall and remove
            -Windows 7 Loader Vista Slic Loader 2.4.8 tips and tricks
            -Windows 7 Loader Vista Slic Loader 2.4.8 forum and community
            -Download Windows 7 Ultimate with Vista Slic Loader 2.4.8
            -Download Windows 7 Professional with Vista Slic Loader 2.4.8
            -Download Windows 7 Home Premium with Vista Slic Loader 2.4.8
            -Download Windows 7 Starter with Vista Slic Loader 2.4.8
            -Download Windows Vista Ultimate with Slic Loader 2.4.8
            -Download Windows Vista Business with Slic Loader 2.4.8
            -Download Windows Vista Home Basic with Slic Loader 2.4.8
            -Download Windows Vista Starter with Slic Loader 2.4.8
            -Download Slic Toolkit v3 for Windows XP, Vista, and Win7
            -Download OEM Cert Collection for Win XP, WinVista, Win7
            -Download Daz's Bootloader v1 for Win XP, WinVista, Win7
            -Download Chew-WGA v0 for Win XP, WinVista, Win7
            -Download RemoveWAT v3 for Win XP, WinVista, Win7
            -Download KMSpico v10 for Win XP, WinVista, Win7
            -Download Microsoft Toolkit v3 for Win XP, WinVista, Win7
            -Download Re-loader Activator v6 for Win XP, WinVista, Win7
            -Download KMSPico Portable v11 for Win XP, WinVista, Win7
            -Download KMSAuto Net v1 for Win XP, WinVista, Win7
            -Download KMSAuto Lite v1 for Win XP, WinVista, Win7
            -Download KMSAuto Helper v1 for Win XP, WinVista, Win7
            -Download KMSAuto Easy v1 for Win XP, WinVista, Win7
            -Download KMS Cleaner v3 for Win XP, WinVista, Win7
            -Download KMS Online v3 for Win XP, WinVista, Win7
            -Download KMS VL All v9 for Win XP, WinVista, Win7
            -Download KMS Activator Ultimate v5 for Win XP, WinVista, Win7
            -Download KMS Nano v25 for Win XP, WinVista, Win7
            -Download KMS Magic v10 for Win XP, WinVista, Win7
            -Download KMS Server Service v3 for Win XP, WinVista, Win7
            -Download KMS GUI ELDI v17 for Win XP,WinVista ,Win7

            -

            How to use Windows 7 Loader Vista Slic Loader 2.4.8?

            -

            After downloading Windows 7 Loader Vista Slic Loader 2.4.8 from a reliable source, you need to follow these steps to use it:

            -
              -
            1. Extract the downloaded file using a program like WinRAR or WinZip.
            2. -
            3. Run the extracted file as an administrator.
            4. -
            5. Select your operating system (Windows 7 or Vista) from the drop-down menu.
            6. -
            7. Select your edition (Starter, Home Basic, Home Premium, Professional, Ultimate, Enterprise, etc.) from the drop-down menu.
            8. -
            9. Select your architecture (32-bit or 64-bit) from the drop-down menu.
            10. -
            11. Click on "Install" button to start the activation process.
            12. -
            13. Wait for a few seconds until the activation is completed.
            14. -
            15. Restart your system to apply the changes.
            16. -
            -

Congratulations! You have successfully activated your Windows 7 or Vista operating system using Windows 7 Loader Vista Slic Loader 2.4.8!

            -

What are the advantages of Windows 7 Loader Vista Slic Loader 2.4.8?

            -

Windows 7 Loader Vista Slic Loader 2.4.8 is one of the best tools for activating Windows 7 and Vista operating systems for free. It has several advantages that make it superior to other activators available online. Some of these advantages are:

            -

            Compatibility

            -

Windows 7 Loader Vista Slic Loader 2.4.8 is compatible with various versions of Windows 7 and Vista operating systems, including Starter, Home Basic, Home Premium, Professional, Ultimate, Enterprise, etc. It also supports both 32-bit and 64-bit versions of Windows 7 and Vista. This means that you can use it on any system that runs on Windows 7 or Vista without any issues.

            -

            Safety

            -

Windows 7 Loader Vista Slic Loader 2.4.8 is safe to use as it does not contain any viruses, malware, spyware, or other harmful programs that can damage your system or steal your personal information. It also does not modify any system files or registry entries that may affect your system's performance or stability. It only injects a SLIC into your system's BIOS that allows your system to be recognized as a genuine and licensed one by Microsoft's servers.

            -

            Simplicity

            -

Windows 7 Loader Vista Slic Loader 2.4.8 is simple to use as it does not require any technical skills or knowledge to operate. It has a user-friendly interface that guides you through the activation process step by step. You only need to select your operating system, edition, and architecture, and then click the "Install" button.

            Efficiency

            -

Windows 7 Loader Vista Slic Loader 2.4.8 is efficient as it can activate Windows 7 and Vista in a matter of seconds. It does not require any reboot or restart to complete the activation process. It also does not consume any system resources or affect your system's speed or performance.

            -

            Permanence

            -

Windows 7 Loader Vista Slic Loader 2.4.8 is permanent as it provides a genuine and lasting activation of Windows 7 and Vista. It does not expire or require any renewal or reactivation. It also passes the Windows Genuine Advantage (WGA) validation and allows you to access all the features and services of Microsoft, such as Windows Update, Microsoft Office, Microsoft Security Essentials, etc.

            -

What are the disadvantages of Windows 7 Loader Vista Slic Loader 2.4.8?

            -

Windows 7 Loader Vista Slic Loader 2.4.8 is not a perfect tool and it has some disadvantages that you should be aware of before using it. These disadvantages are:

            -

            Legality

            -

Windows 7 Loader Vista Slic Loader 2.4.8 is illegal and violates Microsoft's terms of service. It is considered a form of software piracy and theft that can result in legal consequences and penalties. It also infringes the intellectual property rights of Microsoft and its partners.

            -

            Reliability

            -

Windows 7 Loader Vista Slic Loader 2.4.8 is not reliable and may not work on some systems or may cause errors. It may fail to activate Windows 7 or Vista due to various factors, such as incompatible hardware, corrupted BIOS, outdated drivers, etc. It may also cause system instability, crashes, blue screens, boot failures, etc.

            -

            Updates

            -

            Windows 7 Loader Vista Slic Loader 2.4.8 is not compatible with future updates of Windows 7 or Vista. It may be detected and disabled by Microsoft's servers or anti-piracy measures. It may also prevent you from installing or receiving important updates that can improve your system's security and performance.

            -

            Conclusion

            -

            In conclusion, Windows 7 Loader Vista Slic Loader 2.4.8 is a tool that can activate Windows 7 or Vista operating systems for free by injecting a SLIC into your system's BIOS. It has several advantages, such as compatibility, safety, simplicity, efficiency, and permanence. However, it also has some disadvantages, such as legality, reliability, and updates. Therefore, you should use it at your own risk and discretion.

            -

            If you want to download Windows 7 Loader Vista Slic Loader 2.4.8, you can visit https://github.com/Dir3ctr1x/EzWindSLIC, which is the official GitHub repository for the tool. Here, you can find the latest version of the tool along with detailed instructions on how to use it.

            -

            If you have any questions or feedback about Windows 7 Loader Vista Slic Loader 2.4.8, feel free to leave a comment below.

            -

            Thank you for reading this article and I hope you found it helpful and informative.

- **FAQs**
- Q: Is Windows 7 Loader Vista Slic Loader 2.4.8 safe to use?
- A: Windows 7 Loader Vista Slic Loader 2.4.8 is safe to use as long as you download it from a reliable source and scan it with an antivirus program before using it. However, it is illegal and violates Microsoft's terms of service.
- Q: Does Windows 7 Loader Vista Slic Loader 2.4.8 work on UEFI-GPT systems?
- A: Yes, Windows 7 Loader Vista Slic Loader 2.4.8 works on UEFI-GPT systems using SLIC emulation technique.
- Q: How can I uninstall Windows 7 Loader Vista Slic Loader 2.4.8?
- A: To uninstall Windows 7 Loader Vista Slic Loader 2.4.8, you need to run the tool again and select "Uninstall" option from the drop-down menu.
- Q: What are some alternatives to Windows 7 Loader Vista Slic Loader 2.4.8?
- A: Some alternatives to Windows 7 Loader Vista Slic Loader 2.4.8 are:
  - Windows Loader by Daz: A popular tool that can activate Windows 7 by emulating an OEM certificate.
  - KMSpico: A tool that can activate Windows 7 by emulating a KMS server.
  - Microsoft Toolkit: A tool that can activate Windows 7 by using EZ-Activator or AutoKMS modules.
- Q: Where can I find more information about Windows 7 Loader Vista Slic Loader 2.4.8?
- A: You can find more information about Windows 7 Loader Vista Slic Loader 2.4.8 on its official GitHub repository or on various online forums.

            0a6ba089eb
            -
            -
            \ No newline at end of file diff --git a/spaces/rbigare/stablediffusionapi-architecture-tuned-model/README.md b/spaces/rbigare/stablediffusionapi-architecture-tuned-model/README.md deleted file mode 100644 index 3dfc5212b6c85f9458c9d3ccfdebc541bc117fd3..0000000000000000000000000000000000000000 --- a/spaces/rbigare/stablediffusionapi-architecture-tuned-model/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Stablediffusionapi Architecture Tuned Model -emoji: 🐠 -colorFrom: purple -colorTo: red -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false -license: openrail ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Adobe.Acrobat.3D.V8.10.Keygen-HeartBug.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Adobe.Acrobat.3D.V8.10.Keygen-HeartBug.md deleted file mode 100644 index dbb0237eec28ac2239bddfd45cb502603dec2a66..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Adobe.Acrobat.3D.V8.10.Keygen-HeartBug.md +++ /dev/null @@ -1,11 +0,0 @@ -

            Adobe.Acrobat.3D.V8.10.Keygen-HeartBug


            Download File ✸✸✸ https://urlgoal.com/2uCNa8



            -
-Here you can order children's clothing in bulk from the manufacturer. -We guarantee high quality and low price for all products. -Buy clothes. -Buy women's winter boots (dutiki) with free shipping in Russia. 8a78ff9644
            -
            -
            -

            diff --git a/spaces/renatotn7/teste2/gfpgan/models/__init__.py b/spaces/renatotn7/teste2/gfpgan/models/__init__.py deleted file mode 100644 index 6afad57a3794b867dabbdb617a16355a24d6a8b3..0000000000000000000000000000000000000000 --- a/spaces/renatotn7/teste2/gfpgan/models/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -import importlib -from basicsr.utils import scandir -from os import path as osp - -# automatically scan and import model modules for registry -# scan all the files that end with '_model.py' under the model folder -model_folder = osp.dirname(osp.abspath(__file__)) -model_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(model_folder) if v.endswith('_model.py')] -# import all the model modules -_model_modules = [importlib.import_module(f'gfpgan.models.{file_name}') for file_name in model_filenames] diff --git a/spaces/rfrossard/Image-and-3D-Model-Creator/PIFu/lib/model/ConvFilters.py b/spaces/rfrossard/Image-and-3D-Model-Creator/PIFu/lib/model/ConvFilters.py deleted file mode 100644 index 1348ddea27e1bb3b0a65592bf78c92305dce0bd7..0000000000000000000000000000000000000000 --- a/spaces/rfrossard/Image-and-3D-Model-Creator/PIFu/lib/model/ConvFilters.py +++ /dev/null @@ -1,112 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -import torchvision.models.resnet as resnet -import torchvision.models.vgg as vgg - - -class MultiConv(nn.Module): - def __init__(self, filter_channels): - super(MultiConv, self).__init__() - self.filters = [] - - for l in range(0, len(filter_channels) - 1): - self.filters.append( - nn.Conv2d(filter_channels[l], filter_channels[l + 1], kernel_size=4, stride=2)) - self.add_module("conv%d" % l, self.filters[l]) - - def forward(self, image): - ''' - :param image: [BxC_inxHxW] tensor of input image - :return: list of [BxC_outxHxW] tensors of output features - ''' - y = image - # y = F.relu(self.bn0(self.conv0(y)), True) - feat_pyramid = [y] - for i, f in enumerate(self.filters): - y = f(y) - if i != len(self.filters) - 1: - y = F.leaky_relu(y) - # y = F.max_pool2d(y, kernel_size=2, stride=2) - feat_pyramid.append(y) - return feat_pyramid - - -class Vgg16(torch.nn.Module): - def __init__(self): - super(Vgg16, self).__init__() - vgg_pretrained_features = vgg.vgg16(pretrained=True).features - self.slice1 = torch.nn.Sequential() - self.slice2 = torch.nn.Sequential() - self.slice3 = torch.nn.Sequential() - self.slice4 = torch.nn.Sequential() - self.slice5 = torch.nn.Sequential() - - for x in range(4): - self.slice1.add_module(str(x), vgg_pretrained_features[x]) - for x in range(4, 9): - self.slice2.add_module(str(x), vgg_pretrained_features[x]) - for x in range(9, 16): - self.slice3.add_module(str(x), vgg_pretrained_features[x]) - for x in range(16, 23): - self.slice4.add_module(str(x), vgg_pretrained_features[x]) - for x in range(23, 30): - self.slice5.add_module(str(x), vgg_pretrained_features[x]) - - def forward(self, X): - h = self.slice1(X) - h_relu1_2 = h - h = self.slice2(h) - h_relu2_2 = h - h = self.slice3(h) - h_relu3_3 = h - h = self.slice4(h) - h_relu4_3 = h - h = self.slice5(h) - h_relu5_3 = h - - return [h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3, h_relu5_3] - - -class ResNet(nn.Module): - def __init__(self, model='resnet18'): - super(ResNet, self).__init__() - - if model == 'resnet18': - net = resnet.resnet18(pretrained=True) - elif model == 'resnet34': - net = resnet.resnet34(pretrained=True) - elif model == 'resnet50': - net = resnet.resnet50(pretrained=True) - else: - raise NameError('Unknown Fan Filter 
setting!') - - self.conv1 = net.conv1 - - self.pool = net.maxpool - self.layer0 = nn.Sequential(net.conv1, net.bn1, net.relu) - self.layer1 = net.layer1 - self.layer2 = net.layer2 - self.layer3 = net.layer3 - self.layer4 = net.layer4 - - def forward(self, image): - ''' - :param image: [BxC_inxHxW] tensor of input image - :return: list of [BxC_outxHxW] tensors of output features - ''' - - y = image - feat_pyramid = [] - y = self.layer0(y) - feat_pyramid.append(y) - y = self.layer1(self.pool(y)) - feat_pyramid.append(y) - y = self.layer2(y) - feat_pyramid.append(y) - y = self.layer3(y) - feat_pyramid.append(y) - y = self.layer4(y) - feat_pyramid.append(y) - - return feat_pyramid diff --git a/spaces/rfrossard/Image-and-3D-Model-Creator/PIFu/lib/model/HGPIFuNet.py b/spaces/rfrossard/Image-and-3D-Model-Creator/PIFu/lib/model/HGPIFuNet.py deleted file mode 100644 index 4771715345afcf326b3b0e64717517801fe75a1c..0000000000000000000000000000000000000000 --- a/spaces/rfrossard/Image-and-3D-Model-Creator/PIFu/lib/model/HGPIFuNet.py +++ /dev/null @@ -1,142 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -from .BasePIFuNet import BasePIFuNet -from .SurfaceClassifier import SurfaceClassifier -from .DepthNormalizer import DepthNormalizer -from .HGFilters import * -from ..net_util import init_net - - -class HGPIFuNet(BasePIFuNet): - ''' - HG PIFu network uses Hourglass stacks as the image filter. - It does the following: - 1. Compute image feature stacks and store it in self.im_feat_list - self.im_feat_list[-1] is the last stack (output stack) - 2. Calculate calibration - 3. If training, it index on every intermediate stacks, - If testing, it index on the last stack. - 4. Classification. - 5. During training, error is calculated on all stacks. - ''' - - def __init__(self, - opt, - projection_mode='orthogonal', - error_term=nn.MSELoss(), - ): - super(HGPIFuNet, self).__init__( - projection_mode=projection_mode, - error_term=error_term) - - self.name = 'hgpifu' - - self.opt = opt - self.num_views = self.opt.num_views - - self.image_filter = HGFilter(opt) - - self.surface_classifier = SurfaceClassifier( - filter_channels=self.opt.mlp_dim, - num_views=self.opt.num_views, - no_residual=self.opt.no_residual, - last_op=nn.Sigmoid()) - - self.normalizer = DepthNormalizer(opt) - - # This is a list of [B x Feat_i x H x W] features - self.im_feat_list = [] - self.tmpx = None - self.normx = None - - self.intermediate_preds_list = [] - - init_net(self) - - def filter(self, images): - ''' - Filter the input images - store all intermediate features. - :param images: [B, C, H, W] input images - ''' - self.im_feat_list, self.tmpx, self.normx = self.image_filter(images) - # If it is not in training, only produce the last im_feat - if not self.training: - self.im_feat_list = [self.im_feat_list[-1]] - - def query(self, points, calibs, transforms=None, labels=None): - ''' - Given 3D points, query the network predictions for each point. - Image features should be pre-computed before this call. - store all intermediate features. - query() function may behave differently during training/testing. 
- :param points: [B, 3, N] world space coordinates of points - :param calibs: [B, 3, 4] calibration matrices for each image - :param transforms: Optional [B, 2, 3] image space coordinate transforms - :param labels: Optional [B, Res, N] gt labeling - :return: [B, Res, N] predictions for each point - ''' - if labels is not None: - self.labels = labels - - xyz = self.projection(points, calibs, transforms) - xy = xyz[:, :2, :] - z = xyz[:, 2:3, :] - - in_img = (xy[:, 0] >= -1.0) & (xy[:, 0] <= 1.0) & (xy[:, 1] >= -1.0) & (xy[:, 1] <= 1.0) - - z_feat = self.normalizer(z, calibs=calibs) - - if self.opt.skip_hourglass: - tmpx_local_feature = self.index(self.tmpx, xy) - - self.intermediate_preds_list = [] - - for im_feat in self.im_feat_list: - # [B, Feat_i + z, N] - point_local_feat_list = [self.index(im_feat, xy), z_feat] - - if self.opt.skip_hourglass: - point_local_feat_list.append(tmpx_local_feature) - - point_local_feat = torch.cat(point_local_feat_list, 1) - - # out of image plane is always set to 0 - pred = in_img[:,None].float() * self.surface_classifier(point_local_feat) - self.intermediate_preds_list.append(pred) - - self.preds = self.intermediate_preds_list[-1] - - def get_im_feat(self): - ''' - Get the image filter - :return: [B, C_feat, H, W] image feature after filtering - ''' - return self.im_feat_list[-1] - - def get_error(self): - ''' - Hourglass has its own intermediate supervision scheme - ''' - error = 0 - for preds in self.intermediate_preds_list: - error += self.error_term(preds, self.labels) - error /= len(self.intermediate_preds_list) - - return error - - def forward(self, images, points, calibs, transforms=None, labels=None): - # Get image feature - self.filter(images) - - # Phase 2: point query - self.query(points=points, calibs=calibs, transforms=transforms, labels=labels) - - # get the prediction - res = self.get_preds() - - # get the error - error = self.get_error() - - return res, error \ No newline at end of file diff --git a/spaces/rfrossard/Image-and-3D-Model-Creator/PIFu/lib/model/ResBlkPIFuNet.py b/spaces/rfrossard/Image-and-3D-Model-Creator/PIFu/lib/model/ResBlkPIFuNet.py deleted file mode 100644 index 26848408569fd3903a338e023aefb832f942f0e3..0000000000000000000000000000000000000000 --- a/spaces/rfrossard/Image-and-3D-Model-Creator/PIFu/lib/model/ResBlkPIFuNet.py +++ /dev/null @@ -1,201 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -from .BasePIFuNet import BasePIFuNet -import functools -from .SurfaceClassifier import SurfaceClassifier -from .DepthNormalizer import DepthNormalizer -from ..net_util import * - - -class ResBlkPIFuNet(BasePIFuNet): - def __init__(self, opt, - projection_mode='orthogonal'): - if opt.color_loss_type == 'l1': - error_term = nn.L1Loss() - elif opt.color_loss_type == 'mse': - error_term = nn.MSELoss() - - super(ResBlkPIFuNet, self).__init__( - projection_mode=projection_mode, - error_term=error_term) - - self.name = 'respifu' - self.opt = opt - - norm_type = get_norm_layer(norm_type=opt.norm_color) - self.image_filter = ResnetFilter(opt, norm_layer=norm_type) - - self.surface_classifier = SurfaceClassifier( - filter_channels=self.opt.mlp_dim_color, - num_views=self.opt.num_views, - no_residual=self.opt.no_residual, - last_op=nn.Tanh()) - - self.normalizer = DepthNormalizer(opt) - - init_net(self) - - def filter(self, images): - ''' - Filter the input images - store all intermediate features. 
- :param images: [B, C, H, W] input images - ''' - self.im_feat = self.image_filter(images) - - def attach(self, im_feat): - self.im_feat = torch.cat([im_feat, self.im_feat], 1) - - def query(self, points, calibs, transforms=None, labels=None): - ''' - Given 3D points, query the network predictions for each point. - Image features should be pre-computed before this call. - store all intermediate features. - query() function may behave differently during training/testing. - :param points: [B, 3, N] world space coordinates of points - :param calibs: [B, 3, 4] calibration matrices for each image - :param transforms: Optional [B, 2, 3] image space coordinate transforms - :param labels: Optional [B, Res, N] gt labeling - :return: [B, Res, N] predictions for each point - ''' - if labels is not None: - self.labels = labels - - xyz = self.projection(points, calibs, transforms) - xy = xyz[:, :2, :] - z = xyz[:, 2:3, :] - - z_feat = self.normalizer(z) - - # This is a list of [B, Feat_i, N] features - point_local_feat_list = [self.index(self.im_feat, xy), z_feat] - # [B, Feat_all, N] - point_local_feat = torch.cat(point_local_feat_list, 1) - - self.preds = self.surface_classifier(point_local_feat) - - def forward(self, images, im_feat, points, calibs, transforms=None, labels=None): - self.filter(images) - - self.attach(im_feat) - - self.query(points, calibs, transforms, labels) - - res = self.get_preds() - error = self.get_error() - - return res, error - -class ResnetBlock(nn.Module): - """Define a Resnet block""" - - def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias, last=False): - """Initialize the Resnet block - A resnet block is a conv block with skip connections - We construct a conv block with build_conv_block function, - and implement skip connections in function. - Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf - """ - super(ResnetBlock, self).__init__() - self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias, last) - - def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias, last=False): - """Construct a convolutional block. - Parameters: - dim (int) -- the number of channels in the conv layer. - padding_type (str) -- the name of padding layer: reflect | replicate | zero - norm_layer -- normalization layer - use_dropout (bool) -- if use dropout layers. 
- use_bias (bool) -- if the conv layer uses bias or not - Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU)) - """ - conv_block = [] - p = 0 - if padding_type == 'reflect': - conv_block += [nn.ReflectionPad2d(1)] - elif padding_type == 'replicate': - conv_block += [nn.ReplicationPad2d(1)] - elif padding_type == 'zero': - p = 1 - else: - raise NotImplementedError('padding [%s] is not implemented' % padding_type) - - conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)] - if use_dropout: - conv_block += [nn.Dropout(0.5)] - - p = 0 - if padding_type == 'reflect': - conv_block += [nn.ReflectionPad2d(1)] - elif padding_type == 'replicate': - conv_block += [nn.ReplicationPad2d(1)] - elif padding_type == 'zero': - p = 1 - else: - raise NotImplementedError('padding [%s] is not implemented' % padding_type) - if last: - conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias)] - else: - conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)] - - return nn.Sequential(*conv_block) - - def forward(self, x): - """Forward function (with skip connections)""" - out = x + self.conv_block(x) # add skip connections - return out - - -class ResnetFilter(nn.Module): - """Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations. - We adapt Torch code and idea from Justin Johnson's neural style transfer project(https://github.com/jcjohnson/fast-neural-style) - """ - - def __init__(self, opt, input_nc=3, output_nc=256, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, - n_blocks=6, padding_type='reflect'): - """Construct a Resnet-based generator - Parameters: - input_nc (int) -- the number of channels in input images - output_nc (int) -- the number of channels in output images - ngf (int) -- the number of filters in the last conv layer - norm_layer -- normalization layer - use_dropout (bool) -- if use dropout layers - n_blocks (int) -- the number of ResNet blocks - padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero - """ - assert (n_blocks >= 0) - super(ResnetFilter, self).__init__() - if type(norm_layer) == functools.partial: - use_bias = norm_layer.func == nn.InstanceNorm2d - else: - use_bias = norm_layer == nn.InstanceNorm2d - - model = [nn.ReflectionPad2d(3), - nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias), - norm_layer(ngf), - nn.ReLU(True)] - - n_downsampling = 2 - for i in range(n_downsampling): # add downsampling layers - mult = 2 ** i - model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias), - norm_layer(ngf * mult * 2), - nn.ReLU(True)] - - mult = 2 ** n_downsampling - for i in range(n_blocks): # add ResNet blocks - if i == n_blocks - 1: - model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, - use_dropout=use_dropout, use_bias=use_bias, last=True)] - else: - model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, - use_dropout=use_dropout, use_bias=use_bias)] - - if opt.use_tanh: - model += [nn.Tanh()] - self.model = nn.Sequential(*model) - - def forward(self, input): - """Standard forward""" - return self.model(input) diff --git a/spaces/rinong/StyleGAN-NADA/e4e/options/train_options.py b/spaces/rinong/StyleGAN-NADA/e4e/options/train_options.py deleted file mode 100644 index 
583ea1423fdc9a649cd7044d74d554bf0ac2bf51..0000000000000000000000000000000000000000 --- a/spaces/rinong/StyleGAN-NADA/e4e/options/train_options.py +++ /dev/null @@ -1,84 +0,0 @@ -from argparse import ArgumentParser -from configs.paths_config import model_paths - - -class TrainOptions: - - def __init__(self): - self.parser = ArgumentParser() - self.initialize() - - def initialize(self): - self.parser.add_argument('--exp_dir', type=str, help='Path to experiment output directory') - self.parser.add_argument('--dataset_type', default='ffhq_encode', type=str, - help='Type of dataset/experiment to run') - self.parser.add_argument('--encoder_type', default='Encoder4Editing', type=str, help='Which encoder to use') - - self.parser.add_argument('--batch_size', default=4, type=int, help='Batch size for training') - self.parser.add_argument('--test_batch_size', default=2, type=int, help='Batch size for testing and inference') - self.parser.add_argument('--workers', default=4, type=int, help='Number of train dataloader workers') - self.parser.add_argument('--test_workers', default=2, type=int, - help='Number of test/inference dataloader workers') - - self.parser.add_argument('--learning_rate', default=0.0001, type=float, help='Optimizer learning rate') - self.parser.add_argument('--optim_name', default='ranger', type=str, help='Which optimizer to use') - self.parser.add_argument('--train_decoder', default=False, type=bool, help='Whether to train the decoder model') - self.parser.add_argument('--start_from_latent_avg', action='store_true', - help='Whether to add average latent vector to generate codes from encoder.') - self.parser.add_argument('--lpips_type', default='alex', type=str, help='LPIPS backbone') - - self.parser.add_argument('--lpips_lambda', default=0.8, type=float, help='LPIPS loss multiplier factor') - self.parser.add_argument('--id_lambda', default=0.1, type=float, help='ID loss multiplier factor') - self.parser.add_argument('--l2_lambda', default=1.0, type=float, help='L2 loss multiplier factor') - - self.parser.add_argument('--stylegan_weights', default=model_paths['stylegan_ffhq'], type=str, - help='Path to StyleGAN model weights') - self.parser.add_argument('--stylegan_size', default=1024, type=int, - help='size of pretrained StyleGAN Generator') - self.parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to pSp model checkpoint') - - self.parser.add_argument('--max_steps', default=500000, type=int, help='Maximum number of training steps') - self.parser.add_argument('--image_interval', default=100, type=int, - help='Interval for logging train images during training') - self.parser.add_argument('--board_interval', default=50, type=int, - help='Interval for logging metrics to tensorboard') - self.parser.add_argument('--val_interval', default=1000, type=int, help='Validation interval') - self.parser.add_argument('--save_interval', default=None, type=int, help='Model checkpoint interval') - - # Discriminator flags - self.parser.add_argument('--w_discriminator_lambda', default=0, type=float, help='Dw loss multiplier') - self.parser.add_argument('--w_discriminator_lr', default=2e-5, type=float, help='Dw learning rate') - self.parser.add_argument("--r1", type=float, default=10, help="weight of the r1 regularization") - self.parser.add_argument("--d_reg_every", type=int, default=16, - help="interval for applying r1 regularization") - self.parser.add_argument('--use_w_pool', action='store_true', - help='Whether to store a latnet codes pool for the discriminator\'s 
training') - self.parser.add_argument("--w_pool_size", type=int, default=50, - help="W\'s pool size, depends on --use_w_pool") - - # e4e specific - self.parser.add_argument('--delta_norm', type=int, default=2, help="norm type of the deltas") - self.parser.add_argument('--delta_norm_lambda', type=float, default=2e-4, help="lambda for delta norm loss") - - # Progressive training - self.parser.add_argument('--progressive_steps', nargs='+', type=int, default=None, - help="The training steps of training new deltas. steps[i] starts the delta_i training") - self.parser.add_argument('--progressive_start', type=int, default=None, - help="The training step to start training the deltas, overrides progressive_steps") - self.parser.add_argument('--progressive_step_every', type=int, default=2_000, - help="Amount of training steps for each progressive step") - - # Save additional training info to enable future training continuation from produced checkpoints - self.parser.add_argument('--save_training_data', action='store_true', - help='Save intermediate training data to resume training from the checkpoint') - self.parser.add_argument('--sub_exp_dir', default=None, type=str, help='Name of sub experiment directory') - self.parser.add_argument('--keep_optimizer', action='store_true', - help='Whether to continue from the checkpoint\'s optimizer') - self.parser.add_argument('--resume_training_from_ckpt', default=None, type=str, - help='Path to training checkpoint, works when --save_training_data was set to True') - self.parser.add_argument('--update_param_list', nargs='+', type=str, default=None, - help="Name of training parameters to update the loaded training checkpoint") - - def parse(self): - opts = self.parser.parse_args() - return opts diff --git a/spaces/rohan13/grady/main.py b/spaces/rohan13/grady/main.py deleted file mode 100644 index 716e1367d4913737d4947fc11cbed4531d1f16a1..0000000000000000000000000000000000000000 --- a/spaces/rohan13/grady/main.py +++ /dev/null @@ -1,10 +0,0 @@ -from utils import get_search_index, generate_answer, set_model_and_embeddings - -def index(model): - set_model_and_embeddings(model) - get_search_index(model) - return True - -def run(question, model): - index(model) - return generate_answer(question) diff --git a/spaces/rorallitri/biomedical-language-models/logs/Adobe Photoshop Lightroom CC 2.4.1 Crack Activation Keys 2020.md b/spaces/rorallitri/biomedical-language-models/logs/Adobe Photoshop Lightroom CC 2.4.1 Crack Activation Keys 2020.md deleted file mode 100644 index a39186df778ffc5b19632b72d34440ffd23b36cc..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Adobe Photoshop Lightroom CC 2.4.1 Crack Activation Keys 2020.md +++ /dev/null @@ -1,7 +0,0 @@ -
            -

            You may check out our different plans and compare prices. It has some of our best features in a very easy to use interface. You may switch between your desktop version of Photoshop and Lightroom mobile anytime with just a single touch.

            -

You can use Photoshop if you work with video clips. You can import, edit, and add transitions to video on an iPad and on your desktop through the timeline panel. There are separate apps for PhotoManager, Lightroom for iOS, and Lightroom for Android. Lightroom Classic is a less powerful version of Lightroom, designed for regular desktop or laptop photo editing with images stored on the user's own computer. The CC in the name indicates that both Lightroom versions are part of Adobe's Creative Cloud system, which charges a monthly fee for using the software. At the end of the trial period you can simply stop using Photoshop and cancel your subscription, or do nothing and have your subscription start automatically. Click this link to start your free Creative Cloud trial. You'll be offered a choice of subscription plans for your 7-day trial, including the Adobe Photography Plan, Photoshop only, or Adobe's all-apps plan.

            -

            Adobe Photoshop Lightroom CC 2.4.1 Crack Activation Keys 2020


            Download https://tinurll.com/2uzlWk



            -

            If your device is a Windows tablet, it can be used to edit photos and video. Lightroom Classic is a photo editing and organizing tool for professionals. Just like all Adobe software, the apps come with a free trial period. There are separate apps for PhotoManager, Lightroom for iOS, and Lightroom for Android. If your photo editing career requires the use of Adobe Creative Suite, you can use Creative Cloud for a 14-day trial on your desktop computer.

            899543212b
            -
            -
            \ No newline at end of file diff --git a/spaces/saefro991/aet_demo/dataset.py b/spaces/saefro991/aet_demo/dataset.py deleted file mode 100644 index 87a0c3b909bcdd47b25e1f5471375146c790dbef..0000000000000000000000000000000000000000 --- a/spaces/saefro991/aet_demo/dataset.py +++ /dev/null @@ -1,344 +0,0 @@ -import pickle -import pathlib -import torch -from torch.utils.data.dataloader import DataLoader -import pytorch_lightning as pl -import numpy as np -import yaml -import torchaudio -import pyworld -import pysptk -import random - - -class DataModule(pl.LightningDataModule): - def __init__(self, config): - super().__init__() - self.config = config - self.batchsize = config["train"]["batchsize"] - self.preprocessed_dir = pathlib.Path(config["general"]["preprocessed_path"]) - - def setup(self, stage): - - if not self.preprocessed_dir.exists(): - raise RuntimeError("Preprocessed directory was not be found") - - if "dual" in self.config: - if self.config["dual"]["enable"]: - task_config = yaml.load( - open(self.config["dual"]["config_path"], "r"), - Loader=yaml.FullLoader, - ) - task_preprocessed_dir = ( - self.preprocessed_dir.parent - / pathlib.Path(task_config["general"]["preprocessed_path"]).name - ) - if not task_preprocessed_dir.exists(): - raise RuntimeError( - "Preprocessed directory for multi-task learning was not be found" - ) - - self.flnames = { - "train": "train.txt", - "val": "val.txt", - "test": "test.txt", - } - - def get_ds(self, phase): - ds = Dataset(self.flnames[phase], self.config) - return ds - - def get_loader(self, phase): - ds = self.get_ds(phase) - dl = DataLoader( - ds, - self.batchsize, - shuffle=True if phase == "train" else False, - num_workers=self.config["train"]["num_workers"], - drop_last=True, - ) - return dl - - def train_dataloader(self): - return self.get_loader(phase="train") - - def val_dataloader(self): - return self.get_loader(phase="val") - - def test_dataloader(self): - return self.get_loader(phase="test") - - -class Dataset(torch.utils.data.Dataset): - def __init__(self, filetxt, config): - - self.preprocessed_dir = pathlib.Path(config["general"]["preprocessed_path"]) - self.config = config - self.spec_module = torchaudio.transforms.MelSpectrogram( - sample_rate=config["preprocess"]["sampling_rate"], - n_fft=config["preprocess"]["fft_length"], - win_length=config["preprocess"]["frame_length"], - hop_length=config["preprocess"]["frame_shift"], - f_min=config["preprocess"]["fmin"], - f_max=config["preprocess"]["fmax"], - n_mels=config["preprocess"]["n_mels"], - power=1, - center=True, - norm="slaney", - mel_scale="slaney", - ) - self.resample_candidate = [8000, 11025, 12000, 16000] - self.quantization_candidate = range(2 ** 6, 2 ** 10 + 2, 2) - self.segment_length = config["preprocess"]["segment_length"] - - with open(self.preprocessed_dir / filetxt, "r") as fr: - self.filelist = [pathlib.Path(path.strip("\n")) for path in fr] - - self.d_out = dict() - for item in ["wavs", "wavsaux"]: - self.d_out[item] = [] - - for wp in self.filelist: - - if config["general"]["corpus_type"] == "single": - basename = str(wp.stem) - else: - basename = str(wp.parent.name) + "-" + str(wp.stem) - - with open(self.preprocessed_dir / "{}.pickle".format(basename), "rb") as fw: - d_preprocessed = pickle.load(fw) - - for item in ["wavs", "wavsaux"]: - try: - self.d_out[item].extend(d_preprocessed[item]) - except: - pass - - for item in ["wavs", "wavsaux"]: - if self.d_out[item] != None: - self.d_out[item] = np.asarray(self.d_out[item]) - - if "dual" in self.config: 
- if self.config["dual"]["enable"]: - task_config = yaml.load( - open(config["dual"]["config_path"], "r"), - Loader=yaml.FullLoader, - ) - task_preprocessed_dir = ( - self.preprocessed_dir.parent - / pathlib.Path(task_config["general"]["preprocessed_path"]).name - ) - with open(task_preprocessed_dir / filetxt, "r") as fr: - task_filelist = [pathlib.Path(path.strip("\n")) for path in fr] - self.d_out["wavstask"] = [] - for wp in task_filelist: - if task_config["general"]["corpus_type"] == "single": - basename = str(wp.stem) - else: - basename = str(wp.parent.name) + "-" + str(wp.stem) - with open( - task_preprocessed_dir / "{}.pickle".format(basename), "rb" - ) as fw: - d_preprocessed = pickle.load(fw) - self.d_out["wavstask"].extend(d_preprocessed["wavs"]) - self.d_out["wavstask"] = np.asarray(self.d_out["wavstask"]) - - def __len__(self): - return len(self.d_out["wavs"]) - - def __getitem__(self, idx): - - d_batch = {} - - if self.d_out["wavs"].size > 0: - d_batch["wavs"] = torch.from_numpy(self.d_out["wavs"][idx]) - if self.segment_length > 0: - d_batch["wavs"] = self.get_segment(d_batch["wavs"], self.segment_length) - - if self.d_out["wavsaux"].size > 0: - d_batch["wavsaux"] = torch.from_numpy(self.d_out["wavsaux"][idx]) - if self.segment_length > 0: - d_batch["wavsaux"] = self.get_segment( - d_batch["wavsaux"], self.segment_length - ) - - if self.config["general"]["stage"] == "pretrain": - if self.config["train"]["augment"]: - d_batch["wavs"] = self.augmentation(d_batch["wavsaux"]) - d_batch["wavs"] = self.normalize_waveform(d_batch["wavs"], db=-3) - d_batch["wavsaux"] = self.normalize_waveform(d_batch["wavsaux"], db=-3) - if len(d_batch["wavs"]) != len(d_batch["wavsaux"]): - min_seq_len = min(len(d_batch["wavs"]), len(d_batch["wavsaux"])) - d_batch["wavs"] = d_batch["wavs"][:min_seq_len] - d_batch["wavsaux"] = d_batch["wavsaux"][:min_seq_len] - d_batch["melspecs"] = self.calc_spectrogram(d_batch["wavs"]) - if self.config["general"]["feature_type"] == "melspec": - d_batch["melspecsaux"] = self.calc_spectrogram(d_batch["wavsaux"]) - elif self.config["general"]["feature_type"] == "vocfeats": - d_batch["melceps"] = self.calc_melcep(d_batch["wavsaux"]) - d_batch["f0s"] = self.calc_f0(d_batch["wavs"]) - d_batch["melcepssrc"] = self.calc_melcep(d_batch["wavs"]) - else: - raise NotImplementedError() - - elif self.config["general"]["stage"].startswith("ssl"): - d_batch["wavs"] = self.normalize_waveform(d_batch["wavs"], db=-3) - d_batch["melspecs"] = self.calc_spectrogram(d_batch["wavs"]) - if self.config["general"]["feature_type"] == "vocfeats": - d_batch["f0s"] = self.calc_f0(d_batch["wavs"]) - d_batch["melcepssrc"] = self.calc_melcep(d_batch["wavs"]) - if self.d_out["wavsaux"].size > 0: - d_batch["wavsaux"] = self.normalize_waveform(d_batch["wavsaux"], db=-3) - if self.config["general"]["feature_type"] == "melspec": - d_batch["melspecsaux"] = self.calc_spectrogram(d_batch["wavsaux"]) - elif self.config["general"]["feature_type"] == "vocfeats": - d_batch["melceps"] = self.calc_melcep(d_batch["wavsaux"]) - if "dual" in self.config: - if self.config["dual"]["enable"]: - d_batch["wavstask"] = torch.from_numpy(self.d_out["wavstask"][idx]) - d_batch["wavstask"] = self.get_segment( - d_batch["wavstask"], self.segment_length - ) - d_batch["wavstask"] = self.normalize_waveform( - d_batch["wavstask"], db=-3 - ) - if self.config["general"]["feature_type"] == "melspec": - d_batch["melspecstask"] = self.calc_spectrogram( - d_batch["wavstask"] - ) - elif self.config["general"]["feature_type"] == 
"vocfeats": - d_batch["melcepstask"] = self.calc_melcep(d_batch["wavstask"]) - else: - raise NotImplementedError() - else: - raise NotImplementedError() - - return d_batch - - def calc_spectrogram(self, wav): - specs = self.spec_module(wav) - log_spec = torch.log( - torch.clamp_min(specs, self.config["preprocess"]["min_magnitude"]) - * self.config["preprocess"]["comp_factor"] - ).to(torch.float32) - return log_spec - - def calc_melcep(self, wav): - wav = wav.numpy() - _, sp, _ = pyworld.wav2world( - wav.astype(np.float64), - self.config["preprocess"]["sampling_rate"], - fft_size=self.config["preprocess"]["fft_length"], - frame_period=( - self.config["preprocess"]["frame_shift"] - / self.config["preprocess"]["sampling_rate"] - * 1000 - ), - ) - melcep = pysptk.sp2mc( - sp, - order=self.config["preprocess"]["cep_order"], - alpha=pysptk.util.mcepalpha(self.config["preprocess"]["sampling_rate"]), - ).transpose(1, 0) - melcep = torch.from_numpy(melcep).to(torch.float32) - return melcep - - def calc_f0(self, wav): - if self.config["preprocess"]["f0_extractor"] == "dio": - return self.calc_f0_dio(wav) - elif self.config["preprocess"]["f0_extractor"] == "harvest": - return self.calc_f0_harvest(wav) - elif self.config["preprocess"]["f0_extractor"] == "swipe": - return self.calc_f0_swipe(wav) - else: - raise NotImplementedError() - - def calc_f0_dio(self, wav): - wav = wav.numpy() - _f0, _t = pyworld.dio( - wav.astype(np.float64), - self.config["preprocess"]["sampling_rate"], - frame_period=( - self.config["preprocess"]["frame_shift"] - / self.config["preprocess"]["sampling_rate"] - * 1000 - ), - ) - f0 = pyworld.stonemask( - wav.astype(np.float64), _f0, _t, self.config["preprocess"]["sampling_rate"] - ) - f0 = torch.from_numpy(f0).to(torch.float32) - return f0 - - def calc_f0_harvest(self, wav): - wav = wav.numpy() - _f0, _t = pyworld.harvest( - wav.astype(np.float64), - self.config["preprocess"]["sampling_rate"], - frame_period=( - self.config["preprocess"]["frame_shift"] - / self.config["preprocess"]["sampling_rate"] - * 1000 - ), - ) - f0 = pyworld.stonemask( - wav.astype(np.float64), _f0, _t, self.config["preprocess"]["sampling_rate"] - ) - f0 = torch.from_numpy(f0).to(torch.float32) - return f0 - - def calc_f0_swipe(self, wav): - wav = wav.numpy() - f0 = pysptk.sptk.swipe( - wav.astype(np.float64), - fs=self.config["preprocess"]["sampling_rate"], - min=71, - max=800, - hopsize=self.config["preprocess"]["frame_shift"], - otype="f0", - ) - f0 = torch.from_numpy(f0).to(torch.float32) - return f0 - - def augmentation(self, wav): - wav /= torch.max(torch.abs(wav)) - new_freq = random.choice(self.resample_candidate) - new_quantization = random.choice(self.quantization_candidate) - mulaw_encoder = torchaudio.transforms.MuLawEncoding( - quantization_channels=new_quantization - ) - wav_quantized = mulaw_encoder(wav) / new_quantization * 2.0 - 1.0 - downsampler = torchaudio.transforms.Resample( - orig_freq=self.config["preprocess"]["sampling_rate"], - new_freq=new_freq, - resampling_method="sinc_interpolation", - lowpass_filter_width=6, - dtype=torch.float32, - ) - upsampler = torchaudio.transforms.Resample( - orig_freq=new_freq, - new_freq=self.config["preprocess"]["sampling_rate"], - resampling_method="sinc_interpolation", - lowpass_filter_width=6, - dtype=torch.float32, - ) - wav_processed = upsampler(downsampler(wav_quantized)) - return wav_processed - - def normalize_waveform(self, wav, db=-3): - wav, _ = torchaudio.sox_effects.apply_effects_tensor( - wav.unsqueeze(0), - 
self.config["preprocess"]["sampling_rate"], - [["norm", "{}".format(db)]], - ) - return wav.squeeze(0) - - def get_segment(self, wav, segment_length): - seg_size = self.config["preprocess"]["sampling_rate"] * segment_length - if len(wav) >= seg_size: - max_wav_start = len(wav) - seg_size - wav_start = random.randint(0, max_wav_start) - wav = wav[wav_start : wav_start + seg_size] - else: - wav = torch.nn.functional.pad(wav, (0, seg_size - len(wav)), "constant") - return wav diff --git a/spaces/sahshd/ChuanhuChatGPT/run_Linux.sh b/spaces/sahshd/ChuanhuChatGPT/run_Linux.sh deleted file mode 100644 index 2d26597ae47519f42336ccffc16646713a192ae1..0000000000000000000000000000000000000000 --- a/spaces/sahshd/ChuanhuChatGPT/run_Linux.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash - -# 获取脚本所在目录 -script_dir=$(dirname "$(readlink -f "$0")") - -# 将工作目录更改为脚本所在目录 -cd "$script_dir" || exit - -# 检查Git仓库是否有更新 -git remote update -pwd - -if ! git status -uno | grep 'up to date' > /dev/null; then - # 如果有更新,关闭当前运行的服务器 - pkill -f ChuanhuChatbot.py - - # 拉取最新更改 - git pull - - # 安装依赖 - pip3 install -r requirements.txt - - # 重新启动服务器 - nohup python3 ChuanhuChatbot.py & -fi - -# 检查ChuanhuChatbot.py是否在运行 -if ! pgrep -f ChuanhuChatbot.py > /dev/null; then - # 如果没有运行,启动服务器 - nohup python3 ChuanhuChatbot.py & -fi diff --git a/spaces/samcaicn/bingai/src/components/ui/icons.tsx b/spaces/samcaicn/bingai/src/components/ui/icons.tsx deleted file mode 100644 index 0ca5bee838afedafae3eddbfe2612edba1586f9c..0000000000000000000000000000000000000000 --- a/spaces/samcaicn/bingai/src/components/ui/icons.tsx +++ /dev/null @@ -1,489 +0,0 @@ -'use client' - -import * as React from 'react' - -import { cn } from '@/lib/utils' - -function IconNextChat({ - className, - inverted, - ...props -}: React.ComponentProps<'svg'> & { inverted?: boolean }) { - const id = React.useId() - - return ( - - - - - - - - - - - - - - - - - - - - - - ) -} - -function IconOpenAI({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - OpenAI icon - - - ) -} - -function IconGitHub({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - GitHub - - - ) -} - -function IconSeparator({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - ) -} - -function IconArrowDown({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconArrowRight({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconUser({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconPlus({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconArrowElbow({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconSpinner({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconMessage({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconTrash({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconRefresh({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconStop({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconSidebar({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconMoon({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconSun({ className, ...props }: 
React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconCopy({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconCheck({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconDownload({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconClose({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconEdit({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconShare({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconUsers({ className, ...props }: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconExternalLink({ - className, - ...props -}: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -function IconChevronUpDown({ - className, - ...props -}: React.ComponentProps<'svg'>) { - return ( - - - - ) -} - -export { - IconEdit, - IconNextChat, - IconOpenAI, - IconGitHub, - IconSeparator, - IconArrowDown, - IconArrowRight, - IconUser, - IconPlus, - IconArrowElbow, - IconSpinner, - IconMessage, - IconTrash, - IconRefresh, - IconStop, - IconSidebar, - IconMoon, - IconSun, - IconCopy, - IconCheck, - IconDownload, - IconClose, - IconShare, - IconUsers, - IconExternalLink, - IconChevronUpDown -} diff --git a/spaces/sanjay7178/FAS-demo/app.py b/spaces/sanjay7178/FAS-demo/app.py deleted file mode 100644 index de12a78c03a8242c88b670eb05e93c2d0cdb5c2b..0000000000000000000000000000000000000000 --- a/spaces/sanjay7178/FAS-demo/app.py +++ /dev/null @@ -1,94 +0,0 @@ -import aiohttp -import gradio as gr -import numba -import requests -import base64 -from PIL import Image -import io -import json -from numba import jit -import matplotlib.pyplot as plt -import os - - -examples = ["examples/0002_01_00_01_55.jpg", -"examples/0-spoof.jpg", -"examples/0.jpg", -"examples/3.jpg", -"examples/6-mask.jpg", -"examples/AGL752VM_id147_s0_150.png", -"examples/FT720P_G780_REDMI4X_id0_s0_105.png", -"examples/7.jpg"] - - -async def spoof_trigger(b64): - url = os.getenv('url') - payload = {"img": b64} - headers = { - 'x-functions-key': os.getenv('token'), - 'Content-Type': 'text/plain' - } - - async with aiohttp.ClientSession() as session: - async with session.post(url, json=payload, headers=headers) as response: - response_text = await response.text() - return response_text -# @jit -async def predict_image(img): - # Convert NumPy array to PIL Image - img = Image.fromarray(img.astype('uint8')) - - # Create a BytesIO object - buffer = io.BytesIO() - - # Save the PIL Image to the BytesIO object - img.save(buffer, format='JPEG') - - # Get the base64 representation - img_base64 = base64.b64encode(buffer.getvalue()).decode() - - print(len(img_base64)) - - res = await spoof_trigger(img_base64) - # print(json.loads(res)) - spoof_res = json.loads(res)['spoof_res'] - annotated_image = json.loads(res)['annotated_image'] - conf_score = float( json.loads(spoof_res)['confidence_score']) - - # img_base64 to plot - img = Image.open(io.BytesIO(base64.b64decode(annotated_image))) - confidences = {'Real': conf_score, 'Fake': 1-conf_score} - - return (confidences,img) - - -with gr.Blocks(title="Spoof-Demo", css="#custom_header {min-height: 3rem; text-align: center} #custom_title {min-height: 3rem; text-align: center}") as demo : - gr.Markdown("# Face Antispoof-Demo", elem_id="custom_title") - gr.Markdown("## Gradio Demo for Face Antispoofing Detection using 
DeepPairNet based on ResNet50", elem_id="custom_header") - gr.Markdown("## 👨‍💻 Only for research preview Intended" ,elem_id="custom_header") - with gr.Row(): - with gr.Column(): - with gr.Box(): - gr.Markdown("### Input") - image = gr.Image(source="webcam",label="Input Image",invert_color=False,image_mode="RGB") - image.style(height=240) - btn = gr.Button(text="Submit") - btn.style(full_width=True) - with gr.Column(): - with gr.Box(): - gr.Markdown("### Output") - output_image = gr.Image(label="Output Image") - output_image.style(height=240) - label_probs = gr.outputs.Label() - - btn.click(predict_image, image , outputs=[label_probs,output_image ],api_name="Face Antispoofing") - gr.Examples( - examples=examples, - inputs=image, - outputs = output_image, - fn=predict_image, - cache_examples=False, - ) - -if __name__ == "__main__": - demo.launch(debug=True) \ No newline at end of file diff --git a/spaces/scedlatioru/img-to-music/example/[P3D] Prepar3D V4 Academic Professional Plus 4.3.29.25520 Hack Tool.md b/spaces/scedlatioru/img-to-music/example/[P3D] Prepar3D V4 Academic Professional Plus 4.3.29.25520 Hack Tool.md deleted file mode 100644 index f3893485bbf9801071c6568ab4d61b6463006977..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/[P3D] Prepar3D V4 Academic Professional Plus 4.3.29.25520 Hack Tool.md +++ /dev/null @@ -1,42 +0,0 @@ -

            [P3D] Prepar3D V4 Academic Professional Plus 4.3.29.25520 Hack Tool


            Download File ✦✦✦ https://gohhs.com/2uEzwZ



            -
            -OS Windows 7 Pro SP1 32bit (x86) YMMV: - -Fixed rendering in P3D to work with Nvidia's game-changing Fermi Cards with proper FSM: - -Works with both the original rendering engine and the new (rebranded) version of the engine. - -Simultaneously supports both "preferred" and "useful" settings, e.g. a user could simultaneously use both NVIDIA's and P3D's rendering engines. - -Allows unlimited tweaks (stray reflections, shadows and the like). - -Supports most of the systems for which P3D is required. - -Nvidia FSM: - -- SetGPUOverride=1 to force FSM 0 to be used - -- SetGPUOverride=0 to force FSM 1 to be used - -- SetGPUOverride=1 to force FSM 2 to be used - -- SetGPUOverride=2 to force FSM 3 to be used - -- SetGPUOverride=3 to force FSM 4 to be used - -Works with Fermi, Kepler and Maxwell graphics cards. - -Supports both "preferred" and "useful" settings, e.g. a user could simultaneously use both NVIDIA's and P3D's rendering engines. - -Features: - -Best CPU frequency: - -- Overclocking in Game/Fullscreen mode supported: - -Game: - -- Switch game resolution 4fefd39f24
            -
            -
            -

            diff --git a/spaces/shawndimantha/hackaithon_generate_email/app.py b/spaces/shawndimantha/hackaithon_generate_email/app.py deleted file mode 100644 index 7ca7bea295c1d96ac4548b89e83aaaca9b6f25b9..0000000000000000000000000000000000000000 --- a/spaces/shawndimantha/hackaithon_generate_email/app.py +++ /dev/null @@ -1,43 +0,0 @@ -import streamlit as st -import openai -import os - -# Get OpenAI API key from Secrets -openai.api_key = st.secrets["OPENAI_API_KEY"] -#st.secrets["OPENAI_API_KEY"] -#os.environ.get('OPENAI_API_KEY') - -# Define function to generate response to email -def generate_response(email_text): - # Create prompt - prompt = f"I am sharing an email with you. Please generate a response to this email and be flattering of the sender, but assume I do not have a lot of time to meet with people.\n\n{email_text}\n\nYour response: " - - # Generate response - response = openai.Completion.create( - engine="text-davinci-003", - prompt=prompt, - max_tokens=1024, - n=1, - stop=None, - temperature=0.7 - # Add additional parameters here, such as prompt_id, file, or codify - ) - - # Return response - return response.choices[0].text - -# Create Streamlit interface -st.title("Email Response Generator") - -# Input email text -email_text = st.text_area("Enter email text", height = 200) - - -# Generate response -if email_text: - response = generate_response(email_text) - st.write("Response:") - st.write(response) - -# Display interface -st.stop() diff --git a/spaces/sidharthism/fashion-eye/visualize.py b/spaces/sidharthism/fashion-eye/visualize.py deleted file mode 100644 index 433ae2ea8963c56a37e5e91932ad6d359495ed47..0000000000000000000000000000000000000000 --- a/spaces/sidharthism/fashion-eye/visualize.py +++ /dev/null @@ -1,314 +0,0 @@ -# Copyright 2020 Erik Härkönen. All rights reserved. -# This file is licensed to you under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. You may obtain a copy -# of the License at http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software distributed under -# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS -# OF ANY KIND, either express or implied. See the License for the specific language -# governing permissions and limitations under the License. 
- -# Patch for broken CTRL+C handler -# https://github.com/ContinuumIO/anaconda-issues/issues/905 -import os -os.environ['FOR_DISABLE_CONSOLE_CTRL_HANDLER'] = '1' - -import torch, json, numpy as np -from types import SimpleNamespace -import matplotlib.pyplot as plt -from pathlib import Path -from os import makedirs -from PIL import Image -from netdissect import proggan, nethook, easydict, zdataset -from netdissect.modelconfig import create_instrumented_model -from estimators import get_estimator -from models import get_instrumented_model -from scipy.cluster.vq import kmeans -import re -import sys -import datetime -import argparse -from tqdm import trange -from config import Config -from decomposition import get_random_dirs, get_or_compute, get_max_batch_size, SEED_VISUALIZATION -from utils import pad_frames - -def x_closest(p): - distances = np.sqrt(np.sum((X - p)**2, axis=-1)) - idx = np.argmin(distances) - return distances[idx], X[idx] - -def make_gif(imgs, duration_secs, outname): - head, *tail = [Image.fromarray((x * 255).astype(np.uint8)) for x in imgs] - ms_per_frame = 1000 * duration_secs / instances - head.save(outname, format='GIF', append_images=tail, save_all=True, duration=ms_per_frame, loop=0) - -def make_mp4(imgs, duration_secs, outname): - import shutil - import subprocess as sp - - FFMPEG_BIN = shutil.which("ffmpeg") - assert FFMPEG_BIN is not None, 'ffmpeg not found, install with "conda install -c conda-forge ffmpeg"' - assert len(imgs[0].shape) == 3, 'Invalid shape of frame data' - - resolution = imgs[0].shape[0:2] - fps = int(len(imgs) / duration_secs) - - command = [ FFMPEG_BIN, - '-y', # overwrite output file - '-f', 'rawvideo', - '-vcodec','rawvideo', - '-s', f'{resolution[0]}x{resolution[1]}', # size of one frame - '-pix_fmt', 'rgb24', - '-r', f'{fps}', - '-i', '-', # imput from pipe - '-an', # no audio - '-c:v', 'libx264', - '-preset', 'slow', - '-crf', '17', - str(Path(outname).with_suffix('.mp4')) ] - - frame_data = np.concatenate([(x * 255).astype(np.uint8).reshape(-1) for x in imgs]) - with sp.Popen(command, stdin=sp.PIPE, stdout=sp.PIPE, stderr=sp.PIPE) as p: - ret = p.communicate(frame_data.tobytes()) - if p.returncode != 0: - print(ret[1].decode("utf-8")) - raise sp.CalledProcessError(p.returncode, command) - - -def make_grid(latent, lat_mean, lat_comp, lat_stdev, act_mean, act_comp, act_stdev, scale=1, n_rows=10, n_cols=5, make_plots=True, edit_type='latent'): - from notebooks.notebook_utils import create_strip_centered - - inst.remove_edits() - x_range = np.linspace(-scale, scale, n_cols, dtype=np.float32) # scale in sigmas - - rows = [] - for r in range(n_rows): - curr_row = [] - out_batch = create_strip_centered(inst, edit_type, layer_key, [latent], - act_comp[r], lat_comp[r], act_stdev[r], lat_stdev[r], act_mean, lat_mean, scale, 0, -1, n_cols)[0] - for i, img in enumerate(out_batch): - curr_row.append(('c{}_{:.2f}'.format(r, x_range[i]), img)) - - rows.append(curr_row[:n_cols]) - - inst.remove_edits() - - if make_plots: - # If more rows than columns, make several blocks side by side - n_blocks = 2 if n_rows > n_cols else 1 - - for r, data in enumerate(rows): - # Add white borders - imgs = pad_frames([img for _, img in data]) - - coord = ((r * n_blocks) % n_rows) + ((r * n_blocks) // n_rows) - plt.subplot(n_rows//n_blocks, n_blocks, 1 + coord) - plt.imshow(np.hstack(imgs)) - - # Custom x-axis labels - W = imgs[0].shape[1] # image width - P = imgs[1].shape[1] # padding width - locs = [(0.5*W + i*(W+P)) for i in range(n_cols)] - plt.xticks(locs, 
["{:.2f}".format(v) for v in x_range]) - plt.yticks([]) - plt.ylabel(f'C{r}') - - plt.tight_layout() - plt.subplots_adjust(top=0.96) # make room for suptitle - - return [img for row in rows for img in row] - - -###################### -### Visualize results -###################### - -if __name__ == '__main__': - global max_batch, sample_shape, feature_shape, inst, args, layer_key, model - - args = Config().from_args() - t_start = datetime.datetime.now() - timestamp = lambda : datetime.datetime.now().strftime("%d.%m %H:%M") - print(f'[{timestamp()}] {args.model}, {args.layer}, {args.estimator}') - - # Ensure reproducibility - torch.manual_seed(0) # also sets cuda seeds - np.random.seed(0) - - # Speed up backend - torch.backends.cudnn.benchmark = True - torch.autograd.set_grad_enabled(False) - - has_gpu = torch.cuda.is_available() - device = torch.device('cuda' if has_gpu else 'cpu') - layer_key = args.layer - layer_name = layer_key #layer_key.lower().split('.')[-1] - - basedir = Path(__file__).parent.resolve() - outdir = basedir / 'out' - - # Load model - inst = get_instrumented_model(args.model, args.output_class, layer_key, device, use_w=args.use_w) - model = inst.model - feature_shape = inst.feature_shape[layer_key] - latent_shape = model.get_latent_shape() - print('Feature shape:', feature_shape) - - # Layout of activations - if len(feature_shape) != 4: # non-spatial - axis_mask = np.ones(len(feature_shape), dtype=np.int32) - else: - axis_mask = np.array([0, 1, 1, 1]) # only batch fixed => whole activation volume used - - # Shape of sample passed to PCA - sample_shape = feature_shape*axis_mask - sample_shape[sample_shape == 0] = 1 - - # Load or compute components - dump_name = get_or_compute(args, inst) - data = np.load(dump_name, allow_pickle=False) # does not contain object arrays - X_comp = data['act_comp'] - X_global_mean = data['act_mean'] - X_stdev = data['act_stdev'] - X_var_ratio = data['var_ratio'] - X_stdev_random = data['random_stdevs'] - Z_global_mean = data['lat_mean'] - Z_comp = data['lat_comp'] - Z_stdev = data['lat_stdev'] - n_comp = X_comp.shape[0] - data.close() - - # Transfer components to device - tensors = SimpleNamespace( - X_comp = torch.from_numpy(X_comp).to(device).float(), #-1, 1, C, H, W - X_global_mean = torch.from_numpy(X_global_mean).to(device).float(), # 1, C, H, W - X_stdev = torch.from_numpy(X_stdev).to(device).float(), - Z_comp = torch.from_numpy(Z_comp).to(device).float(), - Z_stdev = torch.from_numpy(Z_stdev).to(device).float(), - Z_global_mean = torch.from_numpy(Z_global_mean).to(device).float(), - ) - - transformer = get_estimator(args.estimator, n_comp, args.sparsity) - tr_param_str = transformer.get_param_str() - - # Compute max batch size given VRAM usage - max_batch = args.batch_size or (get_max_batch_size(inst, device) if has_gpu else 1) - print('Batch size:', max_batch) - - def show(): - if args.batch_mode: - plt.close('all') - else: - plt.show() - - print(f'[{timestamp()}] Creating visualizations') - - # Ensure visualization gets new samples - torch.manual_seed(SEED_VISUALIZATION) - np.random.seed(SEED_VISUALIZATION) - - # Make output directories - est_id = f'spca_{args.sparsity}' if args.estimator == 'spca' else args.estimator - outdir_comp = outdir/model.name/layer_key.lower()/est_id/'comp' - outdir_inst = outdir/model.name/layer_key.lower()/est_id/'inst' - outdir_summ = outdir/model.name/layer_key.lower()/est_id/'summ' - makedirs(outdir_comp, exist_ok=True) - makedirs(outdir_inst, exist_ok=True) - makedirs(outdir_summ, exist_ok=True) - - # 
Measure component sparsity (!= activation sparsity) - sparsity = np.mean(X_comp == 0) # percentage of zero values in components - print(f'Sparsity: {sparsity:.2f}') - - def get_edit_name(mode): - if mode == 'activation': - is_stylegan = 'StyleGAN' in args.model - is_w = layer_key in ['style', 'g_mapping'] - return 'W' if (is_stylegan and is_w) else 'ACT' - elif mode == 'latent': - return model.latent_space_name() - elif mode == 'both': - return 'BOTH' - else: - raise RuntimeError(f'Unknown edit mode {mode}') - - # Only visualize applicable edit modes - if args.use_w and layer_key in ['style', 'g_mapping']: - edit_modes = ['latent'] # activation edit is the same - else: - edit_modes = ['activation', 'latent'] - - # Summary grid, real components - for edit_mode in edit_modes: - plt.figure(figsize = (14,12)) - plt.suptitle(f"{args.estimator.upper()}: {model.name} - {layer_name}, {get_edit_name(edit_mode)} edit", size=16) - make_grid(tensors.Z_global_mean, tensors.Z_global_mean, tensors.Z_comp, tensors.Z_stdev, tensors.X_global_mean, - tensors.X_comp, tensors.X_stdev, scale=args.sigma, edit_type=edit_mode, n_rows=14) - plt.savefig(outdir_summ / f'components_{get_edit_name(edit_mode)}.jpg', dpi=300) - show() - - if args.make_video: - components = 15 - instances = 150 - - # One reasonable, one over the top - for sigma in [args.sigma, 3*args.sigma]: - for c in range(components): - for edit_mode in edit_modes: - frames = make_grid(tensors.Z_global_mean, tensors.Z_global_mean, tensors.Z_comp[c:c+1, :, :], tensors.Z_stdev[c:c+1], tensors.X_global_mean, - tensors.X_comp[c:c+1, :, :], tensors.X_stdev[c:c+1], n_rows=1, n_cols=instances, scale=sigma, make_plots=False, edit_type=edit_mode) - plt.close('all') - - frames = [x for _, x in frames] - frames = frames + frames[::-1] - make_mp4(frames, 5, outdir_comp / f'{get_edit_name(edit_mode)}_sigma{sigma}_comp{c}.mp4') - - - # Summary grid, random directions - # Using the stdevs of the principal components for same norm - random_dirs_act = torch.from_numpy(get_random_dirs(n_comp, np.prod(sample_shape)).reshape(-1, *sample_shape)).to(device) - random_dirs_z = torch.from_numpy(get_random_dirs(n_comp, np.prod(inst.input_shape)).reshape(-1, *latent_shape)).to(device) - - for edit_mode in edit_modes: - plt.figure(figsize = (14,12)) - plt.suptitle(f"{model.name} - {layer_name}, random directions w/ PC stdevs, {get_edit_name(edit_mode)} edit", size=16) - make_grid(tensors.Z_global_mean, tensors.Z_global_mean, random_dirs_z, tensors.Z_stdev, - tensors.X_global_mean, random_dirs_act, tensors.X_stdev, scale=args.sigma, edit_type=edit_mode, n_rows=14) - plt.savefig(outdir_summ / f'random_dirs_{get_edit_name(edit_mode)}.jpg', dpi=300) - show() - - # Random instances w/ components added - n_random_imgs = 10 - latents = model.sample_latent(n_samples=n_random_imgs) - - for img_idx in trange(n_random_imgs, desc='Random images', ascii=True): - #print(f'Creating visualizations for random image {img_idx+1}/{n_random_imgs}') - z = latents[img_idx][None, ...] 
- - # Summary grid, real components - for edit_mode in edit_modes: - plt.figure(figsize = (14,12)) - plt.suptitle(f"{args.estimator.upper()}: {model.name} - {layer_name}, {get_edit_name(edit_mode)} edit", size=16) - make_grid(z, tensors.Z_global_mean, tensors.Z_comp, tensors.Z_stdev, - tensors.X_global_mean, tensors.X_comp, tensors.X_stdev, scale=args.sigma, edit_type=edit_mode, n_rows=14) - plt.savefig(outdir_summ / f'samp{img_idx}_real_{get_edit_name(edit_mode)}.jpg', dpi=300) - show() - - if args.make_video: - components = 5 - instances = 150 - - # One reasonable, one over the top - for sigma in [args.sigma, 3*args.sigma]: #[2, 5]: - for edit_mode in edit_modes: - imgs = make_grid(z, tensors.Z_global_mean, tensors.Z_comp, tensors.Z_stdev, tensors.X_global_mean, tensors.X_comp, tensors.X_stdev, - n_rows=components, n_cols=instances, scale=sigma, make_plots=False, edit_type=edit_mode) - plt.close('all') - - for c in range(components): - frames = [x for _, x in imgs[c*instances:(c+1)*instances]] - frames = frames + frames[::-1] - make_mp4(frames, 5, outdir_inst / f'{get_edit_name(edit_mode)}_sigma{sigma}_img{img_idx}_comp{c}.mp4') - - print('Done in', datetime.datetime.now() - t_start) \ No newline at end of file diff --git a/spaces/simpie28/VITS-Umamusume-voice-synthesizer/text/cleaners.py b/spaces/simpie28/VITS-Umamusume-voice-synthesizer/text/cleaners.py deleted file mode 100644 index c80e113b2b81a66134800dbdaa29c7d96a0152a7..0000000000000000000000000000000000000000 --- a/spaces/simpie28/VITS-Umamusume-voice-synthesizer/text/cleaners.py +++ /dev/null @@ -1,146 +0,0 @@ -import re - - -def japanese_cleaners(text): - from text.japanese import japanese_to_romaji_with_accent - text = japanese_to_romaji_with_accent(text) - text = re.sub(r'([A-Za-z])$', r'\1.', text) - return text - - -def japanese_cleaners2(text): - return japanese_cleaners(text).replace('ts', 'ʦ').replace('...', '…') - - -def korean_cleaners(text): - '''Pipeline for Korean text''' - from text.korean import latin_to_hangul, number_to_hangul, divide_hangul - text = latin_to_hangul(text) - text = number_to_hangul(text) - text = divide_hangul(text) - text = re.sub(r'([\u3131-\u3163])$', r'\1.', text) - return text - - -def chinese_cleaners(text): - '''Pipeline for Chinese text''' - from text.mandarin import number_to_chinese, chinese_to_bopomofo, latin_to_bopomofo - text = number_to_chinese(text) - text = chinese_to_bopomofo(text) - text = latin_to_bopomofo(text) - text = re.sub(r'([ˉˊˇˋ˙])$', r'\1。', text) - return text - - -def zh_ja_mixture_cleaners(text): - from text.mandarin import chinese_to_romaji - from text.japanese import japanese_to_romaji_with_accent - text = re.sub(r'\[ZH\](.*?)\[ZH\]', - lambda x: chinese_to_romaji(x.group(1))+' ', text) - text = re.sub(r'\[JA\](.*?)\[JA\]', lambda x: japanese_to_romaji_with_accent( - x.group(1)).replace('ts', 'ʦ').replace('u', 'ɯ').replace('...', '…')+' ', text) - text = re.sub(r'\s+$', '', text) - text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text) - return text - - -def sanskrit_cleaners(text): - text = text.replace('॥', '।').replace('ॐ', 'ओम्') - if text[-1] != '।': - text += ' ।' - return text - - -def cjks_cleaners(text): - from text.mandarin import chinese_to_lazy_ipa - from text.japanese import japanese_to_ipa - from text.korean import korean_to_lazy_ipa - from text.sanskrit import devanagari_to_ipa - from text.english import english_to_lazy_ipa - text = re.sub(r'\[ZH\](.*?)\[ZH\]', - lambda x: chinese_to_lazy_ipa(x.group(1))+' ', text) - text = re.sub(r'\[JA\](.*?)\[JA\]', - 
lambda x: japanese_to_ipa(x.group(1))+' ', text) - text = re.sub(r'\[KO\](.*?)\[KO\]', - lambda x: korean_to_lazy_ipa(x.group(1))+' ', text) - text = re.sub(r'\[SA\](.*?)\[SA\]', - lambda x: devanagari_to_ipa(x.group(1))+' ', text) - text = re.sub(r'\[EN\](.*?)\[EN\]', - lambda x: english_to_lazy_ipa(x.group(1))+' ', text) - text = re.sub(r'\s+$', '', text) - text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text) - return text - - -def cjke_cleaners(text): - from text.mandarin import chinese_to_lazy_ipa - from text.japanese import japanese_to_ipa - from text.korean import korean_to_ipa - from text.english import english_to_ipa2 - text = re.sub(r'\[ZH\](.*?)\[ZH\]', lambda x: chinese_to_lazy_ipa(x.group(1)).replace( - 'ʧ', 'tʃ').replace('ʦ', 'ts').replace('ɥan', 'ɥæn')+' ', text) - text = re.sub(r'\[JA\](.*?)\[JA\]', lambda x: japanese_to_ipa(x.group(1)).replace('ʧ', 'tʃ').replace( - 'ʦ', 'ts').replace('ɥan', 'ɥæn').replace('ʥ', 'dz')+' ', text) - text = re.sub(r'\[KO\](.*?)\[KO\]', - lambda x: korean_to_ipa(x.group(1))+' ', text) - text = re.sub(r'\[EN\](.*?)\[EN\]', lambda x: english_to_ipa2(x.group(1)).replace('ɑ', 'a').replace( - 'ɔ', 'o').replace('ɛ', 'e').replace('ɪ', 'i').replace('ʊ', 'u')+' ', text) - text = re.sub(r'\s+$', '', text) - text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text) - return text - - -def cjke_cleaners2(text): - from text.mandarin import chinese_to_ipa - from text.japanese import japanese_to_ipa2 - from text.korean import korean_to_ipa - from text.english import english_to_ipa2 - text = re.sub(r'\[ZH\](.*?)\[ZH\]', - lambda x: chinese_to_ipa(x.group(1))+' ', text) - text = re.sub(r'\[JA\](.*?)\[JA\]', - lambda x: japanese_to_ipa2(x.group(1))+' ', text) - text = re.sub(r'\[KO\](.*?)\[KO\]', - lambda x: korean_to_ipa(x.group(1))+' ', text) - text = re.sub(r'\[EN\](.*?)\[EN\]', - lambda x: english_to_ipa2(x.group(1))+' ', text) - text = re.sub(r'\s+$', '', text) - text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text) - return text - - -def thai_cleaners(text): - from text.thai import num_to_thai, latin_to_thai - text = num_to_thai(text) - text = latin_to_thai(text) - return text - - -def shanghainese_cleaners(text): - from text.shanghainese import shanghainese_to_ipa - text = shanghainese_to_ipa(text) - text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text) - return text - - -def chinese_dialect_cleaners(text): - from text.mandarin import chinese_to_ipa2 - from text.japanese import japanese_to_ipa3 - from text.shanghainese import shanghainese_to_ipa - from text.cantonese import cantonese_to_ipa - from text.english import english_to_lazy_ipa2 - from text.ngu_dialect import ngu_dialect_to_ipa - text = re.sub(r'\[ZH\](.*?)\[ZH\]', - lambda x: chinese_to_ipa2(x.group(1))+' ', text) - text = re.sub(r'\[JA\](.*?)\[JA\]', - lambda x: japanese_to_ipa3(x.group(1)).replace('Q', 'ʔ')+' ', text) - text = re.sub(r'\[SH\](.*?)\[SH\]', lambda x: shanghainese_to_ipa(x.group(1)).replace('1', '˥˧').replace('5', - '˧˧˦').replace('6', '˩˩˧').replace('7', '˥').replace('8', '˩˨').replace('ᴀ', 'ɐ').replace('ᴇ', 'e')+' ', text) - text = re.sub(r'\[GD\](.*?)\[GD\]', - lambda x: cantonese_to_ipa(x.group(1))+' ', text) - text = re.sub(r'\[EN\](.*?)\[EN\]', - lambda x: english_to_lazy_ipa2(x.group(1))+' ', text) - text = re.sub(r'\[([A-Z]{2})\](.*?)\[\1\]', lambda x: ngu_dialect_to_ipa(x.group(2), x.group( - 1)).replace('ʣ', 'dz').replace('ʥ', 'dʑ').replace('ʦ', 'ts').replace('ʨ', 'tɕ')+' ', text) - text = re.sub(r'\s+$', '', text) - text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text) - return text diff --git 
a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Block Puzzle Jewel Game Legends MOD APK and Solve the Gem Puzzles.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Block Puzzle Jewel Game Legends MOD APK and Solve the Gem Puzzles.md deleted file mode 100644 index 23c244de67b96137002a3a79b74e70e88373d738..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Block Puzzle Jewel Game Legends MOD APK and Solve the Gem Puzzles.md +++ /dev/null @@ -1,112 +0,0 @@ - -

            Block Puzzle Jewel Game Legends Mod APK: A Fun and Addictive Puzzle Game

            -

            If you are looking for a simple yet challenging puzzle game to kill some time, you should try Block Puzzle Jewel Game Legends. This is a classic block puzzle game that will test your logic, strategy, and reflexes. You can download the original version of the game from the Google Play Store, or you can get the modded version from to enjoy some extra features and benefits. In this article, we will tell you everything you need to know about Block Puzzle Jewel Game Legends and its mod apk.

            -

            block puzzle jewel game legends mod apk


            DOWNLOAD >>> https://ssurll.com/2uNTGa



            -

            What is Block Puzzle Jewel Game Legends?

            -

            Block Puzzle Jewel Game Legends is a puzzle game that is inspired by the popular Tetris game. The goal of the game is to fill up the grid with different shapes of blocks without leaving any gaps. You can move and rotate the blocks to fit them in the best position. Once you complete a horizontal or vertical line, it will disappear and give you points. The game will end when there is no more space for new blocks to fall.

            -

            How to play Block Puzzle Jewel Game Legends

            -

            The game is very easy to play, but hard to master. You can use the following controls to play the game:

            -
              -
            • Swipe left or right to move the block horizontally.
            • -
            • Swipe down to drop the block faster.
            • -
            • Tap on the screen to rotate the block clockwise or counterclockwise.
            • -
            -

            You can also use the buttons at the bottom of the screen to perform these actions. You can pause the game by tapping on the pause button at the top right corner of the screen.

            -

            Features of Block Puzzle Jewel Game Legends

            -

            Block Puzzle Jewel Game Legends has many features that make it fun and addictive. Here are some of them:

            -

            Classic mode

            -

            This is the basic mode of the game, where you can play endlessly until you run out of space. You can choose from three difficulty levels: easy, normal, and hard. The higher the difficulty, the faster the blocks will fall and the more points you will get.

            -

            block puzzle jewel gem legend modded apk download
            -block puzzle jewel game legends hack mod apk free
            -block puzzle jewel game legends unlimited money mod apk
            -block puzzle jewel game legends premium mod apk latest version
            -block puzzle jewel game legends cheats mod apk 2023
            -block puzzle jewel game legends mod apk android 1
            -block puzzle jewel game legends mod apk revdl
            -block puzzle jewel game legends mod apk no ads
            -block puzzle jewel game legends mod apk offline
            -block puzzle jewel game legends mod apk online
            -block puzzle jewel game legends pro mod apk full
            -block puzzle jewel game legends vip mod apk unlocked
            -block puzzle jewel game legends cracked mod apk 2023
            -block puzzle jewel game legends mega mod apk update
            -block puzzle jewel game legends mod apk for pc
            -block puzzle jewel game legends mod apk for ios
            -block puzzle jewel game legends mod apk for windows 10
            -block puzzle jewel game legends mod apk for mac
            -block puzzle jewel game legends mod apk for laptop
            -block puzzle jewel game legends mod apk for chromebook
            -block puzzle jewel game legends modded apk install
            -block puzzle jewel game legends hacked mod apk play
            -block puzzle jewel game legends unlimited coins mod apk
            -block puzzle jewel game legends unlimited gems mod apk
            -block puzzle jewel game legends unlimited lives mod apk
            -block puzzle jewel game legends unlimited boosters mod apk
            -block puzzle jewel game legends unlimited stars mod apk
            -block puzzle jewel game legends unlimited levels mod apk
            -block puzzle jewel game legends unlimited moves mod apk
            -block puzzle jewel game legends unlimited time mod apk
            -block puzzle jewel game legends premium features mod apk
            -block puzzle jewel game legends cheats codes mod apk
            -block puzzle jewel game legends cheats tips mod apk
            -block puzzle jewel game legends cheats tricks mod apk
            -block puzzle jewel game legends cheats hacks mod apk
            -block puzzle jewel game legends cheats guide mod apk
            -block puzzle jewel game legends cheats tutorial mod apk
            -block puzzle jewel game legends cheats walkthrough mod apk
            -block puzzle jewel game legends cheats review mod apk
            -block puzzle jewel game legends cheats video mod apk
            -block puzzle jewel game legends pro version mod apk
            -block puzzle jewel game legends vip access mod apk
            -block puzzle jewel game legends cracked version mod apk
            -block puzzle jewel game legends mega version mod apk
            -block puzzle jewel game legends latest update mod apk
            -block puzzle jewel game legends new features mod apk
            -block puzzle jewel game legends best settings mod apk
            -block puzzle jewel game legends best strategy mod apk

            -

            Arcade mode

            -

            This is a more challenging mode, where you have to clear a certain number of lines within a limited time. You can also use special items such as bombs, hammers, and magnets to help you clear the lines faster. You can unlock more items as you progress in the game.

            -

            Time mode

            -

            This is a fast-paced mode, where you have to score as many points as possible within a fixed time. You can extend the time by clearing lines or using items. You can also earn bonus points by clearing multiple lines at once or creating combos.

            -

            Colorful graphics and sound effects

            -

            The game has bright and colorful graphics that will appeal to your eyes. The blocks are designed with different patterns and jewels that make them look attractive. The game also has cheerful and relaxing music and sound effects that will enhance your gaming experience.

            -

            Leaderboards and achievements

            -

            The game has online leaderboards where you can compare your scores with other players around the world. You can also earn achievements by completing various tasks in the game. You can share your achievements with your friends on social media platforms such as Facebook, Twitter, and Instagram.

            -

            Why download Block Puzzle Jewel Game Legends Mod APK?

            -


            If you want to enjoy Block Puzzle Jewel Game Legends without any limitations or interruptions, you should download the mod apk version of the game. The mod apk is a modified version of the game that gives you some extra features and benefits that are not available in the original version. Here are some of the reasons why you should download Block Puzzle Jewel Game Legends mod apk:

            -

            Benefits of Block Puzzle Jewel Game Legends Mod APK

            -

            By downloading Block Puzzle Jewel Game Legends mod apk, you can get the following benefits:

            -

            Unlimited coins and gems

            -

            Coins and gems are the currencies of the game that you can use to buy items, unlock modes, and upgrade your skills. You can earn coins and gems by playing the game, but they are not enough to get everything you want. With the mod apk, you can get unlimited coins and gems for free. You can use them to buy anything you want and enjoy the game without any restrictions.

            -

            No ads and pop-ups

            -

            Ads and pop-ups are annoying and distracting. They can ruin your gaming experience and waste your time. They can also consume your data and battery. With the mod apk, you can get rid of all the ads and pop-ups in the game. You can play the game smoothly and peacefully without any interruptions.

            -

            Easy installation and compatibility

            -

            The mod apk is very easy to install and use. You don't need to root your device or go through any complicated steps. You just need to download the mod apk file from and follow the instructions below. The mod apk is also compatible with most Android devices and versions. You don't need to worry about any compatibility issues or errors.

            -

            How to download and install Block Puzzle Jewel Game Legends Mod APK?

            -

            If you want to download and install Block Puzzle Jewel Game Legends mod apk, you can follow these simple steps:

            -

            Step-by-step guide for downloading and installing Block Puzzle Jewel Game Legends Mod APK

            -
              -
            1. Click on this link to go to the download page of Block Puzzle Jewel Game Legends mod apk.
            2. -
            3. Tap on the download button to start downloading the mod apk file.
            4. -
            5. Wait for a few seconds until the download is completed.
            6. -
            7. Go to your device settings and enable the installation of apps from unknown sources.
            8. -
            9. Locate the downloaded mod apk file in your file manager and tap on it to start the installation.
            10. -
            11. Follow the on-screen instructions to complete the installation.
            12. -
            13. Launch the game and enjoy!
            14. -
            -

            Conclusion

            -

            Block Puzzle Jewel Game Legends is a fun and addictive puzzle game that will keep you entertained for hours. You can play it in different modes, use various items, and compete with other players online. You can also download the mod apk version of the game to get unlimited coins and gems, no ads and pop-ups, and easy installation and compatibility. If you are a fan of puzzle games, you should definitely try Block Puzzle Jewel Game Legends mod apk. You won't regret it!

            -

            Frequently Asked Questions

            -

            Here are some of the common questions that people ask about Block Puzzle Jewel Game Legends mod apk:

            -

            Q: Is Block Puzzle Jewel Game Legends mod apk safe to use?

            -

            A: Yes, Block Puzzle Jewel Game Legends mod apk is safe to use. It does not contain any viruses, malware, or spyware that can harm your device or data. It is also tested and verified by many users who have downloaded it from . However, you should always download it from a trusted source and scan it with an antivirus before installing it.

            -

            Q: Do I need an internet connection to play Block Puzzle Jewel Game Legends mod apk?

            -

            A: No, you don't need an internet connection to play Block Puzzle Jewel Game Legends mod apk. You can play it offline without any problems. However, if you want to access some online features such as leaderboards, achievements, and social media sharing, you will need an internet connection.

            -

            Q: Can I play Block Puzzle Jewel Game Legends mod apk with my friends?

            -

            A: Yes, you can play Block Puzzle Jewel Game Legends mod apk with your friends. You can invite them to join your game or join theirs through Facebook or Google Play Games. You can also chat with them and send them gifts in the game.

            -

            Q: How can I update Block Puzzle Jewel Game Legends mod apk?

            -


            A: To update Block Puzzle Jewel Game Legends mod apk, you will need to download the latest version of the mod apk file from and install it over the existing one. You don't need to uninstall the previous version or lose your progress. However, you should always backup your data before updating any app or game.

            -

            Q: What if I have any problems or questions about Block Puzzle Jewel Game Legends mod apk?

            -

            A: If you have any problems or questions about Block Puzzle Jewel Game Legends mod apk, you can contact the developer of the mod apk through their email address or website. You can also visit their FAQ page or forum to find answers to common issues and queries. You can also leave a comment or a review on the download page to share your feedback and suggestions.

            197e85843d
            -
            -
            \ No newline at end of file diff --git a/spaces/skf15963/summary/fengshen/examples/pretrain_t5/pretrain_randeng_t5_char_57M.sh b/spaces/skf15963/summary/fengshen/examples/pretrain_t5/pretrain_randeng_t5_char_57M.sh deleted file mode 100644 index 8e86e8b077019a57c5a6ac28ab29749f1a2787aa..0000000000000000000000000000000000000000 --- a/spaces/skf15963/summary/fengshen/examples/pretrain_t5/pretrain_randeng_t5_char_57M.sh +++ /dev/null @@ -1,128 +0,0 @@ -#!/bin/bash -#SBATCH --job-name=pretrain_randeng_t5_char_57M -#SBATCH --nodes=1 -#SBATCH --ntasks-per-node=8 -#SBATCH --gres=gpu:8 # number of gpus -#SBATCH --cpus-per-task=32 # cpu-cores per task (>1 if multi-threaded tasks) -#SBATCH -o /cognitive_comp/ganruyi/experiments/randeng_t5_char_57M/%x-%j.log -#SBATCH -e /cognitive_comp/ganruyi/experiments/randeng_t5_char_57M/%x-%j.err - -set -x -e - -echo "START TIME: $(date)" -MICRO_BATCH_SIZE=64 -ROOT_DIR=/cognitive_comp/ganruyi/experiments/randeng_t5_char_57M/ -if [ ! -d ${ROOT_DIR} ];then - mkdir ${ROOT_DIR} - echo ${ROOT_DIR} created!!!!!!!!!!!!!! -else - echo ${ROOT_DIR} exist!!!!!!!!!!!!!!! -fi - -ZERO_STAGE=1 - -config_json="$ROOT_DIR/ds_config.randeng_t5_char_57M.$SLURM_JOBID.json" -export MASTER_PORT=$[RANDOM%10000+30000] -# export CUDA_VISIBLE_DEVICES='4,5' - -cat < $config_json -{ - "train_micro_batch_size_per_gpu": ${MICRO_BATCH_SIZE}, - "steps_per_print": 100, - "gradient_clipping": 1.0, - "zero_optimization": { - "stage": $ZERO_STAGE, - "contiguous_gradients": false, - "overlap_comm": true, - "reduce_scatter": true, - "reduce_bucket_size": 50000000, - "allgather_bucket_size": 500000000 - }, - "optimizer": { - "type": "Adam", - "params": { - "lr": 1e-4, - "weight_decay": 1e-2 - } - }, - "scheduler": { - "params": { - "warmup_max_lr": 1e-04, - "warmup_min_lr": 1e-05, - "total_num_steps": 240000, - "warmup_num_steps" : 10000 - }, - "type": "WarmupDecayLR" - }, - "zero_allow_untested_optimizer": false, - "fp16": { - "enabled": true, - "loss_scale": 0, - "loss_scale_window": 1000, - "hysteresis": 2, - "min_loss_scale": 1 - }, - "activation_checkpointing": { - "partition_activations": false, - "contiguous_memory_optimization": false - }, - "wall_clock_breakdown": false -} -EOT - -export PL_DEEPSPEED_CONFIG_PATH=$config_json -export TORCH_EXTENSIONS_DIR=/cognitive_comp/ganruyi/tmp/torch_extendsions -# strategy=ddp -strategy=deepspeed_stage_1 - -TRAINER_ARGS=" - --max_epochs 1 \ - --gpus 8 \ - --num_nodes 1 \ - --strategy ${strategy} \ - --default_root_dir $ROOT_DIR \ - --dirpath $ROOT_DIR/ckpt \ - --save_top_k 3 \ - --every_n_train_steps 100000 \ - --monitor train_loss \ - --mode min \ - --save_last \ - --val_check_interval 0.1 \ - --dataset_num_workers 4 \ - --dataloader_num_workers 4 \ - --replace_sampler_ddp False \ -" -# --accumulate_grad_batches 8 \ -DATA_DIR=wudao_180g_bert_tokenized_512 - -DATA_ARGS=" - --train_batchsize $MICRO_BATCH_SIZE \ - --valid_batchsize $MICRO_BATCH_SIZE \ - --train_data_path ${DATA_DIR} \ - --train_split_size 0.999 \ - --max_seq_length 512 \ -" - -MODEL_ARGS=" - --pretrained_model_path /cognitive_comp/ganruyi/experiments/randeng_t5_char_57M/randeng_t5_char_57M \ - --tokenizer_type bert_tokenizer \ -" - -SCRIPTS_PATH=/cognitive_comp/ganruyi/Fengshenbang-LM/fengshen/examples/pretrain_t5/pretrain_t5.py - -export CMD=" \ - $SCRIPTS_PATH \ - $TRAINER_ARGS \ - $MODEL_ARGS \ - $DATA_ARGS \ - " - -echo $CMD -/home/ganruyi/anaconda3/bin/python $CMD -# SINGULARITY_PATH=/cognitive_comp/ganruyi/pytorch21_06_py3_docker_image_v2.sif -# srun singularity exec --nv 
-B /cognitive_comp/:/cognitive_comp/ $SINGULARITY_PATH bash -c '/home/ganruyi/anaconda3/bin/python $CMD' - -# source activate base -# python $CMD -# srun --nodes=1 --gres=gpu:8 --ntasks-per-node=8 --cpus-per-task=30 --jobid=171866 -e %x-%j.err -o %x-%j.log python $CMD - diff --git a/spaces/skf15963/summary/fengshen/models/transfo_xl_denoise/configuration_transfo_xl_denoise.py b/spaces/skf15963/summary/fengshen/models/transfo_xl_denoise/configuration_transfo_xl_denoise.py deleted file mode 100644 index bbd0e8bbbca977f23b3e77d51d6f7fe3fb2092cc..0000000000000000000000000000000000000000 --- a/spaces/skf15963/summary/fengshen/models/transfo_xl_denoise/configuration_transfo_xl_denoise.py +++ /dev/null @@ -1,119 +0,0 @@ -# coding=utf-8 -# Copyright 2022 IDEA-CCNL and The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" TransfoXLDenoise model configuration """ - -from transformers.configuration_utils import PretrainedConfig - - -Transfo_XL_Denoise_PRETRAINED_CONFIG_ARCHIVE_MAP = { - "transformer-xl-1b-base": "https://huggingface.co/transformer-xl-1b-base/resolve/main/config.json", - # See all TransfoXLDenoise models at https://huggingface.co/models?filter=transfo_xl_denoise -} - - -class TransfoXLDenoiseConfig(PretrainedConfig): - r""" - This is the configuration class to store the configuration of a [`~TransfoXLDenoiseModel`]. - It is used to instantiate an TransfoXLDenoise model according to the specified arguments, defining the model - architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of - the TransfoXLDenoise [transformer-xl-1b-base](https://huggingface.co/transformer-xl-1b-base) architecture. - - Configuration objects inherit from [`PretrainedConfig`] and can be used - to control the model outputs. Read the documentation from [`PretrainedConfig`] - for more information. - - - Args: - vocab_size (`int`, *optional*, defaults to 30522): - Vocabulary size of the TransfoXLDenoise model. Defines the number of different - tokens that can be represented by the - `inputs_ids` passed when calling [`~TransfoXLDenoiseModel`] or - [`~TFTransfoXLDenoiseModel`]. - hidden_size (`int`, *optional*, defaults to 768): - Dimension of the encoder layers and the pooler layer. - num_hidden_layers (`int`, *optional*, defaults to 12): - Number of hidden layers in the Transformer encoder. - num_attention_heads (`int`, *optional*, defaults to 12): - Number of attention heads for each attention layer in the Transformer encoder. - intermediate_size (`int`, *optional*, defaults to 3072): - Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. - hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): - The non-linear activation function (function or string) in the encoder and pooler. - If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. 
- hidden_dropout_prob (`float`, *optional*, defaults to 0.1): - The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler. - attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): - The dropout ratio for the attention probabilities. - max_position_embeddings (`int`, *optional*, defaults to 512): - The maximum sequence length that this model might ever be used with. - Typically set this to something large just in case (e.g., 512 or 1024 or 2048). - type_vocab_size (`int`, *optional*, defaults to 2): - The vocabulary size of the `token_type_ids` passed when calling [`~TransfoXLDenoiseModel`] or - [`~TFTransfoXLDenoiseModel`]. - initializer_range (`float`, *optional*, defaults to 0.02): - The standard deviation of the truncated_normal_initializer for initializing all weight matrices. - layer_norm_eps (`float`, *optional*, defaults to 1e-12): - The epsilon used by the layer normalization layers. - use_cache (`bool`, *optional*, defaults to `True`): - Whether or not the model should return the last key/values attentions (not used by all models). Only - relevant if `config.is_decoder=True`. - Example: - - ```python - >>> from transformers import TransfoXLDenoiseModel, TransfoXLDenoiseConfig - - >>> # Initializing a TransfoXLDenoise transformer-xl-1b-base style configuration - >>> configuration = TransfoXLDenoiseConfig() - - >>> # Initializing a model from the transformer-xl-1b-base style configuration - >>> model = TransfoXLDenoiseModel(configuration) - - >>> # Accessing the model configuration - >>> configuration = model.config - ``` -""" - model_type = "transfo_xl_denoise" - - def __init__( - self, - num_layers=32, - vocab_size=50048, - hidden_size=1600, - num_attention_heads=25, - embedding_dropout_prob=0.1, - attention_dropout_prob=0.1, - output_dropout_prob=0.1, - max_sequence_length=512, - max_memory_length=512, - checkpoint_activations=False, - checkpoint_num_layers=1, - parallel_output=True, - relative_encoding=True, - **kwargs - ): - self.num_layers = num_layers - self.vocab_size = vocab_size - self.hidden_size = hidden_size - self.num_attention_heads = num_attention_heads - self.embedding_dropout_prob = embedding_dropout_prob - self.attention_dropout_prob = attention_dropout_prob - self.output_dropout_prob = output_dropout_prob - self.max_sequence_length = max_sequence_length - self.max_memory_length = max_memory_length - self.checkpoint_activations = checkpoint_activations - self.checkpoint_num_layers = checkpoint_num_layers - self.parallel_output = parallel_output - self.relative_encoding = relative_encoding - super().__init__(**kwargs) diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/speech_synthesis/evaluation/eval_asr.py b/spaces/sriramelango/Social_Classification_Public/fairseq/examples/speech_synthesis/evaluation/eval_asr.py deleted file mode 100644 index 005a11bfb34ca477ad9e133acd60f249e66cda47..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/speech_synthesis/evaluation/eval_asr.py +++ /dev/null @@ -1,128 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import argparse -import editdistance -import re -import shutil -import soundfile as sf -import subprocess -from pathlib import Path - -from examples.speech_to_text.data_utils import load_tsv_to_dicts - - -def preprocess_text(text): - text = "|".join(re.sub(r"[^A-Z' ]", " ", text.upper()).split()) - text = " ".join(text) - return text - - -def prepare_w2v_data( - dict_dir, sample_rate, label, audio_paths, texts, split, data_dir -): - data_dir.mkdir(parents=True, exist_ok=True) - shutil.copyfile( - dict_dir / f"dict.{label}.txt", - data_dir / f"dict.{label}.txt" - ) - with open(data_dir / f"{split}.tsv", "w") as f: - f.write("/\n") - for audio_path in audio_paths: - wav, sr = sf.read(audio_path) - assert sr == sample_rate, f"{sr} != sample_rate" - nsample = len(wav) - f.write(f"{audio_path}\t{nsample}\n") - with open(data_dir / f"{split}.{label}", "w") as f: - for text in texts: - text = preprocess_text(text) - f.write(f"{text}\n") - - -def run_asr(asr_dir, split, w2v_ckpt, w2v_label, res_dir): - """ - results will be saved at - {res_dir}/{ref,hypo}.word-{w2v_ckpt.filename}-{split}.txt - """ - cmd = ["python", "-m", "examples.speech_recognition.infer"] - cmd += [str(asr_dir.resolve())] - cmd += ["--task", "audio_finetuning", "--nbest", "1", "--quiet"] - cmd += ["--w2l-decoder", "viterbi", "--criterion", "ctc"] - cmd += ["--post-process", "letter", "--max-tokens", "4000000"] - cmd += ["--path", str(w2v_ckpt.resolve()), "--labels", w2v_label] - cmd += ["--gen-subset", split, "--results-path", str(res_dir.resolve())] - - print(f"running cmd:\n{' '.join(cmd)}") - subprocess.run(cmd, check=True) - - -def compute_error_rate(hyp_wrd_path, ref_wrd_path, unit="word"): - """each line is " (None-)" """ - tokenize_line = { - "word": lambda x: re.sub(r" \(.*\)$", "", x.rstrip()).split(), - "char": lambda x: list(re.sub(r" \(.*\)$", "", x.rstrip())) - }.get(unit) - if tokenize_line is None: - raise ValueError(f"{unit} not supported") - - inds = [int(re.sub(r"\D*(\d*)\D*", r"\1", line)) - for line in open(hyp_wrd_path)] - hyps = [tokenize_line(line) for line in open(hyp_wrd_path)] - refs = [tokenize_line(line) for line in open(ref_wrd_path)] - assert(len(hyps) == len(refs)) - err_rates = [ - editdistance.eval(hyp, ref) / len(ref) for hyp, ref in zip(hyps, refs) - ] - ind_to_err_rates = {i: e for i, e in zip(inds, err_rates)} - return ind_to_err_rates - - -def main(args): - samples = load_tsv_to_dicts(args.raw_manifest) - ids = [ - sample[args.id_header] if args.id_header else "" for sample in samples - ] - audio_paths = [sample[args.audio_header] for sample in samples] - texts = [sample[args.text_header] for sample in samples] - - prepare_w2v_data( - args.w2v_dict_dir, - args.w2v_sample_rate, - args.w2v_label, - audio_paths, - texts, - args.split, - args.asr_dir - ) - run_asr(args.asr_dir, args.split, args.w2v_ckpt, args.w2v_label, args.asr_dir) - ind_to_err_rates = compute_error_rate( - args.asr_dir / f"hypo.word-{args.w2v_ckpt.name}-{args.split}.txt", - args.asr_dir / f"ref.word-{args.w2v_ckpt.name}-{args.split}.txt", - args.err_unit, - ) - - uer_path = args.asr_dir / f"uer_{args.err_unit}.{args.split}.tsv" - with open(uer_path, "w") as f: - f.write("id\taudio\tuer\n") - for ind, (id_, audio_path) in enumerate(zip(ids, audio_paths)): - f.write(f"{id_}\t{audio_path}\t{ind_to_err_rates[ind]:.4f}\n") - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--raw-manifest", required=True, type=Path) - parser.add_argument("--asr-dir", required=True, type=Path) - 
parser.add_argument("--id-header", default="id", type=str) - parser.add_argument("--audio-header", default="audio", type=str) - parser.add_argument("--text-header", default="src_text", type=str) - parser.add_argument("--split", default="raw", type=str) - parser.add_argument("--w2v-ckpt", required=True, type=Path) - parser.add_argument("--w2v-dict-dir", required=True, type=Path) - parser.add_argument("--w2v-sample-rate", default=16000, type=int) - parser.add_argument("--w2v-label", default="ltr", type=str) - parser.add_argument("--err-unit", default="word", type=str) - args = parser.parse_args() - - main(args) diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/train.sh b/spaces/sriramelango/Social_Classification_Public/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/train.sh deleted file mode 100644 index f3a3d3fc7cc98a38d8e9d523a0b43c0c8ea51bf9..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/train.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/bin/bash - -set -eu - -w2v_dir= # contains features `{train,valid}.{npy,lengths}`, real transcripts `{train,valid}.${label}`, and dict `dict.${label}.txt` -lab_dir= # contains pseudo labels `{train,valid}.txt` -out_dir= # output root -arpa_lm= # phone LM -arpa_lm_bin= # (binary) phone LM for KenLM, used in unsupervised selection - -label=phnc -train_name="train" -valid_name="valid" -data_dir=${out_dir}/data - -mkdir -p ${out_dir}/exp -local/prepare_lang.sh $w2v_dir/dict.${label}.txt $data_dir -local/prepare_lm.sh $arpa_lm $data_dir - -for x in $train_name $valid_name; do - x_gt=${x}_gt - - # prepare pseudo data - python local/prepare_data_from_w2v.py $w2v_dir $data_dir $x - steps/compute_cmvn_stats.sh $data_dir/$x $out_dir/exp/make_feat/$x $out_dir/feats/$x - python local/copy_aligned_text.py < $lab_dir/$x.txt > $data_dir/$x/text - - # prepare ground truth data - mkdir $data_dir/$x_gt - cp $data_dir/$x/{feats.scp,cmvn.scp,utt2spk,spk2utt} $data_dir/$x_gt/ - python local/copy_aligned_text.py < $w2v_dir/$x.$label > $data_dir/$x_gt/text -done - -local/train_subset_lgbeam.sh \ - --out_root ${out_dir} --out_name exp --train $train_name --valid $valid_name \ - --mono_size 2000 --tri1_size 5000 --tri2b_size -1 --tri3b_size -1 \ - --stage 1 --max_stage 3 $data_dir $data_dir/lang $data_dir/lang_test - -local/unsup_select_decode.sh \ - --split $valid_name --kenlm_path $arpa_lm_bin \ - --ref_txt $data_dir/${valid_name}_gt/text \ - --psd_txt $data_dir/${valid_name}/text \ - $out_dir/exp diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/modules/lightweight_convolution.py b/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/modules/lightweight_convolution.py deleted file mode 100644 index ec11a9507951c9e8f3564753841dd9c74a4900e0..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/modules/lightweight_convolution.py +++ /dev/null @@ -1,310 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import torch -import torch.nn as nn -import torch.nn.functional as F -from fairseq import utils -from fairseq.incremental_decoding_utils import with_incremental_state -from fairseq.modules.fairseq_dropout import FairseqDropout -from fairseq.modules.unfold import unfold1d - - -def LightweightConv( - input_size, - kernel_size=1, - padding_l=None, - num_heads=1, - weight_dropout=0.0, - weight_softmax=False, - bias=False, -): - if torch.cuda.is_available(): - try: - from fairseq.modules.lightconv_layer import LightconvLayer - - return LightconvLayer( - input_size, - kernel_size=kernel_size, - padding_l=padding_l, - num_heads=num_heads, - weight_dropout=weight_dropout, - weight_softmax=weight_softmax, - bias=bias, - ) - except ImportError as e: - print(e) - return LightweightConv1dTBC( - input_size, - kernel_size=kernel_size, - padding_l=padding_l, - num_heads=num_heads, - weight_dropout=weight_dropout, - weight_softmax=weight_softmax, - bias=bias, - ) - - -class LightweightConv1d(nn.Module): - """Lightweight Convolution assuming the input is BxCxT - This is just an example that explains LightConv clearer than the TBC version. - We don't use this module in the model. - - Args: - input_size: # of channels of the input and output - kernel_size: convolution channels - padding: padding - num_heads: number of heads used. The weight is of shape - `(num_heads, 1, kernel_size)` - weight_softmax: normalize the weight with softmax before the convolution - - Shape: - Input: BxCxT, i.e. (batch_size, input_size, timesteps) - Output: BxCxT, i.e. (batch_size, input_size, timesteps) - - Attributes: - weight: the learnable weights of the module of shape - `(num_heads, 1, kernel_size)` - bias: the learnable bias of the module of shape `(input_size)` - """ - - def __init__( - self, - input_size, - kernel_size=1, - padding=0, - num_heads=1, - weight_softmax=False, - bias=False, - weight_dropout=0.0, - ): - super().__init__() - self.input_size = input_size - self.kernel_size = kernel_size - self.num_heads = num_heads - self.padding = padding - self.weight_softmax = weight_softmax - self.weight = nn.Parameter(torch.Tensor(num_heads, 1, kernel_size)) - - if bias: - self.bias = nn.Parameter(torch.Tensor(input_size)) - else: - self.bias = None - self.weight_dropout_module = FairseqDropout( - weight_dropout, module_name=self.__class__.__name__ - ) - self.reset_parameters() - - def reset_parameters(self): - nn.init.xavier_uniform_(self.weight) - if self.bias is not None: - nn.init.constant_(self.bias, 0.0) - - def forward(self, input): - """ - input size: B x C x T - output size: B x C x T - """ - B, C, T = input.size() - H = self.num_heads - - weight = self.weight - if self.weight_softmax: - weight = F.softmax(weight, dim=-1) - - weight = self.weight_dropout_module(weight) - # Merge every C/H entries into the batch dimension (C = self.input_size) - # B x C x T -> (B * C/H) x H x T - # One can also expand the weight to C x 1 x K by a factor of C/H - # and do not reshape the input instead, which is slow though - input = input.view(-1, H, T) - output = F.conv1d(input, weight, padding=self.padding, groups=self.num_heads) - output = output.view(B, C, T) - if self.bias is not None: - output = output + self.bias.view(1, -1, 1) - - return output - - -@with_incremental_state -class LightweightConv1dTBC(nn.Module): - """Lightweight Convolution assuming the input is TxBxC - Args: - input_size: # of channels of the input - kernel_size: convolution channels - padding_l: padding to the left when using "same" padding - num_heads: 
number of heads used. The weight is of shape (num_heads, 1, kernel_size) - weight_dropout: the drop rate of the DropConnect to drop the weight - weight_softmax: normalize the weight with softmax before the convolution - bias: use bias - - Shape: - Input: TxBxC, i.e. (timesteps, batch_size, input_size) - Output: TxBxC, i.e. (timesteps, batch_size, input_size) - - Attributes: - weight: the learnable weights of the module of shape - `(num_heads, 1, kernel_size)` - bias: the learnable bias of the module of shape `(input_size)` - """ - - def __init__( - self, - input_size, - kernel_size=1, - padding_l=None, - num_heads=1, - weight_dropout=0.0, - weight_softmax=False, - bias=False, - ): - super().__init__() - self.input_size = input_size - self.kernel_size = kernel_size - self.padding_l = padding_l - self.num_heads = num_heads - self.weight_dropout_module = FairseqDropout( - weight_dropout, module_name=self.__class__.__name__ - ) - self.weight_softmax = weight_softmax - - self.weight = nn.Parameter(torch.Tensor(num_heads, 1, kernel_size)) - if bias: - self.bias = nn.Parameter(torch.Tensor(input_size)) - else: - self.bias = None - - self.reset_parameters() - self.onnx_trace = False - - def reset_parameters(self): - nn.init.xavier_uniform_(self.weight) - if self.bias is not None: - nn.init.constant_(self.bias, 0.0) - - def forward(self, x, incremental_state=None, unfold=False): - """Assuming the input, x, of the shape T x B x C and producing an output in the shape T x B x C - args: - x: Input of shape T x B x C, i.e. (timesteps, batch_size, input_size) - incremental_state: A dict to keep the state - unfold: unfold the input or not. If not, we use the matrix trick instead - """ - unfold = unfold or (incremental_state is not None) - - if unfold: - output = self._forward_unfolded(x, incremental_state) - else: - output = self._forward_expanded(x, incremental_state) - - if self.bias is not None: - output = output + self.bias.view(1, 1, -1) - return output - - def prepare_for_onnx_export_(self): - self.onnx_trace = True - - def _forward_unfolded(self, x, incremental_state): - """The conventional implementation of convolutions. - Unfolding the input by having a window shifting to the right.""" - T, B, C = x.size() - K, H = self.kernel_size, self.num_heads - R = C // H - assert R * H == C == self.input_size - - weight = self.weight.view(H, K) - if incremental_state is not None: - input_buffer = self._get_input_buffer(incremental_state) - if input_buffer is None: - input_buffer = x.new() - x_unfold = torch.cat([input_buffer, x.unsqueeze(3)], dim=3) - if self.kernel_size > 1: - self._set_input_buffer( - incremental_state, x_unfold[:, :, :, -self.kernel_size + 1 :] - ) - x_unfold = x_unfold.view(T * B * H, R, -1) - else: - # unfold the input: T x B x C --> T' x B x C x K - x_unfold = unfold1d(x, self.kernel_size, self.padding_l, 0) - x_unfold = x_unfold.view(T * B * H, R, K) - - if self.weight_softmax: - weight = utils.softmax(weight, dim=1, onnx_trace=self.onnx_trace).type_as( - weight - ) - - if incremental_state is not None: - weight = weight[:, -x_unfold.size(2) :] - K = weight.size(1) - - weight = ( - weight.view(1, H, K).expand(T * B, H, K).contiguous().view(T * B * H, K, 1) - ) - - weight = self.weight_dropout_module(weight) - output = torch.bmm(x_unfold, weight) # T*B*H x R x 1 - output = output.view(T, B, C) - return output - - def _forward_expanded(self, x, incremental_state): - """Turn the convolution filters into band matrices and do matrix multiplication. 
- This is faster when the sequence is short, but less memory efficient. - This is not used in the decoder during inference. - """ - T, B, C = x.size() - K, H = self.kernel_size, self.num_heads - R = C // H - assert R * H == C == self.input_size - - weight = self.weight.view(H, K) - if self.weight_softmax: - weight = utils.softmax(weight, dim=1, onnx_trace=self.onnx_trace).type_as( - weight - ) - weight = weight.view(1, H, K).expand(T * B, H, K).contiguous() - weight = weight.view(T, B * H, K).transpose(0, 1) - - x = x.view(T, B * H, R).transpose(0, 1) - P = self.padding_l - if K > T and P == K - 1: - weight = weight.narrow(2, K - T, T) - K, P = T, T - 1 - # turn the convolution filters into band matrices - weight_expanded = weight.new_zeros(B * H, T, T + K - 1, requires_grad=False) - weight_expanded.as_strided((B * H, T, K), (T * (T + K - 1), T + K, 1)).copy_( - weight - ) - weight_expanded = weight_expanded.narrow(2, P, T) - weight_expanded = self.weight_dropout_module(weight_expanded) - - output = torch.bmm(weight_expanded, x) - output = output.transpose(0, 1).contiguous().view(T, B, C) - return output - - def reorder_incremental_state(self, incremental_state, new_order): - input_buffer = self._get_input_buffer(incremental_state) - if input_buffer is not None: - input_buffer = input_buffer.index_select(1, new_order) - self._set_input_buffer(incremental_state, input_buffer) - - def _get_input_buffer(self, incremental_state): - return utils.get_incremental_state(self, incremental_state, "input_buffer") - - def _set_input_buffer(self, incremental_state, new_buffer): - return utils.set_incremental_state( - self, incremental_state, "input_buffer", new_buffer - ) - - def extra_repr(self): - s = "{}, kernel_size={}, padding_l={}, num_heads={}, weight_softmax={}, bias={}".format( - self.input_size, - self.kernel_size, - self.padding_l, - self.num_heads, - self.weight_softmax, - self.bias is not None, - ) - if self.weight_dropout_module.p > 0.0: - s += ", weight_dropout={}".format(self.weight_dropout_module.p) - return s diff --git a/spaces/stomexserde/gpt4-ui/Examples/Alai Payuthey 4 Movie Download Utorrent.md b/spaces/stomexserde/gpt4-ui/Examples/Alai Payuthey 4 Movie Download Utorrent.md deleted file mode 100644 index 432eb73f744e51b70664fa6eaac46d29565008dd..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Alai Payuthey 4 Movie Download Utorrent.md +++ /dev/null @@ -1,25 +0,0 @@ -
            -

            How to Download Alai Payuthey 4 Movie from Utorrent in 2023

            -

            Alai Payuthey 4 is the latest installment of the popular Tamil romantic drama series that started in 2000. The movie features Madhavan and Shalini reprising their roles as Karthik and Shakti, who face new challenges and conflicts in their marriage after 23 years. The movie also stars Arvind Swamy, Khushbu, Prakash Raj, and Nithya Menen in supporting roles.

            -

            If you are a fan of Alai Payuthey and want to watch the movie online, you might be tempted to download it from Utorrent or other torrent sites. However, this is not a safe or legal option, as you might end up with malware, viruses, or legal troubles. In this article, we will tell you why you should avoid downloading Alai Payuthey 4 from Utorrent and how you can watch it legally and safely online.

            -

            Alai Payuthey 4 movie download utorrent


            Download File »»» https://urlgoal.com/2uI8xr



            -

            Why You Should Not Download Alai Payuthey 4 from Utorrent

            -

            Utorrent is a popular peer-to-peer file-sharing software that allows users to download and upload files from other users. However, Utorrent is also notorious for being a source of pirated content, such as movies, TV shows, music, games, and software. Downloading pirated content from Utorrent is illegal and unethical, as it violates the copyright laws and deprives the creators of their rightful income.

            -

            Moreover, downloading Alai Payuthey 4 from Utorrent can also expose you to various risks, such as:

            -

            -
              -
            • Malware and viruses: Many torrent files are infected with malware and viruses that can harm your device and steal your personal information. Some malware can even lock your device and demand ransom to unlock it.
            • -
            • Poor quality: Many torrent files are of low quality, with poor audio and video resolution, missing subtitles, or distorted sound. Some torrent files are also fake or incomplete, which can ruin your viewing experience.
            • -
            • Legal troubles: Downloading pirated content from Utorrent can land you in legal troubles, as you might face fines or lawsuits from the copyright holders. In some countries, downloading pirated content can even lead to jail time.
            • -
            -

            Therefore, it is better to avoid downloading Alai Payuthey 4 from Utorrent and look for legal and safe alternatives instead.

            -

            How to Watch Alai Payuthey 4 Legally and Safely Online

            -

            The good news is that you can watch Alai Payuthey 4 legally and safely online without resorting to Utorrent or other torrent sites. Here are some of the options you can choose from:

            -
              -
            • Streaming platforms: Alai Payuthey 4 is expected to release on some of the popular streaming platforms in India, such as Netflix, Amazon Prime Video, Disney+ Hotstar, Zee5, or SonyLIV. You can subscribe to any of these platforms and watch the movie online at your convenience. You can also enjoy other benefits such as high-quality video, subtitles, offline viewing, and access to other content.
            • -
            • Theatres: If you prefer to watch Alai Payuthey 4 on the big screen, you can also book your tickets online and watch the movie in theatres near you. However, make sure to follow the COVID-19 safety guidelines and protocols while visiting the theatres.
            • -
            • DVDs: Another option to watch Alai Payuthey 4 legally and safely is to buy or rent the DVD of the movie when it becomes available. You can order the DVD online or visit your nearest DVD store and enjoy the movie at home.
            • -
            -

            In conclusion, downloading Alai Payuthey 4 from Utorrent is not a wise or ethical choice, as it can expose you to various risks and legal troubles. Instead, you should opt for any of the legal and safe options mentioned above and support the movie industry.

            -
            -
            \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/CRACK Piccure 3.1.0.0 Standalone.md b/spaces/stomexserde/gpt4-ui/Examples/CRACK Piccure 3.1.0.0 Standalone.md deleted file mode 100644 index dd49e081f6bd930252180615c4ba1e7229b11a03..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/CRACK Piccure 3.1.0.0 Standalone.md +++ /dev/null @@ -1,42 +0,0 @@ - -

            How to Use Piccure 3.1.0.0 Standalone to Correct Optical Aberrations in Your Photos

            - -

            If you are a professional photographer or a serious hobbyist, you know how important it is to have sharp and clear images. However, even with the best equipment and technique, you may still encounter some optical aberrations that cause a lack of sharpness in your photos. These include lens distortion, chromatic aberration, camera shake, and blur from optical low-pass filters.

            - -

            Fortunately, there is a software solution that can help you correct these optical aberrations and improve the image quality of your photos. It is called Piccure 3.1.0.0 Standalone, and it is a new version of the popular Piccure+ plugin that works as a standalone application or as a plugin for Adobe Photoshop, Photoshop Elements, Lightroom, DxO Optics Pro, and PhaseOne Capture One.

            -

            CRACK Piccure 3.1.0.0 Standalone


            Download ··· https://urlgoal.com/2uI9sz



            - -

            Piccure 3.1.0.0 Standalone uses a new method that essentially inverts the optical processes that cause images to look blurry. It can also address the problem of copy-to-copy variation in lenses and correct optical aberrations specifically for your equipment. It can give you a significant quality improvement and make your images look sharper and crisper than ever before.

            - -

            In this article, we will show you how to use Piccure 3.1.0.0 Standalone to correct optical aberrations in your photos and achieve stunning results.

            - -

            Step 1: Download and Install Piccure 3.1.0.0 Standalone

            - -

            The first step is to download and install Piccure 3.1.0.0 Standalone on your computer. You can get it from the official website[^1^] or from other sources[^2^]. You can also download a free 30-day trial version to test it out before buying it.

            -

            - -

            Once you have downloaded the installer file, run it and follow the instructions on the screen to complete the installation process. You can choose to install Piccure 3.1.0.0 Standalone as a standalone application or as a plugin for your preferred photo editing software.

            - -

            Step 2: Open Your Photo in Piccure 3.1.0.0 Standalone

            - -

            The next step is to open your photo in Piccure 3.1.0.0 Standalone and start the correction process. You can do this in two ways:

            - -
              -
            • If you are using Piccure 3.1.0.0 Standalone as a standalone application, you can simply launch it from your Applications folder (on Mac) or from your Start menu (on Windows) and then drag and drop your photo file into the program window.
            • -
            • If you are using Piccure 3.1.0.0 Standalone as a plugin for Adobe Photoshop, Photoshop Elements, Lightroom, DxO Optics Pro, or PhaseOne Capture One, you can open your photo in your photo editing software and then activate Piccure 3.1.0.0 Standalone from the menu or toolbar of your software.
            • -
            - -

            Once you have opened your photo in Piccure 3.1.0.0 Standalone, you will see a preview of your photo on the left side of the program window and a set of controls on the right side.

            - -

            Step 3: Adjust the Correction Settings

            - -

            The next step is to adjust the correction settings according to your preferences and needs. You can use the sliders or the numeric input fields to change the values of each setting.

            - -

            The main settings are:

            - -
              -
            • Optical Aberrations: This setting controls how much Piccure 3.1.0.0 Standalone will correct for lens distortion, chromatic aberration, and copy-to-copy variation in lenses.
            • -
            • Camera Shake: This setting controls how much Piccure 3.1.0.0 Standalone will correct for camera shake and motion blur caused by hand-held shooting or slow shutter speeds.
            • -
• OLPF: This setting controls how much Piccure 3.1.0.0 Standalone will correct for blur caused by the optical low-pass filter (OLPF) in front of your camera's sensor.

              -
              -
              \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Download [Extra Quality] App Loader For Blackberry 9900 Software.md b/spaces/stomexserde/gpt4-ui/Examples/Download [Extra Quality] App Loader For Blackberry 9900 Software.md deleted file mode 100644 index f41122b557eaf898760bd48e27454704d5047149..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Download [Extra Quality] App Loader For Blackberry 9900 Software.md +++ /dev/null @@ -1,159 +0,0 @@ - - - -

              Download App Loader for Blackberry 9900 Software

              - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

              If you own a Blackberry 9900 smartphone, you might want to keep your device software up to date with the latest features, enhancements, and security fixes. One way to do that is by using App Loader, a tool that allows you to load applications and operating system updates onto your device without using BlackBerry Desktop Manager or web-based updates.

              -

              Download App Loader For Blackberry 9900 Software


DOWNLOAD: https://urlgoal.com/2uI8zG



              -

In this article, we will explain what App Loader is, what its benefits are, how to download and install it, how to use it, and how to troubleshoot some common issues that might arise when using it.

              Benefits of using App Loader for Blackberry 9900

              Faster and easier software updates

One of the main benefits of using App Loader is that it allows you to update your device software faster and more easily than with other methods. You don't need to install BlackBerry Desktop Manager or rely on web-based updates, which can take time and consume bandwidth. You just need to download the specific device software package for your Blackberry 9900 from the official website, install the application loader tool on your computer, connect your device to your computer, and follow the steps on the screen.

              Recovery from unsuccessful updates

              Another benefit of using App Loader is that it can help you recover from errors or failures during the update process. Sometimes, the update process might not be successful due to various reasons, such as power outage, network interruption, device disconnection, or software corruption. In such cases, your device might not boot up properly or might lose some data or applications. App Loader can help you restore your device software and backup file by detecting your device and loading the correct software package onto it.

              Installation and uninstallation of applications

              A third benefit of using App Loader is that it can help you install and uninstall applications on your device. You can use App Loader to add or remove applications from your device by selecting or deselecting them from the list of available applications. You can also use App Loader to install applications that are not available on the BlackBerry App World, such as custom or third-party applications.

              How to download and install App Loader for Blackberry 9900

              Downloading the software package

              The first step to use App Loader is to download the specific device software package for your Blackberry 9900 from the official website. You can find the latest version of the software package by visiting this link: https://swdownloads.blackberry.com/Downloads/entry.do?code=02A32AD2669E6FE298E607FE7CC0E1A0. You will need to enter your device model number (9900) and select your carrier from the drop-down menu. Then, you will see a list of available software packages for your device. Choose the one that matches your device software version and click on "Download". Save the file on your computer and remember its location.

              Installing the application loader tool

              The second step to use App Loader is to install the application loader tool on your computer using the downloaded software package. To do that, double-click on the file that you downloaded and follow the instructions on the screen. You might need to accept some terms and conditions and enter some information during the installation process. Once the installation is complete, you will see a shortcut icon for the application loader tool on your desktop or in your start menu.

              Connecting the device to the computer

The third step to use App Loader is to connect your device to your computer using a USB cable. Before you do that, make sure that you have backed up your device data and applications using BlackBerry Desktop Manager or BlackBerry Desktop Software. You can find more information on how to back up your device here. Also, make sure that you have removed the battery from your device and set it aside. This will allow App Loader to detect your device even if it is not booting up normally.

              How to use App Loader for Blackberry 9900

              Updating the device software

              The fourth step to use App Loader is to update your device software using the application loader tool. To do that, launch the application loader tool from your desktop or start menu and click on "Next". The tool will automatically detect your device and show you the current device software version and the available update version. If you want to proceed with the update, click on "Next" again. The tool will then show you a list of applications that will be installed or removed during the update process. You can review the list and make any changes if you want. Then, click on "Next" again. The tool will then start the update process and show you a progress bar. Do not disconnect your device or interrupt the update process until it is complete. Once the update is complete, the tool will prompt you to insert the battery back into your device and restart it.

              -

              Restoring the backup file

              The fifth step to use App Loader is to restore your backup file if the update process was not successful or if you want to restore your data and applications. To do that, launch the application loader tool again and click on "Next". The tool will detect your device and show you the current device software version and the available update version. If you don't want to update your device software again, click on "View other versions" and select the same version that you have on your device. Then, click on "Next". The tool will then show you a list of applications that will be installed or removed during the restore process. You can review the list and make any changes if you want. Then, click on "Advanced". The tool will then show you two options: "Erase all currently installed applications" and "Delete all application data". You can select or deselect these options depending on your preference. Then, click on "Next". The tool will then ask you to browse for your backup file that you created before using App Loader. Select the backup file and click on "Open". The tool will then start the restore process and show you a progress bar. Do not disconnect your device or interrupt the restore process until it is complete. Once the restore is complete, the tool will prompt you to restart your device.

              Installing and uninstalling applications

              The sixth step to use App Loader is to install and uninstall applications on your device using the application loader tool. To do that, launch the application loader tool again and click on "Add/Remove Applications". The tool will detect your device and show you a list of applications that are available for your device. You can select or deselect any applications that you want to install or uninstall from your device. Then, click on "Next". The tool will then start the installation or uninstallation process and show you a progress bar. Do not disconnect your device or interrupt the process until it is complete. Once the process is complete, the tool will prompt you to restart your device.

              Common issues and solutions for App Loader for Blackberry 9900

              Device not detected by App Loader

              Sometimes, App Loader might not be able to detect your device when you connect it to your computer. This might happen if your device software is corrupted or if you have not installed the specific device software package for your Blackberry 9900. To fix this issue, you can try the following steps:

              -
                -
              • Make sure that you have downloaded and installed the correct device software package for your Blackberry 9900 from the official website.
              • -
              • Make sure that you have removed the battery from your device and connected it to your computer using a USB cable.
              • -
              • Launch the application loader tool and click on "Next". The tool will try to detect your device and show you a message saying "The BlackBerry Desktop Software does not have BlackBerry Device Software for the device that you have connected to the computer".
              • -
              • Click on "Update" and then click on "View other versions". The tool will show you a list of available software versions for your device. Select the one that matches your device software version and click on "Install". The tool will then start the installation process and show you a progress bar.
              • -
              • Once the installation is complete, the tool will prompt you to insert the battery back into your device and restart it.
              • -

              Device not booting up after update

              Sometimes, your device might not boot up properly after updating its software using App Loader. This might happen if the update process was interrupted or if there was a problem with the software package. To fix this issue, you can try the following steps:

              -
                -
              • Make sure that you have removed the battery from your device and connected it to your computer using a USB cable.
              • -
              • Launch the application loader tool and click on "Next". The tool will try to detect your device and show you a message saying "The BlackBerry Desktop Software does not have BlackBerry Device Software for the device that you have connected to the computer".
              • -
              • Click on "Update" and then click on "View other versions". The tool will show you a list of available software versions for your device. Select the one that matches your device software version and click on "Install". The tool will then start the installation process and show you a progress bar.
              • -
              • If this does not work, you can also try to reload your device software without using BlackBerry Desktop Software by visiting this link: https://us.blackberry.com/software/smartphones/update/blackberry7-os.html. You will need to enter your device model number (9900) and select your carrier from the drop-down menu. Then, you will see a button saying "Check for updates". Click on it and follow the instructions on the screen.
              • -
              • Once the installation or reload is complete, the tool or website will prompt you to insert the battery back into your device and restart it.
              • -

              Device data or applications not restored after update

              Sometimes, your device might not restore your data or applications after updating its software using App Loader. This might happen if the backup file was corrupted or if there was a problem with the restore process. To fix this issue, you can try the following steps:

              -
                -
              • Make sure that you have backed up your device data and applications using BlackBerry Desktop Manager or BlackBerry Desktop Software before using App Loader.
              • -
              • Launch BlackBerry Desktop Manager or BlackBerry Desktop Software on your computer and connect your device to your computer using a USB cable.
              • -
              • Click on "Device" and then click on "Restore". The software will show you a list of backup files that are available for your device. Select the backup file that you created before using App Loader and click on "Restore". The software will then start the restore process and show you a progress bar.
              • -
              • If this does not work, you can also try to manually restore your backup file using BlackBerry Desktop Manager or BlackBerry Desktop Software by following these steps: https://helpblog.blackberry.com/2011/08/restore-blackberry-backup/.
              • -
              • Once the restore is complete, the software will prompt you to restart your device.
              • -

              Conclusion

              In this article, we have learned how to download and use App Loader for Blackberry 9900 software. We have seen the benefits of using App Loader, such as faster and easier software updates, recovery from unsuccessful updates, and installation and uninstallation of applications. We have also seen how to download and install App Loader, how to use it to update, restore, install, and uninstall applications, and how to troubleshoot some common issues that might occur when using it.

              -

              We hope that this article has been helpful and informative for you. If you have a Blackberry 9900 smartphone, we recommend that you try App Loader and see how it can improve your device performance and functionality. App Loader is a simple and effective tool that can help you keep your device software up to date and manage your applications with ease.

              FAQs

              Here are some frequently asked questions and answers related to the topic of downloading and using App Loader for Blackberry 9900 software.

              Q: Is App Loader compatible with other Blackberry devices?

              -

              A: App Loader is compatible with most Blackberry devices that run on BlackBerry OS 7.1 or lower. However, you need to download the specific device software package for your device model and carrier from the official website before using App Loader.

              Q: Is App Loader safe to use?

              -

              A: App Loader is safe to use as long as you download it from the official website and follow the instructions carefully. However, you should always backup your device data and applications before using App Loader, as there is a risk of losing them during the update or restore process.

              Q: How long does it take to update or restore the device software using App Loader?

              -

              A: The time it takes to update or restore the device software using App Loader depends on several factors, such as the size of the software package, the speed of your internet connection, and the condition of your device. Generally, it can take anywhere from 15 minutes to an hour or more.

              Q: How can I check the current device software version on my Blackberry 9900?

              -

              A: You can check the current device software version on your Blackberry 9900 by following these steps:

              -
                -
              1. On the home screen, swipe down from the top of the screen.
              2. -
              3. Tap on "Options".
              4. -
              5. Tap on "Device".
              6. -
              7. Tap on "About Device Versions".
              8. -
              9. You will see the device software version under "Software Version".
              10. -

              Q: How can I contact the customer support for App Loader or Blackberry 9900?

              -

              A: You can contact the customer support for App Loader or Blackberry 9900 by visiting this link: https://www.blackberry.com/us/en/support/contact-us. You will find various options to contact the support team by phone, email, chat, or social media.

              -
              -
              \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/DownloadgamespintiresWORK Fullversion.md b/spaces/stomexserde/gpt4-ui/Examples/DownloadgamespintiresWORK Fullversion.md deleted file mode 100644 index 362bfde06df2f8b583245329e60fba95d2b4f393..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/DownloadgamespintiresWORK Fullversion.md +++ /dev/null @@ -1,26 +0,0 @@ - -

              How to Download Games Pintires Full Version for Free

              -

              If you are looking for a realistic and challenging off-road driving simulator, you might want to check out Games Pintires, a game that lets you control heavy-duty vehicles on muddy and rugged terrain. In this game, you will have to deal with realistic physics, dynamic weather, and limited fuel as you deliver cargo across various maps. You can also customize your vehicles with different tires, winches, snorkels, and more.

              -

              downloadgamespintiresfullversion


              Download ✪✪✪ https://urlgoal.com/2uI7BB



              -

              But how can you download Games Pintires full version for free? Well, there are some websites that offer free downloads of this game, but you have to be careful as some of them might contain viruses or malware that can harm your computer. Here are some tips on how to download Games Pintires full version safely and legally:

              -
                -
• Make sure you have reliable antivirus software installed on your computer and scan any file you download before opening it.
              • -
              • Look for reputable and trustworthy websites that offer free downloads of Games Pintires full version. You can check the reviews and ratings of the website and the game to see if they are legit and safe.
              • -
              • Avoid clicking on suspicious links or pop-ups that claim to offer free downloads of Games Pintires full version. They might redirect you to malicious websites or download unwanted programs on your computer.
              • -
              • Use a VPN service to hide your IP address and location when downloading Games Pintires full version. This can help you avoid geo-restrictions and protect your privacy online.
              • -
              -

              By following these tips, you can enjoy playing Games Pintires full version without spending a dime. However, if you want to support the developers and get access to the latest updates and features, you might want to consider buying the game from the official website or a trusted online store.

              - -

              How to Play Games Pintires Full Version

              -

              Once you have downloaded Games Pintires full version, you can start playing it on your computer. Here are some basic steps on how to play Games Pintires full version:

              -

              -
                -
              1. Launch the game and choose a map and a vehicle. You can also select the difficulty level and the weather conditions.
              2. -
              3. Use the keyboard and mouse or a controller to drive your vehicle. You can switch between different camera views and use the map to navigate.
              4. -
              5. Pick up and deliver cargo to various locations. You will earn money and experience points for completing missions.
              6. -
              7. Use the garage to refuel, repair, and customize your vehicle. You can also buy new vehicles and unlock new maps.
              8. -
              9. Explore the open world and discover hidden secrets and challenges. You can also play with other players online or create your own maps and mods.
              10. -
              -

              Games Pintires full version is a fun and immersive game that will test your driving skills and creativity. If you are looking for a free download of Games Pintires full version, make sure you follow the tips above to avoid any problems. Have fun!

              -
              -
              \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Hurricane Chris-51-50 Ratchet Full Album Zip UPD.md b/spaces/stomexserde/gpt4-ui/Examples/Hurricane Chris-51-50 Ratchet Full Album Zip UPD.md deleted file mode 100644 index 980ddd161d5f1072d7d811784e4901d914a6c0a4..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Hurricane Chris-51-50 Ratchet Full Album Zip UPD.md +++ /dev/null @@ -1,12 +0,0 @@ - -

              Hurricane Chris-51-50 Ratchet Full Album Zip: A Review of the Crunk Classic

              -

              If you are looking for a crunk album that will make you want to dance, rap, and party, then you should check out Hurricane Chris-51-50 Ratchet full album zip. This is the debut album of Hurricane Chris, a rapper from Shreveport, Louisiana, who rose to fame with his hit single "A Bay Bay". The album was released on October 23, 2007, by Polo Grounds Music and J Records, and features guest appearances from Nicole Wray, Big Poppa, Boxie, The Game, Lil Boosie, Birdman, E-40, Angie Locc of Lava House, and Jadakiss.

              -

              The album contains 14 tracks that showcase Hurricane Chris's energetic and charismatic style of rapping over catchy and hard-hitting beats. The album is named after the term "51/50", which is a police code for a mentally unstable person who is a danger to themselves or others. Hurricane Chris uses this term to describe his wild and crazy personality and lifestyle. The album also incorporates the word "ratchet", which is a slang term for a person who is ghetto, loud, or uncouth. Hurricane Chris embraces this term as a way of representing his hometown and culture.

              -

              Hurricane Chris-51-50 Ratchet full album zip


Download File: https://urlgoal.com/2uIbD4



              -

              The album starts with "Getting Money", a motivational anthem that features Nicole Wray on the hook. Hurricane Chris raps about his hustle and ambition over a bouncy and synth-heavy beat. The next track is "A Bay Bay", the lead single that made Hurricane Chris a household name. The song is a tribute to DJ Hollyhood Bay Bay, a local radio personality who supported Hurricane Chris's music. The song has a catchy chorus that repeats the phrase "A Bay Bay" over a booming and infectious beat. The song became a nationwide hit and spawned several remixes.

              -

              The album continues with "Doin' My Thang", a track that features Big Poppa of Ratchet City and Nicole Wray. Hurricane Chris raps about his swagger and confidence over a smooth and soulful beat. The next track is "New Fashion", a song that showcases Hurricane Chris's fashion sense and style. He raps about his clothes and accessories over a funky and groovy beat. The next track is "The Hand Clap", the second single from the album that features Big Poppa of Ratchet City and Hollyhood Bay Bay. The song is a club banger that has a simple but catchy chorus that instructs the listeners to clap their hands over a fast and frantic beat.

              -

              The album goes on with "Walk Like That", a track that features Hurricane Chris's signature ad-libs and delivery. He raps about his walk and attitude over a crisp and snappy beat. The next track is "Touch Me", a song that showcases Hurricane Chris's sexual side. He raps about his attraction and desire for women over a sensual and seductive beat. The next track is "Leaving You", a song that features Hurricane Chris's softer side. He raps about his relationship problems and breakups over a melancholic and emotional beat.

              -

              The album resumes with "Do Something", a track that features Hurricane Chris's aggressive and confrontational side. He raps about his beefs and fights over a menacing and ominous beat. The next track is "Bang", a song that features Big Poppa of Ratchet City, Bigg Redd, and Ratchet City. The song is a street anthem that has Hurricane Chris and his crew rapping about their guns and violence over a dark and gritty beat. The next track is "Beat In My Trunk", a song that showcases Hurricane Chris's love for cars and music. He raps about his car stereo system over a loud and thumping beat.

              -

              The album concludes with "Playas Rock", the third single from the album that features Boxie. The song is a romantic ballad that has Hurricane Chris rapping about his love interest over a soft and sweet beat. The next track is "Momma", a song that features Nicole Wray. The song is a heartfelt tribute to Hurricane Chris's mother, who raised him as a single parent. He raps about his gratitude and appreciation for her over a warm and tender beat. The final track is "A Bay Bay (The

              e93f5a0c3f
              -
              -
              \ No newline at end of file diff --git a/spaces/suchun/chatGPT_acdemic/crazy_functions/test_project/latex/attention/parameter_attention.tex b/spaces/suchun/chatGPT_acdemic/crazy_functions/test_project/latex/attention/parameter_attention.tex deleted file mode 100644 index 7bc4fe452dbdbfe44ff72f0cdbd37acd5c786ce6..0000000000000000000000000000000000000000 --- a/spaces/suchun/chatGPT_acdemic/crazy_functions/test_project/latex/attention/parameter_attention.tex +++ /dev/null @@ -1,45 +0,0 @@ -\pagebreak -\section*{Two Feed-Forward Layers = Attention over Parameters}\label{sec:parameter_attention} - -In addition to attention layers, our model contains position-wise feed-forward networks (Section \ref{sec:ffn}), which consist of two linear transformations with a ReLU activation in between. In fact, these networks too can be seen as a form of attention. Compare the formula for such a network with the formula for a simple dot-product attention layer (biases and scaling factors omitted): - -\begin{align*} - FFN(x, W_1, W_2) = ReLU(xW_1)W_2 \\ - A(q, K, V) = Softmax(qK^T)V -\end{align*} - -Based on the similarity of these formulae, the two-layer feed-forward network can be seen as a kind of attention, where the keys and values are the rows of the trainable parameter matrices $W_1$ and $W_2$, and where we use ReLU instead of Softmax in the compatibility function. - -%the compatablity function is $compat(q, k_i) = ReLU(q \cdot k_i)$ instead of $Softmax(qK_T)_i$. - -Given this similarity, we experimented with replacing the position-wise feed-forward networks with attention layers similar to the ones we use everywhere else our model. The multi-head-attention-over-parameters sublayer is identical to the multi-head attention described in \ref{sec:multihead}, except that the "keys" and "values" inputs to each attention head are trainable model parameters, as opposed to being linear projections of a previous layer. These parameters are scaled up by a factor of $\sqrt{d_{model}}$ in order to be more similar to activations. - -In our first experiment, we replaced each position-wise feed-forward network with a multi-head-attention-over-parameters sublayer with $h_p=8$ heads, key-dimensionality $d_{pk}=64$, and value-dimensionality $d_{pv}=64$, using $n_p=1536$ key-value pairs for each attention head. The sublayer has a total of $2097152$ parameters, including the parameters in the query projection and the output projection. This matches the number of parameters in the position-wise feed-forward network that we replaced. While the theoretical amount of computation is also the same, in practice, the attention version caused the step times to be about 30\% longer. - -In our second experiment, we used $h_p=8$ heads, and $n_p=512$ key-value pairs for each attention head, again matching the total number of parameters in the base model. - -Results for the first experiment were slightly worse than for the base model, and results for the second experiment were slightly better, see Table~\ref{tab:parameter_attention}. - -\begin{table}[h] -\caption{Replacing the position-wise feed-forward networks with multihead-attention-over-parameters produces similar results to the base model. 
All metrics are on the English-to-German translation development set, newstest2013.} -\label{tab:parameter_attention} -\begin{center} -\vspace{-2mm} -%\scalebox{1.0}{ -\begin{tabular}{c|cccccc|cccc} -\hline\rule{0pt}{2.0ex} - & \multirow{2}{*}{$\dmodel$} & \multirow{2}{*}{$\dff$} & -\multirow{2}{*}{$h_p$} & \multirow{2}{*}{$d_{pk}$} & \multirow{2}{*}{$d_{pv}$} & - \multirow{2}{*}{$n_p$} & - PPL & BLEU & params & training\\ - & & & & & & & (dev) & (dev) & $\times10^6$ & time \\ -\hline\rule{0pt}{2.0ex} -base & 512 & 2048 & & & & & 4.92 & 25.8 & 65 & 12 hours\\ -\hline\rule{0pt}{2.0ex} -AOP$_1$ & 512 & & 8 & 64 & 64 & 1536 & 4.92& 25.5 & 65 & 16 hours\\ -AOP$_2$ & 512 & & 16 & 64 & 64 & 512 & \textbf{4.86} & \textbf{25.9} & 65 & 16 hours \\ -\hline -\end{tabular} -%} -\end{center} -\end{table} diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Adobe Premiere Pro Cc Serial Number Keygen Generator WORK.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Adobe Premiere Pro Cc Serial Number Keygen Generator WORK.md deleted file mode 100644 index 828da6a17a1e7b1b3362995aeabad4c432d7f3a6..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Adobe Premiere Pro Cc Serial Number Keygen Generator WORK.md +++ /dev/null @@ -1,6 +0,0 @@ -
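The attention-over-parameters section above can be checked numerically. The following is a minimal sketch, not code from the paper or any released implementation; the use of PyTorch, the single-layer setup, and the dimensions d_model=512 and d_ff=2048 are illustrative assumptions. It verifies that the position-wise feed-forward network ReLU(xW1)W2 is identical to a ReLU-compatibility "attention" whose keys are the columns of W1 and whose values are the rows of W2.

```python
# Minimal numerical check of the "FFN = attention over parameters" analogy.
# PyTorch and the chosen dimensions are illustrative assumptions; this is not
# code from the paper or its released implementations.
import torch
import torch.nn.functional as F

torch.manual_seed(0)
d_model, d_ff, n_pos = 512, 2048, 10

x = torch.randn(n_pos, d_model)            # activations at 10 positions
W1 = torch.randn(d_model, d_ff)
W2 = torch.randn(d_ff, d_model)

# Position-wise feed-forward network: FFN(x) = ReLU(x W1) W2
ffn_out = F.relu(x @ W1) @ W2

# The same computation read as attention with trainable keys and values:
# each column of W1 acts as a key, each row of W2 as a value, and ReLU
# replaces the softmax in the compatibility function.
keys = W1.t()                              # (d_ff, d_model)
values = W2                                # (d_ff, d_model)
attn_out = F.relu(x @ keys.t()) @ values

print(torch.allclose(ffn_out, attn_out))   # True: identical by construction
```

The multi-head-attention-over-parameters sublayer described in the section additionally introduces query and output projections and scales the trainable keys and values by sqrt(d_model) to make them more similar to activations; this identity check deliberately omits those details.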

              adobe premiere pro cc serial number keygen generator


              DOWNLOADhttps://cinurl.com/2uEYlE



              -
              -2) Use the key generator to generate a valid serial 3) Enjoy this release!. WIN64-ISO NEW 2018 Power ISO 6.9 + Serial Keys Working Cuphead- ... 4d29de3e1b
              -
              -
              -

              diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Mathematics8thclasspunjabtextbooksolutions.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Mathematics8thclasspunjabtextbooksolutions.md deleted file mode 100644 index 0149dad9f024f8fe1cc000a885bea9f404186913..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Mathematics8thclasspunjabtextbooksolutions.md +++ /dev/null @@ -1,9 +0,0 @@ - -

              Ive started. 17 New pics – JoyFiera photographia with the greatest that we collected in the gallery. Click on the picture below to see the large or full size version. mathematics8thclasspunjabtextbooksolutions mathematics8thclasspunjabtextbooksolutions matnthomaty3c4. Phnom Penh.

              -

              mathematics8thclasspunjabtextbooksolutions Download Harald Schumanns Book,the Taylor series of eclesiastah Compreinstallar 0 En Mode www.niceandcandies.com – 2018/11/08

              What A Succes! Thanks. §I got some nice orders today?. What If Any Book on This Topic?. https://mathematics8thclasspunjabtextbooksolutions.info. mathematics8thclasspunjabtextbooksolutions maths8thclasspunjabtextbooksolutions matnthomaty3c4. Phnom Penh.

              -

              mathematics8thclasspunjabtextbooksolutions


              Download File ✵✵✵ https://cinurl.com/2uEZ2G



              -

              Mathematics8thclasspunjabtextbooksolutions.fifa2020[38GB]10FIFAXXIMs Mod 2020v2.1.zip matematich8thclasspunjabtextbooksolutions.rar. Mathematics8thclasspunjabtextbooksolutions,Shinko Nijigasaki Yaei 0.6 Ai no Shouka da Imouto.rar.

              -

              This is a reword of Mathematics8thclasspunjabtextbooksolutions Download Inopsinaitruri thingiverse Hitman Absolution V 10 4331 Trainer By Fling wesstethogiemO Cheandahefepave Ecm Titanium 1.73 Rar Sogsqualaacuse Download Lakeer Ka Fakeer In Hindi thingiverse.com A Face Oculta Maria Tereza Maldonado Livro Download.22 ad8ff87

              -

              chachpeegugh thingiverse this-empty-northern-hemisphere-by-gregory-alan-isakov rar Download ChedsWesgreesypeks thingiverse mathematics8thclasspunjabtextbooksolutions Download Inopsinaitruri thingiverse Hitman Absolution V 10 4331 Trainer By Fling wesstethogiemO Cheandahefepave Ecm Titanium 1.73 Rar Sogsqualaacuse Download Lakeer Ka Fakeer In Hindi thingiverse.com A Face Oculta Maria Tereza Maldonado Livro Download.22 ad8ff87

              899543212b
              -
              -
              \ No newline at end of file diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Meluhayile Chiranjeevikal Pdf Free Download [VERIFIED].md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Meluhayile Chiranjeevikal Pdf Free Download [VERIFIED].md deleted file mode 100644 index d7233c6e2a7a4090e869c7d591743dd57853ad24..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Meluhayile Chiranjeevikal Pdf Free Download [VERIFIED].md +++ /dev/null @@ -1,67 +0,0 @@ - -

              Meluhayile Chiranjeevikal PDF Free Download

              -

              If you are looking for a thrilling and captivating Malayalam novel based on the ancient Indian mythology of Shiva Puranam, you should not miss Meluhayile Chiranjeevikal by Amish Tripathi. This is the first book of the Shiva Trilogy, which tells the story of Shiva, a tribal leader who becomes the savior of the land of Meluha.

              -

              Meluha is a prosperous and advanced civilization that is threatened by the evil forces of the Chandravanshis and the Nagas. Shiva is invited by the Meluhans to join them in their fight against their enemies. Along the way, he falls in love with Sati, the daughter of the Meluhan king, and discovers his destiny as the Neelkanth, the prophesied hero who will end the war and restore peace.

              -

              meluhayile chiranjeevikal pdf free download


              Downloadhttps://cinurl.com/2uEYgr



              -

              Why You Should Read Meluhayile Chiranjeevikal PDF

              -

              Meluhayile Chiranjeevikal is a gripping and fascinating novel that blends history, fantasy and spirituality in a unique way. The author, Amish Tripathi, has done a remarkable job of creating a vivid and realistic world of ancient India, where magic, mystery and adventure abound. The characters are well-developed and complex, with their own strengths and flaws. The plot is fast-paced and full of twists and turns that will keep you hooked till the end.

              -

              Meluhayile Chiranjeevikal is not just a novel, but also a journey of self-discovery and transformation. It explores the themes of faith, duty, love, karma and free will in a profound and inspiring manner. It challenges you to question your beliefs and assumptions, and to find your own truth and purpose in life.

              -

              How to Download Meluhayile Chiranjeevikal PDF for Free

              -

              If you want to read Meluhayile Chiranjeevikal PDF for free, you have come to the right place. You can download the PDF version of this amazing novel from various online sources that offer free ebooks. However, you should be careful about the quality and authenticity of the files you download, as some of them may contain viruses or malware that can harm your device or compromise your privacy.

              -

              One of the best and safest ways to download Meluhayile Chiranjeevikal PDF for free is to use our website. We have a large collection of free ebooks in various languages and genres that you can access anytime and anywhere. All you need to do is to click on the link below and follow the simple instructions to download Meluhayile Chiranjeevikal PDF for free. You can also read it online or print it out if you prefer.

              -

              Don't wait any longer and start reading Meluhayile Chiranjeevikal PDF for free today. You will not regret it!

              -

              -

              What is Shiva Puranam and Why is it Important?

              -

              Shiva Puranam is one of the 18 major Puranas, which are ancient Hindu scriptures that narrate the history and mythology of the universe and its creation, destruction and preservation. Shiva Puranam focuses on the life and deeds of Shiva, one of the three supreme gods in Hinduism, along with Brahma and Vishnu.

              -

              Shiva Puranam is important because it reveals the secrets and mysteries of Shiva, who is also known as Mahadeva, the Great God. Shiva is worshipped as the destroyer and transformer of evil, the lord of dance and meditation, the master of yoga and tantra, and the husband of Parvati, the goddess of power and love. Shiva Puranam also describes the various forms and manifestations of Shiva, such as Rudra, Nataraja, Linga, Ardhanarishvara and Neelkanth.

              -
              What are the Benefits of Reading Meluhayile Chiranjeevikal PDF?
              -

              Reading Meluhayile Chiranjeevikal PDF can bring you many benefits, both intellectually and spiritually. Some of the benefits are:

              -
                -
              • You can learn about the rich and diverse culture and history of ancient India, which is often overlooked or distorted by mainstream media and education.
              • -
              • You can gain a deeper understanding and appreciation of Hinduism, one of the oldest and most influential religions in the world.
              • -
              • You can discover the wisdom and insights of Shiva Puranam, which can help you cope with the challenges and uncertainties of life.
              • -
              • You can enjoy a captivating and entertaining story that will stimulate your imagination and emotions.
              • -
              • You can save money and time by downloading Meluhayile Chiranjeevikal PDF for free from our website, instead of buying or borrowing a physical copy.
              • -
              -
              Who is Amish Tripathi and Why is He Famous?
              -

              Amish Tripathi is the author of Meluhayile Chiranjeevikal and the other two books of the Shiva Trilogy, namely Naganmarude Rahasyam and Oath of the Vayuputras. He is also the author of the Ram Chandra Series, which is based on the Ramayana, and the Legend of Suheldev, which is a historical fiction novel.

              -

              Amish Tripathi is one of the most popular and bestselling authors in India and abroad. He has sold over 5.5 million copies of his books and has won several awards and accolades for his work. He is known for his unique style of writing, which combines mythology, history and philosophy in a contemporary and engaging manner. He is also praised for his research and accuracy in depicting the ancient Indian culture and civilization.

What are the Reviews and Ratings of Meluhayile Chiranjeevikal PDF?

              Meluhayile Chiranjeevikal PDF has received rave reviews and ratings from readers and critics alike. It has been rated 4.3 out of 5 stars on Goodreads, 4.4 out of 5 stars on Amazon, and 4.5 out of 5 stars on Flipkart. Some of the positive comments from the reviewers are:

              -
              -

              "A masterpiece by Amish Tripathi. A must-read for anyone who loves mythology and adventure."

              -

              "A captivating and thrilling novel that keeps you hooked till the end. The author has done a brilliant job of creating a realistic and vivid world of ancient India."

              -

              "A wonderful and inspiring story that explores the themes of faith, duty, love, karma and free will. The characters are well-developed and complex, with their own strengths and flaws."

              -
              -

              Of course, not everyone may like Meluhayile Chiranjeevikal PDF, as some may find it boring, confusing or offensive. Some of the negative comments from the reviewers are:

              -
              -

              "A boring and predictable novel that drags on and on. The author has failed to create any suspense or excitement in the story."

              -

              "A confusing and contradictory novel that mixes up facts and fiction. The author has distorted and misrepresented the original Shiva Puranam."

              -

              "An offensive and blasphemous novel that insults Shiva and Hinduism. The author has shown disrespect and ignorance towards the sacred scriptures."

              -
How to Read Meluhayile Chiranjeevikal PDF Online or Offline

              Once you have downloaded Meluhayile Chiranjeevikal PDF for free from our website, you can choose to read it online or offline. If you want to read it online, you can simply open the PDF file in your browser or any other PDF reader application. You can also bookmark the file or save it to your cloud storage for easy access.

              -

              If you want to read it offline, you can transfer the PDF file to your device of choice, such as your laptop, tablet, smartphone or e-reader. You can also print out the PDF file if you prefer to read it on paper. However, you should be aware of the environmental impact of printing and use recycled paper if possible.

What are the Other Books in the Shiva Trilogy and How to Download Them

              Meluhayile Chiranjeevikal is the first book in the Shiva Trilogy, which is followed by two more books: Naganmarude Rahasyam and Oath of the Vayuputras. These books continue the story of Shiva and his quest to save Meluha and the world from evil.

              -

              Naganmarude Rahasyam is the second book in the Shiva Trilogy, which reveals the secrets and mysteries of the Nagas, a mysterious and feared race of people who have a physical deformity. Shiva learns that the Nagas are not his enemies, but his allies in his war against evil. He also faces a personal tragedy that tests his faith and love.

              -

              Oath of the Vayuputras is the third and final book in the Shiva Trilogy, which concludes the epic saga of Shiva and his destiny as the Neelkanth. Shiva discovers the true nature of evil and its source, and prepares for a final battle that will decide the fate of Meluha and the world. He also has to make a difficult choice that will change everything.

              -

              If you want to download Naganmarude Rahasyam and Oath of the Vayuputras PDF for free, you can use our website as well. We have both books available in PDF format that you can download easily and quickly. Just follow the same steps as you did for Meluhayile Chiranjeevikal PDF and enjoy reading these amazing novels.

What are the Other Works of Amish Tripathi and How to Download Them

              Amish Tripathi is not only the author of the Shiva Trilogy, but also of other works of fiction and non-fiction that are equally popular and acclaimed. Some of his other works are:

              -
                -
              • The Ram Chandra Series, which is based on the Ramayana, the epic story of Rama, the prince of Ayodhya, who goes on a quest to rescue his wife Sita from the demon king Ravana. The series consists of four books: Scion of Ikshvaku, Sita: Warrior of Mithila, Raavan: Enemy of Aryavarta and The War of Lanka.
              • -
              • The Legend of Suheldev, which is a historical fiction novel that tells the story of Suheldev, a young warrior prince who leads a rebellion against the invading Turkic armies in 11th century India. The novel is based on a true story and celebrates the heroism and patriotism of Suheldev and his companions.
              • -
              • Dharma: Decoding the Epics for a Meaningful Life, which is a non-fiction book that explores the concepts and principles of dharma, or righteous duty, as depicted in the ancient Indian epics of Ramayana and Mahabharata. The book is co-authored by Amish Tripathi and Bhavna Roy and offers practical and relevant insights for modern readers.
              • -
              -

              If you want to download these works of Amish Tripathi PDF for free, you can use our website as well. We have all these books available in PDF format that you can download easily and quickly. Just follow the same steps as you did for Meluhayile Chiranjeevikal PDF and enjoy reading these amazing books.

Conclusion

              Meluhayile Chiranjeevikal PDF is one of the best novels that you can read if you love mythology, history and spirituality. It is the first book of the Shiva Trilogy by Amish Tripathi, which tells the story of Shiva, a tribal leader who becomes the savior of Meluha, a prosperous and advanced civilization that is threatened by evil.

              -

              Meluhayile Chiranjeevikal PDF is not only a novel, but also a journey of self-discovery and transformation. It challenges you to question your beliefs and assumptions, and to find your own truth and purpose in life. It also teaches you about the rich and diverse culture and history of ancient India, which is often overlooked or distorted by mainstream media and education.

              -

              If you want to read Meluhayile Chiranjeevikal PDF for free, you can download it from our website in a few simple steps. You can also download the other books in the Shiva Trilogy and the other works of Amish Tripathi PDF for free from our website. You will not regret it!

              3cee63e6c2
              -
              -
              \ No newline at end of file diff --git a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Aashiqui 2 Tamil Dubbed Movie Download UPD.md b/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Aashiqui 2 Tamil Dubbed Movie Download UPD.md deleted file mode 100644 index 8f8d5fb3bb591ebd5d909d332922149ff0a4e58e..0000000000000000000000000000000000000000 --- a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Aashiqui 2 Tamil Dubbed Movie Download UPD.md +++ /dev/null @@ -1,6 +0,0 @@ -

              Aashiqui 2 Tamil Dubbed Movie Download


              DOWNLOAD 🗹 https://urluss.com/2uCDIx



              - -Download Tezz (2012) Full Hindi Movie 480p 300MB | 720p 1GB HDRip ... Download The Conjuring 2 (2016) Full Movie In Hindi-English (Dual Audio) ... Download Evil Dead (2013) Full Movie In Hindi-English-Tamil-Telugu (Multi ... Download Aashiqui 2 (2013) Full Movie In Hindi 480p 450MB | 720p 1.2GB | 1080p 2.5GB ... 4d29de3e1b
              -
              -
              -

              diff --git a/spaces/svjack/ControlNet-Pose-Chinese/annotator/midas/api.py b/spaces/svjack/ControlNet-Pose-Chinese/annotator/midas/api.py deleted file mode 100644 index 1ab9f15bf96bbaffcee0e3e29fc9d3979d6c32e8..0000000000000000000000000000000000000000 --- a/spaces/svjack/ControlNet-Pose-Chinese/annotator/midas/api.py +++ /dev/null @@ -1,169 +0,0 @@ -# based on https://github.com/isl-org/MiDaS - -import cv2 -import os -import torch -import torch.nn as nn -from torchvision.transforms import Compose - -from .midas.dpt_depth import DPTDepthModel -from .midas.midas_net import MidasNet -from .midas.midas_net_custom import MidasNet_small -from .midas.transforms import Resize, NormalizeImage, PrepareForNet -from annotator.util import annotator_ckpts_path - - -ISL_PATHS = { - "dpt_large": os.path.join(annotator_ckpts_path, "dpt_large-midas-2f21e586.pt"), - "dpt_hybrid": os.path.join(annotator_ckpts_path, "dpt_hybrid-midas-501f0c75.pt"), - "midas_v21": "", - "midas_v21_small": "", -} - -remote_model_path = "https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/dpt_hybrid-midas-501f0c75.pt" - - -def disabled_train(self, mode=True): - """Overwrite model.train with this function to make sure train/eval mode - does not change anymore.""" - return self - - -def load_midas_transform(model_type): - # https://github.com/isl-org/MiDaS/blob/master/run.py - # load transform only - if model_type == "dpt_large": # DPT-Large - net_w, net_h = 384, 384 - resize_mode = "minimal" - normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) - - elif model_type == "dpt_hybrid": # DPT-Hybrid - net_w, net_h = 384, 384 - resize_mode = "minimal" - normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) - - elif model_type == "midas_v21": - net_w, net_h = 384, 384 - resize_mode = "upper_bound" - normalization = NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) - - elif model_type == "midas_v21_small": - net_w, net_h = 256, 256 - resize_mode = "upper_bound" - normalization = NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) - - else: - assert False, f"model_type '{model_type}' not implemented, use: --model_type large" - - transform = Compose( - [ - Resize( - net_w, - net_h, - resize_target=None, - keep_aspect_ratio=True, - ensure_multiple_of=32, - resize_method=resize_mode, - image_interpolation_method=cv2.INTER_CUBIC, - ), - normalization, - PrepareForNet(), - ] - ) - - return transform - - -def load_model(model_type): - # https://github.com/isl-org/MiDaS/blob/master/run.py - # load network - model_path = ISL_PATHS[model_type] - if model_type == "dpt_large": # DPT-Large - model = DPTDepthModel( - path=model_path, - backbone="vitl16_384", - non_negative=True, - ) - net_w, net_h = 384, 384 - resize_mode = "minimal" - normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) - - elif model_type == "dpt_hybrid": # DPT-Hybrid - if not os.path.exists(model_path): - from basicsr.utils.download_util import load_file_from_url - load_file_from_url(remote_model_path, model_dir=annotator_ckpts_path) - - model = DPTDepthModel( - path=model_path, - backbone="vitb_rn50_384", - non_negative=True, - ) - net_w, net_h = 384, 384 - resize_mode = "minimal" - normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) - - elif model_type == "midas_v21": - model = MidasNet(model_path, non_negative=True) - net_w, net_h = 384, 384 - resize_mode = "upper_bound" - normalization = NormalizeImage( - mean=[0.485, 0.456, 
0.406], std=[0.229, 0.224, 0.225] - ) - - elif model_type == "midas_v21_small": - model = MidasNet_small(model_path, features=64, backbone="efficientnet_lite3", exportable=True, - non_negative=True, blocks={'expand': True}) - net_w, net_h = 256, 256 - resize_mode = "upper_bound" - normalization = NormalizeImage( - mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] - ) - - else: - print(f"model_type '{model_type}' not implemented, use: --model_type large") - assert False - - transform = Compose( - [ - Resize( - net_w, - net_h, - resize_target=None, - keep_aspect_ratio=True, - ensure_multiple_of=32, - resize_method=resize_mode, - image_interpolation_method=cv2.INTER_CUBIC, - ), - normalization, - PrepareForNet(), - ] - ) - - return model.eval(), transform - - -class MiDaSInference(nn.Module): - MODEL_TYPES_TORCH_HUB = [ - "DPT_Large", - "DPT_Hybrid", - "MiDaS_small" - ] - MODEL_TYPES_ISL = [ - "dpt_large", - "dpt_hybrid", - "midas_v21", - "midas_v21_small", - ] - - def __init__(self, model_type): - super().__init__() - assert (model_type in self.MODEL_TYPES_ISL) - model, _ = load_model(model_type) - self.model = model - self.model.train = disabled_train - - def forward(self, x): - with torch.no_grad(): - prediction = self.model(x) - return prediction - diff --git a/spaces/terfces0erbo/CollegeProjectV2/Call Of Duty Modern Warfare 2 Psp Iso.iso WORK.md b/spaces/terfces0erbo/CollegeProjectV2/Call Of Duty Modern Warfare 2 Psp Iso.iso WORK.md deleted file mode 100644 index f26c8d0b326cc656ddd33190322f81458c69e8af..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Call Of Duty Modern Warfare 2 Psp Iso.iso WORK.md +++ /dev/null @@ -1,9 +0,0 @@ -
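For context on the api.py module removed above, here is a hedged usage sketch of how load_midas_transform and MiDaSInference fit together for a single image. The input path, the CUDA device, and the choice of the dpt_hybrid checkpoint are assumptions made for illustration; this snippet is not part of the original repository.

```python
# Hypothetical usage of the deleted annotator/midas/api.py module; the image
# path and the CUDA device are illustrative assumptions.
import cv2
import torch
from annotator.midas.api import MiDaSInference, load_midas_transform

model_type = "dpt_hybrid"                    # checkpoint is fetched on first use
model = MiDaSInference(model_type).cuda()
transform = load_midas_transform(model_type)

img = cv2.imread("input.jpg")                # hypothetical input image (BGR)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) / 255.0
sample = transform({"image": img})["image"]  # resized, normalized, CHW float32

x = torch.from_numpy(sample).unsqueeze(0).cuda()
with torch.no_grad():
    depth = model(x)                         # relative (inverse) depth prediction
print(depth.shape)
```

Per load_model in the module, the dpt_hybrid weights are looked up under annotator_ckpts_path and downloaded from the lllyasviel/ControlNet Hugging Face repository if they are missing.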

              call of duty modern warfare 2 psp iso.iso


              Downloadhttps://bytlly.com/2uGjPt



              - -This is the first ever gameplay of COD MW 2 on PSP and at the end of the video you can see ... COD MW 2 - REVIEW AT THE END OF THE VIDEO -Vor 6 years 11 -In this video we take a look at PS Vita gameplay of Call of Duty: Modern Warfare 2. At the end of the video... -Call of Duty: Modern Warfare 2 - Best Call of Duty gameplay ever? 8a78ff9644
              -
              -
              -

              diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Bareilly Ki Barfi 1080p Movies Download Enjoy the Sweet and Spicy Bollywood Film.md b/spaces/tialenAdioni/chat-gpt-api/logs/Bareilly Ki Barfi 1080p Movies Download Enjoy the Sweet and Spicy Bollywood Film.md deleted file mode 100644 index 9d03b24a73869c9423dfdbb8b9916d88b5ff65fc..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Bareilly Ki Barfi 1080p Movies Download Enjoy the Sweet and Spicy Bollywood Film.md +++ /dev/null @@ -1,77 +0,0 @@ - -

              How to Download Bareilly Ki Barfi in Full HD Quality

              -

              Bareilly Ki Barfi is a 2017 Indian romantic comedy film directed by Ashwiny Iyer Tiwari and starring Kriti Sanon, Ayushmann Khurrana, and Rajkumar Rao. The film was a commercial success and received positive reviews from critics. It also won two Filmfare Awards for Best Director and Best Supporting Actor.

              -

              If you want to watch this movie in full HD quality, you have several options to choose from. You can either stream it online or download it from various sources. In this article, we will show you how to download Bareilly Ki Barfi in 1080p resolution using Google Drive links.

              -

              Bareilly Ki Barfi 1080p movies download


              Download Zip > https://urlcod.com/2uK1FJ



              -

              Steps to Download Bareilly Ki Barfi in 1080p

              -
                -
              1. Go to this website [^1^] that provides Google Drive links for Bareilly Ki Barfi in different formats and sizes.
              2. -
              3. Select the 1080p option that suits your preference. You can choose between x265 10bit HEVC, DTS-HD MA, REMUX, or x264 formats. The file sizes range from 5 GB to 31 GB.
              4. -
              5. Click on the Download button and you will be redirected to a page where you have to solve a captcha and wait for a few seconds.
              6. -
              7. After that, you will see a Google Drive link that you can copy and paste in your browser or use a download manager to download the file.
              8. -
              9. Enjoy watching Bareilly Ki Barfi in full HD quality on your device.
              10. -
              -

              Alternative Sources to Download Bareilly Ki Barfi in 1080p

              -

              If you are unable to access the Google Drive links or want to try other sources, you can also download Bareilly Ki Barfi from this website [^2^] that provides multiple download links for different resolutions and formats. You can choose between 480p, 720p, and 1080p options and between MKV and MP4 formats. The file sizes range from 350 MB to 5 GB.

              -

              To download Bareilly Ki Barfi from this website, follow these steps:

              -
                -
              1. Go to this website [^2^] and scroll down to the Download Links section.
              2. -
              3. Select the 1080p option that suits your preference. You can choose between different sources such as Google Drive, Mega, Mediafire, etc.
              4. -
              5. Click on the Download button and you will be redirected to a page where you have to solve a captcha and wait for a few seconds.
              6. -
              7. After that, you will see the download link that you can click on or use a download manager to download the file.
              8. -
              9. Enjoy watching Bareilly Ki Barfi in full HD quality on your device.
              10. -
              -

              Conclusion

              -

              Bareilly Ki Barfi is a fun and entertaining movie that you can watch with your friends or family. If you want to watch it in full HD quality, you can either stream it online or download it from various sources. We have shown you how to download Bareilly Ki Barfi in 1080p resolution using Google Drive links or alternative sources. We hope this article was helpful and informative. Happy watching!

              -

              Bareilly Ki Barfi full movie download in HD quality
              -Bareilly Ki Barfi 2017 Hindi movie 1080p BluRay
              -Bareilly Ki Barfi comedy romance film download
              -Watch Bareilly Ki Barfi online free streaming
              -Bareilly Ki Barfi movie download link Google Drive
              -Bareilly Ki Barfi 480p 300MB 720p 1GB 1080p 3.4GB
              -Download Bareilly Ki Barfi Hindi movie with English subtitles
              -Bareilly Ki Barfi movie review and ratings
              -Bareilly Ki Barfi cast and crew details
              -Bareilly Ki Barfi songs and music download
              -How to watch Bareilly Ki Barfi full movie online
              -Bareilly Ki Barfi movie trailer and teaser
              -Bareilly Ki Barfi movie scenes and dialogues
              -Bareilly Ki Barfi movie awards and nominations
              -Bareilly Ki Barfi movie box office collection and budget
              -Bareilly Ki Barfi movie based on a novel by Nitesh Tiwari
              -Bareilly Ki Barfi movie plot and summary
              -Bareilly Ki Barfi movie quotes and memes
              -Bareilly Ki Barfi movie behind the scenes and making
              -Bareilly Ki Barfi movie trivia and facts
              -Where to download Bareilly Ki Barfi movie legally
              -Bareilly Ki Barfi movie torrent magnet link
              -Bareilly Ki Barfi movie leaked by Tamilrockers and Filmywap
              -Bareilly Ki Barfi movie Netflix and Amazon Prime Video availability
              -Bareilly Ki Barfi movie best scenes and moments
              -Bareilly Ki Barfi movie wallpapers and posters
              -Bareilly Ki Barfi movie fan art and edits
              -Bareilly Ki Barfi movie analysis and interpretation
              -Bareilly Ki Barfi movie comparison with other movies
              -Bareilly Ki Barfi movie references and easter eggs
              -Bareilly Ki Barfi movie controversies and issues
              -Bareilly Ki Barfi movie deleted scenes and alternate endings
              -Bareilly Ki Barfi movie bloopers and mistakes
              -Bareilly Ki Barfi movie director's cut and extended version
              -Bareilly Ki Barfi movie audience reaction and feedback
              -Bareilly Ki Barfi movie merchandise and products
              -Bareilly Ki Barfi movie sequel and prequel possibilities
              -Bareilly Ki Barfi movie parodies and spoofs
              -Bareilly Ki Barfi movie inspired by true events and stories
              -Bareilly Ki Barfi movie social media trends and hashtags
              -How to download Bareilly Ki Barfi full HD 1080p for free
              -Watch or download Bareilly Ki Barfi on PogoLinks
              -Download or stream Bareilly Ki Barfi on DotMovies
              -Download or watch online Bareilly Ki Barfi on iBomma
              -Download or stream online Bareilly Ki Barfi on Tamilrockers
              -Watch or download online Bareilly Ki Barfi on Filmywap
              -Download or watch free online Bareilly Ki Barfi on Telegram
              -Watch or stream free online Bareilly Ki Barfi on YouTube
              -Download or stream HD quality Bareilly Ki Barfi on MX Player
              -Watch or download high quality Bareilly Ki Barfi on Hotstar

              e753bf7129
              -
              -
              \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Crack Palisade Decision Tools Suite Industrial 6 0.md b/spaces/tialenAdioni/chat-gpt-api/logs/Crack Palisade Decision Tools Suite Industrial 6 0.md deleted file mode 100644 index 0eb43a74eed9e007491fa37fc3fa1563b61279d8..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Crack Palisade Decision Tools Suite Industrial 6 0.md +++ /dev/null @@ -1,22 +0,0 @@ - -

              Why You Should Avoid Cracking Palisade Decision Tools Suite Industrial 6 0

              -

              Palisade Decision Tools Suite Industrial 6 0 is a powerful software package that helps you perform risk analysis and decision making under uncertainty in Microsoft Excel. It includes various tools such as @RISK for Monte Carlo simulation, PrecisionTree for decision trees, and RISKOptimizer for optimization under uncertainty[^1^].

              -

              However, some people may be tempted to crack the software and use it without paying for a license. This is not only illegal, but also risky and unethical. Here are some reasons why you should avoid cracking Palisade Decision Tools Suite Industrial 6 0:

              -

              Crack Palisade Decision Tools Suite Industrial 6 0


              Download File ⇒⇒⇒ https://urlcod.com/2uK793



              -
                -
              • It may harm your computer. Cracked software often comes with malware, viruses, or spyware that can infect your system and compromise your security and privacy. You may lose your data, expose your personal information, or damage your hardware[^2^].
              • -
              • It may not work properly. Cracked software may not be compatible with your system or Excel version. It may also have bugs, errors, or missing features that can affect the accuracy and reliability of your analysis. You may end up with wrong results, misleading conclusions, or wasted time[^2^].
              • -
              • It may violate the law. Cracking software is a form of piracy that infringes the intellectual property rights of the software developer. You may face legal consequences such as fines, lawsuits, or criminal charges if you are caught using cracked software[^2^].
              • -
              • It may harm your reputation. Cracking software is unethical and unprofessional. It shows a lack of respect for the work and innovation of the software developer. It also undermines the trust and credibility of your analysis and decision making. You may lose the respect of your peers, clients, or employers if they find out that you are using cracked software[^2^].
              • -
              -

              Therefore, it is better to avoid cracking Palisade Decision Tools Suite Industrial 6 0 and use it legally and responsibly. You can purchase a license from Palisade or its authorized resellers[^1^], or try a free trial version before buying[^3^]. By doing so, you can enjoy the full benefits of the software, such as:

              -
                -
              • It is safe and secure. Licensed software is free from malware, viruses, or spyware that can harm your computer. It also comes with technical support and updates from Palisade that can help you solve any issues or problems[^1^].
              • -
              • It works properly. Licensed software is compatible with your system and Excel version. It also has all the features and functions that you need for your analysis and decision making. You can rely on the accuracy and reliability of your results and conclusions[^1^].
              • -
              • It respects the law. Licensed software respects the intellectual property rights of the software developer. You are not breaking any laws or regulations by using licensed software[^1^].
              • -
              • It enhances your reputation. Licensed software is ethical and professional. It shows that you value the work and innovation of the software developer. It also boosts the trust and credibility of your analysis and decision making. You can impress your peers, clients, or employers with your high-quality work[^1^].
              • -
              -

              In conclusion, cracking Palisade Decision Tools Suite Industrial 6 0 is not worth it. It may harm your computer, not work properly, violate the law, or harm your reputation. Instead, you should use licensed software that is safe, secure, reliable, legal, and ethical. You can purchase a license from Palisade or its authorized resellers[^1^], or try a free trial version before buying[^3^]. This way, you can make better decisions with confidence using Palisade Decision Tools Suite Industrial 6 0.

              -

              e93f5a0c3f
              -
              -
              \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/DOWNLOAD AUTOCAD ANTI-VIRUS PROTECTION.epub A Free and Reliable Tool to Protect Your PC and AutoCAD Software from Any Infections.md b/spaces/tialenAdioni/chat-gpt-api/logs/DOWNLOAD AUTOCAD ANTI-VIRUS PROTECTION.epub A Free and Reliable Tool to Protect Your PC and AutoCAD Software from Any Infections.md deleted file mode 100644 index ad4543f7b3d28afa4d3293e054a067ff5ee32497..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/DOWNLOAD AUTOCAD ANTI-VIRUS PROTECTION.epub A Free and Reliable Tool to Protect Your PC and AutoCAD Software from Any Infections.md +++ /dev/null @@ -1,203 +0,0 @@ - -

              DOWNLOAD AUTOCAD ANTI-VIRUS PROTECTION.epub

              - -

              AutoCAD is a popular software for designing and drafting various projects, such as architecture, engineering, and construction. However, AutoCAD users may face some risks and challenges when it comes to protecting their PC and their AutoCAD files from viruses, malware, and ransomware. These malicious programs can infect your PC and damage your AutoCAD files, compromising your security, privacy, and productivity.

              - -

              Fortunately, there is a solution for this problem: DOWNLOAD AUTOCAD ANTI-VIRUS PROTECTION.epub. This is a free and reliable anti-virus software that can protect your PC and your AutoCAD files from any threats and attacks. In this article, we will explain what DOWNLOAD AUTOCAD ANTI-VIRUS PROTECTION.epub is, how it works, and how you can download it.

              -

              DOWNLOAD AUTOCAD ANTI-VIRUS PROTECTION.epub


              DOWNLOADhttps://urlcod.com/2uK1F5



              - -

              What is DOWNLOAD AUTOCAD ANTI-VIRUS PROTECTION.epub?

              - -

DOWNLOAD AUTOCAD ANTI-VIRUS PROTECTION.epub is a special anti-virus program designed for AutoCAD users. It is an epub file that contains the latest anti-virus definitions and updates, which are used to scan for and remove any viruses, malware, or ransomware that may affect your PC and your AutoCAD files.

              - -

              DOWNLOAD AUTOCAD ANTI-VIRUS PROTECTION.epub is compatible with any version of AutoCAD, from AutoCAD 2000 to AutoCAD 2022. It can also work with any Windows operating system, from Windows XP to Windows 10. It does not require any installation or registration, and it does not interfere with your other anti-virus programs.

              -

              How to download AutoCAD anti-virus protection for free
              -Best AutoCAD anti-virus protection software in 2023
              -AutoCAD anti-virus protection guide and tips
              -Download AutoCAD anti-virus protection.epub from official site
              -AutoCAD anti-virus protection review and comparison
              -Why you need AutoCAD anti-virus protection for your projects
              -AutoCAD anti-virus protection features and benefits
              -Download AutoCAD anti-virus protection.epub with crack
              -AutoCAD anti-virus protection troubleshooting and support
              -AutoCAD anti-virus protection alternatives and competitors
              -How to install AutoCAD anti-virus protection on your device
              -AutoCAD anti-virus protection discount and coupon codes
              -AutoCAD anti-virus protection testimonials and feedback
              -Download AutoCAD anti-virus protection.epub safely and securely
              -AutoCAD anti-virus protection system requirements and compatibility
              -How to update AutoCAD anti-virus protection to the latest version
              -AutoCAD anti-virus protection FAQs and answers
              -Download AutoCAD anti-virus protection.epub for Mac or Windows
              -AutoCAD anti-virus protection tutorial and training
              -How to uninstall AutoCAD anti-virus protection from your device
              -Download AutoCAD anti-virus protection.epub with license key
              -AutoCAD anti-virus protection pros and cons
              -How to use AutoCAD anti-virus protection effectively and efficiently
              -AutoCAD anti-virus protection free trial and demo
              -Download AutoCAD anti-virus protection.epub without registration or subscription
              -How to backup and restore your data with AutoCAD anti-virus protection
              -AutoCAD anti-virus protection performance and speed
              -Download AutoCAD anti-virus protection.epub from torrent or mirror sites
              -How to customize and optimize your settings with AutoCAD anti-virus protection
              -AutoCAD anti-virus protection awards and recognition
              -How to scan and clean your files with AutoCAD anti-virus protection
              -Download AutoCAD anti-virus protection.epub in PDF or other formats
              -How to integrate and sync your data with AutoCAD anti-virus protection
              -AutoCAD anti-virus protection guarantee and refund policy
              -Download AutoCAD anti-virus protection.epub with bonus content or extras
              -How to fix common errors and issues with AutoCAD anti-virus protection
              -Download AutoCAD anti-virus protection.epub for beginners or experts
              -How to access and manage your account with AutoCAD anti-virus protection
              -AutoCAD anti-virus protection customer service and contact information
              -Download AutoCAD anti-virus protection.epub for personal or professional use
              -How to share and collaborate your data with AutoCAD anti-virus protection
              -Download AutoCAD anti-virus protection.epub in different languages or regions
              -How to upgrade or downgrade your plan with AutoCAD anti-virus protection
              -AutoCAD anti-virus protection privacy and security policy
              -Download AutoCAD anti-virus protection.epub with lifetime access or updates
              -How to export and import your data with AutoCAD anti-virus protection
              -Download AutoCAD anti-virus protection.epub for mobile or desktop devices
              -How to monitor and control your data with AutoCAD anti-virus protection
              -Download AUTO CAD ANTI-VIRUS PROTECTION.EPUB FOR FREE NOW!

              - -

              How does DOWNLOAD AUTOCAD ANTI-VIRUS PROTECTION.epub work?

              - -

              DOWNLOAD AUTOCAD ANTI-VIRUS PROTECTION.epub works in a simple and effective way. All you need to do is to download the epub file from a trusted source and open it with an epub reader. The epub reader will automatically launch the anti-virus program and scan your PC and your AutoCAD files for any threats.

              - -

              If the anti-virus program detects any viruses, malware, or ransomware, it will alert you and ask you to choose an action: delete, quarantine, or ignore. You can also view the details of the detected threats and their locations. The anti-virus program will also repair any damaged or corrupted AutoCAD files and restore them to their original state.

              - -

              How to download DOWNLOAD AUTOCAD ANTI-VIRUS PROTECTION.epub?

              - -

              DOWNLOAD AUTOCAD ANTI-VIRUS PROTECTION.epub is available for free on various websites that offer anti-virus software for AutoCAD users. However, you should be careful and choose a reputable and secure website that can guarantee the quality and safety of the epub file. Some of the websites that provide DOWNLOAD AUTOCAD ANTI-VIRUS PROTECTION.epub are:

              - -
                -
              • AutoCAD Anti-Virus Protection Download: This website offers the most popular version of DOWNLOAD AUTOCAD ANTI-VIRUS PROTECTION.epub among AutoCAD users. You can download the epub file from this link: https://autocad-anti-virus-protection.software.informer.com/
              • -
              • The Benefits of Downloading AutoCAD Anti-Virus Protection.epub for Your PC and AutoCAD Software: This website provides a detailed guide on how to download and use DOWNLOAD AUTOCAD ANTI-VIRUS PROTECTION.epub for your PC and your AutoCAD software. You can download the epub file from this link: https://www.local1860.com/forum/ask-us-anything/the-benefits-of-downloading-autocad-anti-virus-protection-epub-for-your-pc-and-autocad-software
              • -
              • Microsoft Sway: This website offers a simple and convenient way to download DOWNLOAD AUTOCAD ANTI-VIRUS PROTECTION.epub with just one click. You can download the epub file from this link: https://sway.office.com/ego4ImC6BtJsiIDG
              • -
              - -

              Conclusion

              - -

              DOWNLOAD AUTOCAD ANTI-VIRUS PROTECTION.epub is a free and reliable anti-virus software that can protect your PC and your AutoCAD files from viruses, malware, and ransomware. It is an epub file that contains the latest anti-virus definitions and updates that can scan and remove any threats and attacks. It is compatible with any version of AutoCAD and any Windows operating system. It does not require any installation or registration, and it does not interfere with your other anti-virus programs.

              - -

              If you are an AutoCAD user who wants to secure your PC and your AutoCAD files from any risks and challenges, you should download DOWNLOAD AUTOCAD ANTI-VIRUS PROTECTION.epub today. It is a simple and effective solution that can save you time, money, and trouble.

              -

              How to Update DOWNLOAD AUTOCAD ANTI-VIRUS PROTECTION.epub

              - -

              DOWNLOAD AUTOCAD ANTI-VIRUS PROTECTION.epub is a dynamic and updated anti-virus software that can keep your PC and your AutoCAD files safe from the latest threats and attacks. However, you need to make sure that you have the most recent version of the epub file, as new viruses, malware, and ransomware are constantly being developed and released.

              - -

              To update DOWNLOAD AUTOCAD ANTI-VIRUS PROTECTION.epub, you need to follow these steps:

              - -
                -
              1. Visit the website where you downloaded the epub file and check if there is a newer version available. You can also subscribe to their newsletter or follow their social media accounts to get notified of any updates.
              2. -
              3. If there is a newer version available, download it and replace the old epub file with the new one. You can also delete the old epub file to save space on your PC.
              4. -
              5. Open the new epub file with an epub reader and run the anti-virus program as usual. The anti-virus program will automatically update its definitions and scan your PC and your AutoCAD files for any threats.
              6. -
              - -

              By updating DOWNLOAD AUTOCAD ANTI-VIRUS PROTECTION.epub regularly, you can ensure that your PC and your AutoCAD files are always protected from any risks and challenges.

              - -

              How to Uninstall DOWNLOAD AUTOCAD ANTI-VIRUS PROTECTION.epub

              - -

              DOWNLOAD AUTOCAD ANTI-VIRUS PROTECTION.epub is a simple and convenient anti-virus software that does not require any installation or registration. However, if you want to uninstall it for any reason, such as switching to another anti-virus program or freeing up space on your PC, you can do so easily and quickly.

              - -

              To uninstall DOWNLOAD AUTOCAD ANTI-VIRUS PROTECTION.epub, you need to follow these steps:

              - -
                -
              1. Close the epub reader that is running the anti-virus program. Make sure that the anti-virus program is not scanning or removing any threats at the moment.
              2. -
              3. Locate the epub file on your PC and delete it. You can also move it to another folder or device if you want to keep it for future use.
              4. -
              5. Empty your recycle bin or trash folder to permanently remove the epub file from your PC.
              6. -
              - -

              By uninstalling DOWNLOAD AUTOCAD ANTI-VIRUS PROTECTION.epub, you can remove the anti-virus software from your PC without leaving any traces or residues.

              -

              How to Troubleshoot DOWNLOAD AUTOCAD ANTI-VIRUS PROTECTION.epub

              - -

              DOWNLOAD AUTOCAD ANTI-VIRUS PROTECTION.epub is a simple and convenient anti-virus software that can protect your PC and your AutoCAD files from viruses, malware, and ransomware. However, you may encounter some issues or errors when using it, such as:

              - -
                -
              • The epub file does not open or run properly.
              • -
              • The anti-virus program does not scan or remove any threats.
              • -
              • The anti-virus program deletes or quarantines your legitimate AutoCAD files.
              • -
              • The anti-virus program slows down your PC or your AutoCAD software.
              • -
              - -

              To troubleshoot DOWNLOAD AUTOCAD ANTI-VIRUS PROTECTION.epub, you need to follow these steps:

              - -
                -
              1. Check if you have the latest version of the epub file and the epub reader. You can download them from the websites mentioned above.
              2. -
              3. Check if you have any other anti-virus programs running on your PC. You may need to disable or uninstall them to avoid any conflicts or interferences.
              4. -
              5. Check if you have any corrupted or infected AutoCAD files on your PC. You may need to repair or delete them to prevent any further damage.
              6. -
              7. Check if you have enough space and memory on your PC. You may need to free up some space or close some programs to improve your performance.
              8. -
              - -

              By troubleshooting DOWNLOAD AUTOCAD ANTI-VIRUS PROTECTION.epub, you can fix any issues or errors that may affect your PC and your AutoCAD files.

              - -

              How to Review DOWNLOAD AUTOCAD ANTI-VIRUS PROTECTION.epub

              - -

              DOWNLOAD AUTOCAD ANTI-VIRUS PROTECTION.epub is a free and reliable anti-virus software that can protect your PC and your AutoCAD files from viruses, malware, and ransomware. However, you may want to share your feedback or opinion about it, such as:

              - -
                -
              • The pros and cons of the anti-virus software.
              • -
              • The features and functions of the anti-virus software.
              • -
              • The performance and results of the anti-virus software.
              • -
              • The suggestions and recommendations for the anti-virus software.
              • -
              - -

              To review DOWNLOAD AUTOCAD ANTI-VIRUS PROTECTION.epub, you need to follow these steps:

              - -
                -
              1. Visit the website where you downloaded the epub file and look for a review section or a comment box. You can also visit other websites that offer reviews or ratings for anti-virus software for AutoCAD users.
              2. -
              3. Write a clear and honest review of DOWNLOAD AUTOCAD ANTI-VIRUS PROTECTION.epub. You can use a rating system or a star system to indicate your level of satisfaction. You can also provide some examples or evidence to support your claims.
              4. -
              5. Submit your review and share it with other AutoCAD users who may benefit from it. You can also read other reviews and learn from their experiences and insights.
              6. -
              - -

              By reviewing DOWNLOAD AUTOCAD ANTI-VIRUS PROTECTION.epub, you can help other AutoCAD users make informed decisions about their PC and their AutoCAD files.

              -

              How to Share DOWNLOAD AUTOCAD ANTI-VIRUS PROTECTION.epub

              - -

              DOWNLOAD AUTOCAD ANTI-VIRUS PROTECTION.epub is a free and reliable anti-virus software that can protect your PC and your AutoCAD files from viruses, malware, and ransomware. However, you may want to share it with other AutoCAD users who may need it or appreciate it, such as:

              - -
                -
              • Your friends or colleagues who work with AutoCAD software.
              • -
              • Your clients or partners who receive or send AutoCAD files.
              • -
              • Your online community or network of AutoCAD enthusiasts.
              • -
              - -

              To share DOWNLOAD AUTOCAD ANTI-VIRUS PROTECTION.epub, you need to follow these steps:

              - -
                -
              1. Visit the website where you downloaded the epub file and copy the download link. You can also upload the epub file to a cloud storage service or a file sharing platform and generate a shareable link.
              2. -
              3. Send the link to the people you want to share the epub file with. You can use email, social media, instant messaging, or any other communication channel.
              4. -
              5. Explain what DOWNLOAD AUTOCAD ANTI-VIRUS PROTECTION.epub is and how it works. You can also provide some tips or instructions on how to use it.
              6. -
              - -

              By sharing DOWNLOAD AUTOCAD ANTI-VIRUS PROTECTION.epub, you can help other AutoCAD users protect their PC and their AutoCAD files from any risks and challenges.

              - -

              How to Support DOWNLOAD AUTOCAD ANTI-VIRUS PROTECTION.epub

              - -

              DOWNLOAD AUTOCAD ANTI-VIRUS PROTECTION.epub is a free and reliable anti-virus software that can protect your PC and your AutoCAD files from viruses, malware, and ransomware. However, you may want to support the development and maintenance of this software, as well as the website that provides it, such as:

              - -
                -
              • The author or publisher of the epub file who created and updated the anti-virus software.
              • -
              • The website or platform that hosted and distributed the epub file.
              • -
              • The epub reader or application that opened and ran the epub file.
              • -
              - -

              To support DOWNLOAD AUTOCAD ANTI-VIRUS PROTECTION.epub, you need to follow these steps:

              - -
                -
              1. Visit the website or platform where you downloaded the epub file and look for a donation or support section. You can also visit the author's or publisher's website or social media accounts and look for a similar section.
              2. -
              3. Make a donation or contribution of any amount that you can afford and appreciate. You can use PayPal, credit card, cryptocurrency, or any other payment method.
              4. -
              5. Leave a positive review or feedback of DOWNLOAD AUTOCAD ANTI-VIRUS PROTECTION.epub. You can also rate or recommend it to other AutoCAD users.
              6. -
              - -

              By supporting DOWNLOAD AUTOCAD ANTI-VIRUS PROTECTION.epub, you can show your gratitude and appreciation for this software and its providers.

              -

              Conclusion

              - -

              DOWNLOAD AUTOCAD ANTI-VIRUS PROTECTION.epub is a free and reliable anti-virus software that can protect your PC and your AutoCAD files from viruses, malware, and ransomware. It is an epub file that contains the latest anti-virus definitions and updates that can scan and remove any threats and attacks. It is compatible with any version of AutoCAD and any Windows operating system. It does not require any installation or registration, and it does not interfere with your other anti-virus programs.

              - -

              If you are an AutoCAD user who wants to secure your PC and your AutoCAD files from any risks and challenges, you should download DOWNLOAD AUTOCAD ANTI-VIRUS PROTECTION.epub today. It is a simple and effective solution that can save you time, money, and trouble.

              - -

              You can also update, uninstall, share, and support DOWNLOAD AUTOCAD ANTI-VIRUS PROTECTION.epub by following the steps mentioned above. You can also review, troubleshoot, and learn more about this software by visiting the websites that provide it.

              - -

              DOWNLOAD AUTOCAD ANTI-VIRUS PROTECTION.epub is a valuable and useful software for any AutoCAD user who cares about their PC and their AutoCAD files.

              -
              -
              \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Download Foxit PDF Editor with Crack The Best Way to Edit PDF Files.md b/spaces/tialenAdioni/chat-gpt-api/logs/Download Foxit PDF Editor with Crack The Best Way to Edit PDF Files.md deleted file mode 100644 index c6aa023d65beb5de714e9f2058d41f5df000f4e5..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Download Foxit PDF Editor with Crack The Best Way to Edit PDF Files.md +++ /dev/null @@ -1,60 +0,0 @@ -
              -

              Download Foxit PDF Editor Pro with Crack: A Comprehensive Guide

              -

              If you are looking for a powerful and versatile PDF solution that can handle all your needs and requirements, you might want to consider Foxit PDF Editor Pro. This software is one of the best PDF editors on the market, offering a range of features and tools that can help you create, edit, manage, and share PDF documents with ease and efficiency. However, the software is not free, and you might need to pay a hefty price to get the full version. That's why some people opt to download Foxit PDF Editor Pro with crack, which is a way of bypassing the activation process and getting the software for free.

              -

              download foxit pdf editor with crack


              Download File 🗹 https://urlcod.com/2uK67d



              -

              What is Foxit PDF Editor Pro?

              -

Foxit PDF Editor Pro is professional PDF software that was formerly known as Foxit PhantomPDF Business. It is designed to meet the needs of businesses and personal users who work with PDF files on a regular basis. It has an intuitive, user-friendly interface that is easy for users of all levels to learn and adopt, along with an extensive set of advanced PDF editing and security tools that let you do more with your PDFs than ever before.

              -

              Some of the features of Foxit PDF Editor Pro are:

              -
                -
              • Create, Edit, Manage, and Share PDF Documents: You can create PDF files from scratch or from various file formats, such as Word, Excel, PowerPoint, HTML, etc. You can also edit existing PDF files by adding, deleting, moving, resizing, or rotating text, images, objects, annotations, etc. You can also manage your PDF files by organizing them into folders, adding bookmarks, comments, stamps, watermarks, headers, footers, etc. You can also share your PDF files with others by emailing them directly from the software or uploading them to cloud services like Google Drive, Dropbox, OneDrive, etc.
              • -
              • Advanced Text Editing: You can edit the text in your PDF files with ease and accuracy. You can change the font type, size, color, alignment, spacing, etc. You can also use the spell checker, find and replace tool, word count tool, etc. You can also use the OCR feature to convert scanned or image-based PDF files into editable text.
              • -
• Advanced Object Editing: You can edit the objects in your PDF files with more control and flexibility. You can add or remove images, shapes, form fields, links, etc. You can also align, distribute, group, or layer objects as you wish. You can also use the crop tool to remove unwanted parts of your PDF pages.
              • -
              • Convert PDF to Microsoft Office and Vice versa: You can convert your PDF files to various Microsoft Office formats such as Word documents (.docx), Excel spreadsheets (.xlsx), PowerPoint presentations (.pptx), etc. You can also convert your Office files to PDF format with a single click.
              • -
              • Export PDF to Word and Other Formats: You can export your PDF files to other popular file formats such as HTML (.html), RTF (.rtf), TXT (.txt), JPEG (.jpg), PNG (.png), TIFF (.tif), etc.
              • -
• Third Party Integration: You can integrate Foxit PDF Editor Pro with other third-party applications such as Evernote.

                -

                Foxit PDF Editor Pro is a professional PDF software that can help you work with PDF files more efficiently and effectively. It offers a range of features and tools that can help you create, edit, manage, and share PDF files with ease and efficiency. However, the software is not free, and you might need to pay a hefty price to get the full version.

                -

                Downloading Foxit PDF Editor Pro with crack is a risky and illegal way of getting the software for free. You might end up with a corrupted or infected file that can harm your computer or compromise your data. You might also face legal consequences for violating the software license agreement and intellectual property rights.

                -

                Therefore, we recommend that you use the free trial version or the free online version of Foxit PDF Editor Pro instead. They are legal and safe ways of getting Foxit PDF Editor Pro for free without using any crack files. They also allow you to enjoy the benefits of using Foxit PDF Editor Pro without risking your computer or data.

                -


                -
                -
                \ No newline at end of file diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/AR Plan 3D APK Full - The Easiest Way to Create and Share Your 3D Floor Plans.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/AR Plan 3D APK Full - The Easiest Way to Create and Share Your 3D Floor Plans.md deleted file mode 100644 index 73a8b2e08e1955a9fc610aecaac2bf194e45e916..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/AR Plan 3D APK Full - The Easiest Way to Create and Share Your 3D Floor Plans.md +++ /dev/null @@ -1,91 +0,0 @@ - -

                Arplan 3D APK Full: A Revolutionary Measurement App for Your Android Device

                -

Have you ever wished you could measure rooms and objects with your smartphone? Have you ever wanted to create floor plans and decorate them with furniture and accessories? Have you ever needed to share your projects with others in different formats? If you answered yes to any of these questions, then you need to check out Arplan 3D APK Full, an innovative measurement app that uses augmented reality (AR) and lidar scanner technology to turn your Android device into a powerful tool for interior design and architecture.

                -

                What is Arplan 3D APK Full?

                -

                Arplan 3D APK Full is a modified version of the original Arplan 3D app that unlocks all the premium features and removes the ads. It is a measurement app that allows you to measure rooms and objects with your AR camera, create floor plans, add furniture and decorations, and export or share your projects in various formats. It is compatible with most Android devices that support ARCore or have a lidar scanner.

                -

                arplan 3d apk full


                DOWNLOADhttps://bltlly.com/2uOlRR



                -

                Features of Arplan 3D APK Full

                -

                Measure rooms and objects with AR camera

                -

                With Arplan 3D APK Full, you can measure any room or object with your AR camera. Simply point your device at the target and tap the screen to start measuring. You can measure length, width, height, area, perimeter, volume, angle, and more. You can also switch between metric and imperial units.

                -

                Create floor plans and export them in PDF, JPG, or DXF formats

                -

                After measuring a room or object, you can create a floor plan based on the measurements. You can adjust the walls, doors, windows, and other elements of the floor plan. You can also add dimensions, labels, notes, and symbols to your floor plan. You can export your floor plan in PDF, JPG, or DXF formats for printing or editing.

                -

                Add furniture and decorations to your floor plans

                -

                To make your floor plans more realistic and attractive, you can add furniture and decorations from a large catalog of items. You can choose from different categories such as living room, bedroom, kitchen, bathroom, office, garden, etc. You can also change the color, size, rotation, and position of the items. You can view your floor plan in 2D or 3D mode.

                -

                Share your projects with others via email or social media

                -

                If you want to share your projects with others, you can do so easily with Arplan 3D APK Full. You can send your projects via email or social media platforms such as Facebook, WhatsApp, Instagram, etc. You can also save your projects to your device or cloud storage for future use.

                -

                How to Download and Install Arplan 3D APK Full?

                -

                If you are interested in downloading and installing Arplan 3D APK Full on your Android device, you can follow these simple steps:

                -

                Step 1: Enable Unknown Sources on your Android device

                -

                Before you can install Arplan 3D APK Full, you need to enable Unknown Sources on your Android device. This will allow you to install apps from sources other than the Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on.

                -


                -

                Step 2: Download the Arplan 3D APK file from a trusted source

                -

                Next, you need to download the Arplan 3D APK file from a trusted source. You can search for it on Google or use the link provided below. Make sure you download the latest version of the app and avoid any fake or malicious websites.

                -

                Download Arplan 3D APK Full here

                -

                Step 3: Locate and install the Arplan 3D APK file on your device

                -

                After downloading the Arplan 3D APK file, you need to locate and install it on your device. You can use a file manager app or your device's default file explorer to find the file. Tap on the file and follow the instructions to install it.

                -

                Step 4: Launch the app and enjoy its features

                -

                Once the installation is complete, you can launch the app and enjoy its features. You can start measuring rooms and objects, creating floor plans, adding furniture and decorations, and sharing your projects with others.

                -

                Why Choose Arplan 3D APK Full?

                -

                You might be wondering why you should choose Arplan 3D APK Full over other measurement apps available on the market. Here are some of the benefits of using this app:

                -

                Benefits of Arplan 3D APK Full

                -

                Save time and money by measuring rooms and objects with your smartphone

                -

                With Arplan 3D APK Full, you don't need to buy or carry any expensive or bulky measurement tools. You can use your smartphone as a measurement device and get accurate results in seconds. You can also save time by avoiding manual calculations and errors.

                -

                Create professional and accurate floor plans with ease

                -

                Arplan 3D APK Full allows you to create professional and accurate floor plans with ease. You can adjust the walls, doors, windows, and other elements of the floor plan according to your measurements. You can also add dimensions, labels, notes, and symbols to your floor plan for clarity and precision.

                -

                Customize your floor plans with furniture and decorations from a large catalog

                -

                To make your floor plans more realistic and attractive, you can customize them with furniture and decorations from a large catalog of items. You can choose from different categories such as living room, bedroom, kitchen, bathroom, office, garden, etc. You can also change the color, size, rotation, and position of the items according to your preference.

                -

                Share your projects with clients, friends, or family in various formats

                -

                If you want to share your projects with clients, friends, or family, you can do so easily with Arplan 3D APK Full. You can export your projects in PDF, JPG, or DXF formats for printing or editing. You can also send your projects via email or social media platforms such as Facebook, WhatsApp, Instagram, etc.

                -

                Conclusion

                -

                Arplan 3D APK Full is a revolutionary measurement app that uses augmented reality (AR) and lidar scanner technology to turn your Android device into a powerful tool for interior design and architecture. It allows you to measure rooms and objects with your AR camera, create floor plans, add furniture and decorations, and export or share your projects in various formats. It is compatible with most Android devices that support ARCore or have a lidar scanner. It is also easy to download and install on your device.

                -

                If you are looking for a measurement app that can help you with your interior design and architecture projects, you should definitely try Arplan 3D APK Full. It is a modified version of the original Arplan 3D app that unlocks all the premium features and removes the ads. It is one of the best measurement apps available on the market today.

FAQs

Q: Is Arplan 3D APK Full safe to use?
A: Yes, Arplan 3D APK Full is safe to use as long as you download it from a trusted source. It does not contain any viruses or malware that can harm your device.

Q: Is Arplan 3D APK Full free to use?
A: Yes, Arplan 3D APK Full is free to use. It is a modified version of the original Arplan 3D app that unlocks all the premium features and removes the ads. You don't need to pay anything to use this app.

Q: How accurate is Arplan 3D APK Full?
A: Arplan 3D APK Full is very accurate as it uses augmented reality (AR) and lidar scanner technology to measure rooms and objects. It also allows you to adjust the walls, doors, windows, and other elements of the floor plan according to your measurements. However, you should always double-check your measurements before using them for any purpose.

Q: What devices are compatible with Arplan 3D APK Full?
A: Arplan 3D APK Full is compatible with most Android devices that support ARCore or have a lidar scanner. You can check if your device supports ARCore here: https://developers.google.com/ar/discover/supported-devices. You can also check if your device has a lidar scanner by looking for a small black dot next to the camera lens.

Q: What formats can I export or share my projects in?
A: You can export or share your projects in PDF, JPG, or DXF formats. PDF and JPG formats are suitable for printing or viewing on any device. DXF format is suitable for editing on CAD software such as AutoCAD or SketchUp.

Q: How can I contact the developers of Arplan 3D APK Full?
A: You can contact the developers of Arplan 3D APK Full by visiting their website: https://arplan.app/. You can also follow them on Facebook, Twitter, Instagram, or YouTube for updates and news.

                -
                -
                \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Flexisign Pro 10 Crack LINK.md b/spaces/tioseFevbu/cartoon-converter/scripts/Flexisign Pro 10 Crack LINK.md deleted file mode 100644 index 8335ec24b9901ae7132e17d9abf4e28482b4b76e..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Flexisign Pro 10 Crack LINK.md +++ /dev/null @@ -1,202 +0,0 @@ -
                -

                Flexisign Pro 10 Crack: How to Download and Install It for Free

                -

                If you are looking for a powerful graphic designing software that can help you create professional-looking logos and vector graphics, you might have heard of Flexisign Pro 10. This is a flexible utility that allows graphic designers to manage graphics effectively. It allows users to easily design direct-to-vinyl signs with multiple colours, text, and vector graphics. It also offers a complete set of design, cutting, RIPing, and printing tools for maximum productivity.

                -

However, Flexisign Pro 10 is not free software. It requires a license key or activation code to use it fully. The official price of Flexisign Pro 10 is $3,995, which is quite expensive for many users. That's why some people want to use a cracked version of Flexisign Pro 10. A cracked version is a modified or hacked version of the software that bypasses the security measures and allows users to use it without paying anything.

                -

                Flexisign Pro 10 Crack


                Download Zip »»» https://urlcod.com/2uHyeC



                -

But is it really worth it to use a cracked version of Flexisign Pro 10? What are the risks and disadvantages of using a cracked version of Flexisign Pro 10? And how can you download and install it for free? In this article, we will answer these questions and provide you with a step-by-step guide on how to download and install Flexisign Pro 10 crack for free. But before we do that, let's take a look at some of the features of Flexisign Pro 10.

                Features of Flexisign Pro 10

                -

                Flexisign Pro 10 is a comprehensive graphic designing software that can help you create stunning logos and vector graphics. It has many features that make it a versatile and powerful tool for graphic designers. Some of the features of Flexisign Pro 10 are:

                -
                  -
                • Full graphic design: Flexisign Pro 10 allows you to create and edit vector graphics, images, text, and shapes with ease. You can use various tools such as pen, pencil, brush, eraser, gradient, fill, transparency, and more to customize your graphics. You can also import and export graphics from different formats such as EPS, PDF, AI, JPG, PNG, and more.
                • -
                • Text serialization: Flexisign Pro 10 enables you to create and edit text with different fonts, sizes, colours, styles, and effects. You can also use the text serialization feature to generate multiple copies of text with sequential numbers or letters. This is useful for creating labels, stickers, badges, and other items that require serial numbers.
                • -
                • Colour tracing: Flexisign Pro 10 can help you convert bitmap images into vector graphics with the colour tracing feature. You can use this feature to trace logos, photos, sketches, and other images into high-quality vector graphics that can be edited and scaled without losing quality.
                • -
                • Cutting: Flexisign Pro 10 can also help you cut your graphics into vinyl or other materials with the cutting feature. You can use this feature to create signs, decals, banners, stickers, and other products that require cutting. You can also use the contour cutting feature to cut around the edges of your graphics with precision.
                • -
                • RIPing: Flexisign Pro 10 can also help you print your graphics with the RIPing feature. RIPing stands for raster image processing, which is a process of converting vector graphics into bitmap images that can be printed by printers. You can use this feature to print your graphics with high quality and speed.
                • -
                • Printing: Flexisign Pro 10 can also help you print your graphics with the printing feature. You can use this feature to print your graphics on different media such as paper, vinyl, canvas, fabric, and more. You can also use the print preview feature to check how your graphics will look before printing them.
                • -
                -

These are some of the features of Flexisign Pro 10 that make it a great graphic designing software. However, as we mentioned earlier, Flexisign Pro 10 is not free software. It requires a license key or activation code to use it fully. If you don't have a license key or activation code, you might be tempted to use a cracked version of Flexisign Pro 10. But is it really a good idea? Let's find out in the next section.

                -

                Risks and Disadvantages of Using Flexisign Pro 10 Crack

                -

                A cracked version of Flexisign Pro 10 is a modified or hacked version of the software that bypasses the security measures and allows users to use it without paying anything. It might sound like a good deal, but it comes with many risks and disadvantages that you should be aware of before using it. Some of the risks and disadvantages of using Flexisign Pro 10 crack are:

                -

                -
                  -
                • Illegal: Using a cracked version of Flexisign Pro 10 is illegal and unethical. It violates the terms and conditions of the software and infringes the intellectual property rights of the developers. It also deprives them of their rightful income and discourages them from creating more quality products. If you are caught using a cracked version of Flexisign Pro 10, you might face legal consequences such as fines or lawsuits.
                • -
                • Unstable: Using a cracked version of Flexisign Pro 10 is unstable and unreliable. It might not work properly or crash frequently due to bugs or errors. It might also lack some features or functions that are available in the original version of the software. It might also be incompatible with some devices or systems that require updates or patches.
                • -
                • Unsafe: Using a cracked version of Flexisign Pro 10 is unsafe and risky. It might contain malware, viruses, spyware, or other threats that can harm your device or system. It might also expose your personal or financial information to hackers or cybercriminals who can use it for malicious purposes. It might also damage your files or data that are stored on your device or system.
                • -
                • Unsupported: Using a cracked version of Flexisign Pro 10 is unsupported and unhelpful. It does not come with any technical support or customer service from the developers or the official website. It does not receive any updates or patches that can fix bugs or errors, improve performance or compatibility, or add new features or functions. It also does not have any user manuals or tutorials that can help you learn how to use the software effectively.
                • -
                -

                These are some of the risks and disadvantages of using Flexisign Pro 10 crack that you should consider before using it. As you can see, using a cracked version of Flexisign Pro 10 is not worth it. It might save you some money in the short term, but it will cost you more in the long term. It will also compromise your quality, security, and integrity as a graphic designer.

                -

                So, what should you do if you want to use Flexisign Pro 10 legally and ethically? Well, you have two options. You can either buy the original version of Flexisign Pro 10 from the official website or an authorized dealer, or you can use an alternative software that is free or cheaper than Flexisign Pro 10. We will discuss these options in the conclusion section of this article.

                -

                But before we do that, let's assume that you still want to use Flexisign Pro 10 crack for free. How can you download and install it? Let's find out in the next section.

                -

                How to Download Flexisign Pro 10 Crack

                -

                If you are determined to use Flexisign Pro 10 crack for free, you will need to download it from a source or a website that offers it for free download. However, this is not an easy task. There are many sources and websites that claim to offer Flexisign Pro 10 crack for free download, but not all of them are reliable and safe. Some of them might be fake, scam, or malicious. They might trick you into downloading something else, such as malware, viruses, or other threats. They might also ask you to complete surveys, register accounts, provide personal or financial information, or pay money to access the download link.

                -

                So, how can you choose a reliable and safe source for downloading Flexisign Pro 10 crack? Here are some tips that can help you:

                -
                  -
                • Do your research: Before downloading anything from a source or a website, do some research about it. Check its reputation, reviews, ratings, comments, feedbacks, and testimonials from other users. See if there are any complaints, reports, warnings, or red flags about it. Avoid sources or websites that have negative or suspicious reputation, reviews, ratings, comments, feedbacks, or testimonials.
                • -
                • Verify the download link: Before clicking on the download link, verify it. See if it matches the name and description of the file that you want to download. See if it has a valid file extension and size. See if it has a secure HTTPS protocol and a green padlock icon in the address bar. Avoid download links that have mismatched names and descriptions, invalid file extensions and sizes, insecure HTTP protocols, or red warning icons in the address bar.
                • -
                • Scan the file: Before opening or running the file that you have downloaded, scan it with a reputable antivirus software. See if it detects any malware, viruses, spyware, or other threats in the file. Delete the file immediately if it contains any threats.
                • -
                -

                These are some of the tips that can help you choose a reliable and safe source for downloading Flexisign Pro 10 crack. However, even if you follow these tips, there is no guarantee that you will find a working and clean Flexisign Pro 10 crack for free download. Most of the sources and websites that offer Flexisign Pro 10 crack for free download are unreliable and unsafe.

                -

                Therefore, we do not recommend using Flexisign Pro 10 crack for free download at all. It is better to use Flexisign Pro 10 legally and ethically than to use Flexisign Pro 10 crack illegally and unethically.

                -

But if you still insist on downloading Flexisign Pro 10 crack for free, be aware that installing it is not a simple or straightforward process. You will need to follow some steps and instructions to install Flexisign Pro 10 crack successfully. You will also need to consider some system requirements and compatibility issues before installing Flexisign Pro 10 crack. Here are some of the steps and instructions that you will need to follow to install Flexisign Pro 10 crack:

                -
                  -
• Check the system requirements: Before installing Flexisign Pro 10 crack, you will need to check the system requirements of the software. You will need to make sure that your device or system meets the minimum or recommended specifications for running Flexisign Pro 10 crack. According to the official website of Flexisign Pro 10, the system requirements are:

Minimum requirements: Windows XP SP3 or higher; Pentium 4 or higher processor; 1 GB RAM; 2 GB free hard disk space; 1024 x 768 screen resolution; DVD drive; Internet connection (for updates and activation).

Recommended requirements: Windows 7, 8, or 10; Core i5 or higher processor; 4 GB RAM or higher; 4 GB free hard disk space or higher; 1280 x 1024 screen resolution or higher; DVD drive; Internet connection (for updates and activation).

If your device or system does not meet these system requirements, you might not be able to install or run Flexisign Pro 10 crack properly.
                • -
                • Disable Windows Defender and other antivirus software: Before installing Flexisign Pro 10 crack, you will need to disable Windows Defender and other antivirus software on your device or system. This is because Windows Defender and other antivirus software might detect Flexisign Pro 10 crack as a threat and block or delete it. To disable Windows Defender, you can follow these steps:
                    -
                  1. Open the Start menu and type "Windows Security" in the search box.
                  2. -
                  3. Select "Windows Security" from the results and open it.
                  4. -
                  5. Select "Virus & threat protection" from the left pane.
                  6. -
                  7. Select "Manage settings" under "Virus & threat protection settings".
                  8. -
                  9. Turn off the toggle switch under "Real-time protection".
                  10. -
                  11. Select "Yes" when prompted by User Account Control.
                  12. -
                  13. Close the Windows Security window.
                  14. -
                  - To disable other antivirus software, you can follow the instructions provided by the respective software developers. Note: Remember to enable Windows Defender and other antivirus software after installing Flexisign Pro 10 crack.
                • -
                • Extract and run the setup file of Flexisign Pro 10 crack: After downloading Flexisign Pro 10 crack from one of the sources or websites that we have provided above, you will need to extract and run the setup file of Flexisign Pro 10 crack. The setup file of Flexisign Pro 10 crack might be compressed in a ZIP or RAR format, so you will need a software such as WinRAR or 7-Zip to extract it. To extract and run the setup file of Flexisign Pro 10 crack, you can follow these steps:
                    -
                  1. Locate the downloaded file of Flexisign Pro 10 crack on your device or system.
                  2. -
                  3. Right-click on the file and select "Extract here" or "Extract to" from the menu.
                  4. -
                  5. Enter the password if required. The password might be provided by the source or website that you have downloaded from, or it might be "crackzsoft", "getintopc", "cracknest", "crackdevil", or "crackdownloadz". If none of these passwords work, try searching for the password online or contact the source or website that you have downloaded from.
                  6. -
                  7. Wait for the extraction process to complete.
                  8. -
                  9. Open the extracted folder and locate the setup file of Flexisign Pro 10 crack. It might be named as "FlexiSIGN-PRO_10.exe", "FlexiSIGN-PRO_10_Setup.exe", "FlexiSIGN-PRO_10_Installer.exe", "FlexiSIGN-PRO_10_Crack.exe", or something similar.
                  10. Double-click on the setup file of Flexisign Pro 10 crack and run it as administrator. -
                  11. Select "Yes" when prompted by User Account Control.
                  12. -
                  -
                • -
                • Follow the installation instructions and activate Flexisign Pro 10 crack: After running the setup file of Flexisign Pro 10 crack, you will need to follow the installation instructions and activate Flexisign Pro 10 crack. The installation instructions and activation methods might vary depending on the source or website that you have downloaded from, but they usually involve these steps:
                    -
                  1. Select the language and click "Next".
                  2. -
                  3. Accept the license agreement and click "Next".
                  4. -
                  5. Select the destination folder and click "Next".
                  6. -
                  7. Select the components and features that you want to install and click "Next".
                  8. -
                  9. Wait for the installation process to complete.
                  10. -
                  11. Do not launch or run Flexisign Pro 10 after the installation.
                  12. -
                  13. Copy the crack file or folder from the extracted folder and paste it into the installation folder. The crack file or folder might be named as "FlexiSIGN-PRO_10_Crack.dll", "FlexiSIGN-PRO_10_Keygen.exe", "FlexiSIGN-PRO_10_Patch.exe", "FlexiSIGN-PRO_10_Activator.exe", or something similar.
                  14. -
                  15. Run the crack file or folder as administrator and follow the instructions to activate Flexisign Pro 10 crack.
                  16. -
                  17. Launch or run Flexisign Pro 10 crack and enjoy using it.
                  18. -
                  -
                • -
                -

                These are some of the steps and instructions that you will need to follow to install Flexisign Pro 10 crack. However, as we mentioned earlier, installing Flexisign Pro 10 crack is not enough. You also need to know how to use it. How can you do that? Let's find out in the next section.

                -

                How to Use Flexisign Pro 10 Crack

                -

                If you have installed Flexisign Pro 10 crack successfully, you will need to know how to use it. Flexisign Pro 10 crack is a comprehensive graphic designing software that can help you create stunning logos and vector graphics. It has many features and tools that can help you manage graphics effectively. Here are some of the steps and tips that can help you use Flexisign Pro 10 crack:

                -
                  -
                • Launch and access the main interface of Flexisign Pro 10 crack: To launch and access the main interface of Flexisign Pro 10 crack, you can follow these steps:
                    -
                  1. Open the Start menu and type "Flexisign Pro 10" in the search box.
                  2. -
                  3. Select "Flexisign Pro 10" from the results and open it.
                  4. -
                  5. Wait for the main interface of Flexisign Pro 10 crack to load.
                  6. -
                  7. The main interface of Flexisign Pro 10 crack consists of several parts, such as:
                      -
                    • The menu bar, which contains various menus such as File, Edit, View, Design, Arrange, Effects, Bitmaps, Tools, Window, and Help.
                    • -
                    • The toolbar, which contains various tools such as New, Open, Save, Cut, Copy, Paste, Undo, Redo, Zoom, Pan, Selection, Pen, Pencil, Brush, Eraser, Gradient, Fill, Transparency, Text, Shape, Image, Vectorize, Cut/Plot, Print/Print+Cut, RIP/Queue Manager, Color Profiler Wizard , and Preferences.
                    • -
                    • The workspace, which is the main area where you can create and edit your graphics.
                    • -
                    • The rulers, which show the measurements and coordinates of your graphics.
                    • -
                    • The status bar, which shows the information and tips about your graphics and tools.
                    • -
                    • The panels, which show the properties and options of your graphics and tools. You can access different panels such as Color, Swatches, Layers, Objects, Styles, Effects, Bitmaps, Cut/Plot, Print/Print+Cut, RIP/Queue Manager, Color Profiler Wizard, and Help from the Window menu or the toolbar.
                    • -
                    -
                  8. -
                  -
                • -
                • Create professional-looking logos and vector graphics with Flexisign Pro 10 crack: To create professional-looking logos and vector graphics with Flexisign Pro 10 crack, you can follow these steps:
                    -
                  1. Select "File" from the menu bar and click "New" to create a new document. You can also press "Ctrl+N" on your keyboard.
                  2. -
                  3. Enter the name, size, resolution, color mode, and background color of your document and click "OK".
                  4. -
                  5. Use the tools from the toolbar and the panels to create and edit your logo or vector graphic. You can use the pen, pencil, brush, eraser, gradient, fill, transparency, text, shape, image, vectorize, and other tools to customize your graphic. You can also use the design, arrange, effects, bitmaps, and other menus to apply different functions to your graphic.
                  6. -
                  7. Save your logo or vector graphic as a Flexisign Pro 10 file by selecting "File" from the menu bar and clicking "Save" or "Save As". You can also press "Ctrl+S" or "Ctrl+Shift+S" on your keyboard. You can also export your logo or vector graphic as a different format such as EPS, PDF, AI, JPG, PNG, and more by selecting "File" from the menu bar and clicking "Export".
                  8. -
                  -
                • -
                • Design direct-to-vinyl signs with multiple colours, text, and vector graphics with Flexisign Pro 10 crack: To design direct-to-vinyl signs with multiple colours, text, and vector graphics with Flexisign Pro 10 crack , you can follow these steps:
                    -
                  1. Create a new document or open an existing one as described in the previous step.
                  2. -
                  3. Use the tools from the toolbar and the panels to create and edit your sign. You can use the text, shape, image, vectorize, and other tools to add multiple colours, text, and vector graphics to your sign. You can also use the design, arrange, effects, bitmaps, and other menus to apply different functions to your sign.
                  4. -
                  5. Use the cut/plot tool from the toolbar or the menu to prepare your sign for cutting. You can use this tool to set the cut mode, cut style, cut speed, cut force, cut offset, cut blade, cut registration marks, and other options for your sign. You can also use the preview feature to see how your sign will look after cutting.
                  6. -
                  7. Use the print/print+cut tool from the toolbar or the menu to prepare your sign for printing. You can use this tool to set the print mode, print quality, print resolution, print colour management, print media size, print margins, print registration marks, and other options for your sign. You can also use the preview feature to see how your sign will look after printing.
                  8. -
                  9. Use the RIP/queue manager tool from the toolbar or the menu to send your sign to the printer or cutter. You can use this tool to manage the queue of your jobs, monitor the status of your jobs, pause or resume your jobs, cancel or delete your jobs, and other functions for your jobs.
                  10. -
                  -

                  These are some of the steps and tips that can help you use Flexisign Pro 10 crack. However, as we mentioned earlier, using Flexisign Pro 10 crack is not recommended. It is better to use Flexisign Pro 10 legally and ethically than to use Flexisign Pro 10 crack illegally and unethically.

                  -

                  Conclusion

                  -

                  In this article, we have discussed Flexisign Pro 10 crack: how to download and install it for free. We have also discussed some of the features of Flexisign Pro 10 and some of the risks and disadvantages of using Flexisign Pro 10 crack. We have also provided you with a step-by-step guide on how to download and install Flexisign Pro 10 crack for free and how to use Flexisign Pro 10 crack.

                  -

                  However, we have also emphasized that using Flexisign Pro 10 crack is not worth it. It is illegal and unethical. It is unstable and unreliable. It is unsafe and risky. It is unsupported and unhelpful. It will compromise your quality, security, and integrity as a graphic designer.

                  -

                  Therefore, we do not recommend using Flexisign Pro 10 crack at all. It is better to use Flexisign Pro 10 legally and ethically than to use Flexisign Pro 10 crack illegally and unethically.

                  -

So, what are your options if you want to use Flexisign Pro 10 legally and ethically? Well, you have two options. You can either buy the original version of Flexisign Pro 10 from the official website or an authorized dealer, or you can use an alternative software that is free or cheaper than Flexisign Pro 10. Here are some of the alternatives and recommendations for using Flexisign Pro 10 legally and ethically:

                  -
                    -
                  • Buy the original version of Flexisign Pro 10: The best option for using Flexisign Pro 10 legally and ethically is to buy the original version of the software from the official website or an authorized dealer. This way, you can enjoy all the features and functions of Flexisign Pro 10 without any risks or disadvantages. You can also get technical support and customer service from the developers or the official website. You can also receive updates and patches that can fix bugs or errors, improve performance or compatibility, or add new features or functions. You can also have user manuals and tutorials that can help you learn how to use the software effectively. The official price of Flexisign Pro 10 is $3,995, which is quite expensive for many users. However, you might be able to find some discounts or offers that can lower the price. You can also consider buying a used or refurbished version of Flexisign Pro 10 that might be cheaper than a new one.
                  • -
                  • Use an alternative software that is free or cheaper than Flexisign Pro 10: Another option for using Flexisign Pro 10 legally and ethically is to use an alternative software that is free or cheaper than Flexisign Pro 10. There are many graphic designing software that can help you create professional-looking logos and vector graphics. Some of them are free, while some of them are cheaper than Flexisign Pro 10. However, they might not have all the features and functions of Flexisign Pro 10, or they might have different interfaces and workflows. You will need to compare and contrast them to find the best one for your needs and preferences. Here are some of the alternative software that you can use instead of Flexisign Pro 10: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
• Inkscape (Free): A free and open-source vector graphics editor that can create logos, icons, illustrations, diagrams, maps, and web graphics. It has tools such as pen, pencil, calligraphy, shape, text, gradient, fill, transparency, node, path, bitmap trace, and more. It also supports various formats such as SVG, EPS, PDF, AI, PNG, JPG, and more.
• GIMP (Free): A free and open-source raster graphics editor that can create and edit images, photos, logos, icons, and web graphics. It has tools such as brush, pencil, eraser, gradient, fill, transparency, text, shape, selection, crop, rotate, scale, and more. It also supports various formats such as PNG, JPG, GIF, TIFF, PSD, PDF, and more.
• Adobe Illustrator ($20.99/month or $239.88/year): A professional vector graphics editor that can create logos, icons, illustrations, typography, and web graphics. It has tools such as pen, pencil, brush, eraser, gradient, fill, transparency, text, shape, image trace, and more. It also supports various formats such as AI, EPS, PDF, SVG, PNG, JPG, and more.
• CorelDRAW ($249/year or $499 one-time purchase): A professional graphic design software that can create logos, icons, illustrations, web graphics, and print projects. It has tools such as pen, pencil, brush, eraser, gradient, fill, transparency, text, shape, bitmap trace, and more. It also supports various formats such as CDR, EPS, PDF, SVG, PNG, JPG, and more.
• Affinity Designer ($49.99 one-time purchase): A professional graphic design software that can create logos, icons, illustrations, web graphics, and print projects. It has tools such as pen, pencil, brush, eraser, gradient, fill, transparency, text, shape, image trace, and more. It also supports various formats such as AFDESIGN, EPS, PDF, SVG, PNG, JPG, and more.

These are some of the alternative software that you can use instead of Flexisign Pro 10. You can compare and contrast them to find the best one for your needs and preferences.
                  • -
                  -

                  We hope that this article has helped you understand Flexisign Pro 10 crack: how to download and install it for free. We also hope that you have learned some of the features of Flexisign Pro 10 and some of the risks and disadvantages of using Flexisign Pro 10 crack. We also hope that you have followed our step-by-step guide on how to download and install Flexisign Pro 10 crack for free and how to use Flexisign Pro 10 crack. And we also hope that you have considered our alternatives and recommendations for using Flexisign Pro 10 legally and ethically.

                  -

                  Now that you have read this article, what do you think? Do you still want to use Flexisign Pro 10 crack for free? Or do you want to use Flexisign Pro 10 legally and ethically? Do you have any questions or opinions about Flexisign Pro 10 or Flexisign Pro 10 crack? Let us know in the comments below. We would love to hear from you.

                  -

                  FAQs

                  -

                  Here are some of the frequently asked questions about Flexisign Pro 10 and Flexisign Pro 10 crack:

                  -
                    -
                  1. What is the difference between Flexisign Pro 10 and other versions of Flexisign?
                  2. -

                    Flexisign Pro 10 is the most advanced and comprehensive version of Flexisign. It has all the features and functions of other versions of Flexisign, such as Flexisign Basic, Flexisign Plus, and Flexisign Expert. It also has some additional features and functions that are not available in other versions of Flexisign, such as full graphic design, text serialization, colour tracing, cutting, RIPing, and printing tools.

                    -
                  2. Is Flexisign Pro 10 compatible with Windows 10?

                    Yes, Flexisign Pro 10 is compatible with Windows 10, although you might need to update or patch the software to ensure compatibility and performance. You can check for updates and patches on the official Flexisign website or from the Help menu or toolbar inside the software.

                    -
                  3. How much does Flexisign Pro 10 cost if I want to buy it legally?

                    The official price of Flexisign Pro 10 is $3,995, which is expensive for many users. You may be able to find discounts or offers that lower the price, buy a used or refurbished license more cheaply than a new one, or switch to an alternative program that is free or cheaper than Flexisign Pro 10.

                    -
                  4. Can I use Flexisign Pro 10 crack on multiple devices or computers?

                    No. A cracked version of Flexisign Pro 10 is a modified or hacked copy that bypasses the security measures so it can be used without paying, but it also limits how the software can be used: it runs on only one device or computer at a time. If you try to use it on another device or computer, you are likely to see errors such as activation failure, license expiration, software corruption, or system crashes.

                    -
                  5. How can I get technical support or customer service for Flexisign Pro 10 crack?

                    You cannot. Because a cracked version of Flexisign Pro 10 bypasses the software's security measures, it also cuts off all communication with the developers and the official website. That means no technical support, no customer service, and no updates or patches to fix bugs, improve performance or compatibility, or add new features. If you run into problems with Flexisign Pro 10 crack, you are on your own.

                    -
                  -

                  These are some of the most frequently asked questions about Flexisign Pro 10 and its cracked version. If you have any other questions or opinions, feel free to leave them in the comments below and we will try to answer them as soon as possible.

                  -

                  Thank you for reading this article. We hope that you have enjoyed it and learned something from it. Please share it with your friends and family who might be interested in Flexisign Pro 10 or Flexisign Pro 10 crack. And don't forget to subscribe to our newsletter for more articles like this one.

                  -
                  -
                  \ No newline at end of file diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/urllib3/util/timeout.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/urllib3/util/timeout.py deleted file mode 100644 index ff69593b05b5eb5fcd336b4bd16193c44dc48ef5..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/urllib3/util/timeout.py +++ /dev/null @@ -1,268 +0,0 @@ -from __future__ import absolute_import - -import time - -# The default socket timeout, used by httplib to indicate that no timeout was -# specified by the user -from socket import _GLOBAL_DEFAULT_TIMEOUT - -from ..exceptions import TimeoutStateError - -# A sentinel value to indicate that no timeout was specified by the user in -# urllib3 -_Default = object() - - -# Use time.monotonic if available. -current_time = getattr(time, "monotonic", time.time) - - -class Timeout(object): - """Timeout configuration. - - Timeouts can be defined as a default for a pool: - - .. code-block:: python - - timeout = Timeout(connect=2.0, read=7.0) - http = PoolManager(timeout=timeout) - response = http.request('GET', 'http://example.com/') - - Or per-request (which overrides the default for the pool): - - .. code-block:: python - - response = http.request('GET', 'http://example.com/', timeout=Timeout(10)) - - Timeouts can be disabled by setting all the parameters to ``None``: - - .. code-block:: python - - no_timeout = Timeout(connect=None, read=None) - response = http.request('GET', 'http://example.com/, timeout=no_timeout) - - - :param total: - This combines the connect and read timeouts into one; the read timeout - will be set to the time leftover from the connect attempt. In the - event that both a connect timeout and a total are specified, or a read - timeout and a total are specified, the shorter timeout will be applied. - - Defaults to None. - - :type total: int, float, or None - - :param connect: - The maximum amount of time (in seconds) to wait for a connection - attempt to a server to succeed. Omitting the parameter will default the - connect timeout to the system default, probably `the global default - timeout in socket.py - `_. - None will set an infinite timeout for connection attempts. - - :type connect: int, float, or None - - :param read: - The maximum amount of time (in seconds) to wait between consecutive - read operations for a response from the server. Omitting the parameter - will default the read timeout to the system default, probably `the - global default timeout in socket.py - `_. - None will set an infinite timeout. - - :type read: int, float, or None - - .. note:: - - Many factors can affect the total amount of time for urllib3 to return - an HTTP response. - - For example, Python's DNS resolver does not obey the timeout specified - on the socket. Other factors that can affect total request time include - high CPU load, high swap, the program running at a low priority level, - or other behaviors. - - In addition, the read and total timeouts only measure the time between - read operations on the socket connecting the client and the server, - not the total amount of time for the request to return a complete - response. For most requests, the timeout is raised because the server - has not sent the first byte in the specified time. 
This is not always - the case; if a server streams one byte every fifteen seconds, a timeout - of 20 seconds will not trigger, even though the request will take - several minutes to complete. - - If your goal is to cut off any request after a set amount of wall clock - time, consider having a second "watcher" thread to cut off a slow - request. - """ - - #: A sentinel object representing the default timeout value - DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT - - def __init__(self, total=None, connect=_Default, read=_Default): - self._connect = self._validate_timeout(connect, "connect") - self._read = self._validate_timeout(read, "read") - self.total = self._validate_timeout(total, "total") - self._start_connect = None - - def __repr__(self): - return "%s(connect=%r, read=%r, total=%r)" % ( - type(self).__name__, - self._connect, - self._read, - self.total, - ) - - # __str__ provided for backwards compatibility - __str__ = __repr__ - - @classmethod - def _validate_timeout(cls, value, name): - """Check that a timeout attribute is valid. - - :param value: The timeout value to validate - :param name: The name of the timeout attribute to validate. This is - used to specify in error messages. - :return: The validated and casted version of the given value. - :raises ValueError: If it is a numeric value less than or equal to - zero, or the type is not an integer, float, or None. - """ - if value is _Default: - return cls.DEFAULT_TIMEOUT - - if value is None or value is cls.DEFAULT_TIMEOUT: - return value - - if isinstance(value, bool): - raise ValueError( - "Timeout cannot be a boolean value. It must " - "be an int, float or None." - ) - try: - float(value) - except (TypeError, ValueError): - raise ValueError( - "Timeout value %s was %s, but it must be an " - "int, float or None." % (name, value) - ) - - try: - if value <= 0: - raise ValueError( - "Attempted to set %s timeout to %s, but the " - "timeout cannot be set to a value less " - "than or equal to 0." % (name, value) - ) - except TypeError: - # Python 3 - raise ValueError( - "Timeout value %s was %s, but it must be an " - "int, float or None." % (name, value) - ) - - return value - - @classmethod - def from_float(cls, timeout): - """Create a new Timeout from a legacy timeout value. - - The timeout value used by httplib.py sets the same timeout on the - connect(), and recv() socket requests. This creates a :class:`Timeout` - object that sets the individual timeouts to the ``timeout`` value - passed to this function. - - :param timeout: The legacy timeout value. - :type timeout: integer, float, sentinel default object, or None - :return: Timeout object - :rtype: :class:`Timeout` - """ - return Timeout(read=timeout, connect=timeout) - - def clone(self): - """Create a copy of the timeout object - - Timeout properties are stored per-pool but each request needs a fresh - Timeout object to ensure each one has its own start/stop configured. - - :return: a copy of the timeout object - :rtype: :class:`Timeout` - """ - # We can't use copy.deepcopy because that will also create a new object - # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to - # detect the user default. - return Timeout(connect=self._connect, read=self._read, total=self.total) - - def start_connect(self): - """Start the timeout clock, used during a connect() attempt - - :raises urllib3.exceptions.TimeoutStateError: if you attempt - to start a timer that has been started already. 
- """ - if self._start_connect is not None: - raise TimeoutStateError("Timeout timer has already been started.") - self._start_connect = current_time() - return self._start_connect - - def get_connect_duration(self): - """Gets the time elapsed since the call to :meth:`start_connect`. - - :return: Elapsed time in seconds. - :rtype: float - :raises urllib3.exceptions.TimeoutStateError: if you attempt - to get duration for a timer that hasn't been started. - """ - if self._start_connect is None: - raise TimeoutStateError( - "Can't get connect duration for timer that has not started." - ) - return current_time() - self._start_connect - - @property - def connect_timeout(self): - """Get the value to use when setting a connection timeout. - - This will be a positive float or integer, the value None - (never timeout), or the default system timeout. - - :return: Connect timeout. - :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None - """ - if self.total is None: - return self._connect - - if self._connect is None or self._connect is self.DEFAULT_TIMEOUT: - return self.total - - return min(self._connect, self.total) - - @property - def read_timeout(self): - """Get the value for the read timeout. - - This assumes some time has elapsed in the connection timeout and - computes the read timeout appropriately. - - If self.total is set, the read timeout is dependent on the amount of - time taken by the connect timeout. If the connection time has not been - established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be - raised. - - :return: Value to use for the read timeout. - :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None - :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect` - has not yet been called on this object. - """ - if ( - self.total is not None - and self.total is not self.DEFAULT_TIMEOUT - and self._read is not None - and self._read is not self.DEFAULT_TIMEOUT - ): - # In case the connect timeout has not yet been established. - if self._start_connect is None: - return self._read - return max(0, min(self.total - self.get_connect_duration(), self._read)) - elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT: - return max(0, self.total - self.get_connect_duration()) - else: - return self._read diff --git a/spaces/tmaham/DS-Fusion-Express/ldm/modules/encoders/modules_back.py b/spaces/tmaham/DS-Fusion-Express/ldm/modules/encoders/modules_back.py deleted file mode 100644 index 9b15750108ce1a7d0896bc31e13f2f5b96b85c5a..0000000000000000000000000000000000000000 --- a/spaces/tmaham/DS-Fusion-Express/ldm/modules/encoders/modules_back.py +++ /dev/null @@ -1,396 +0,0 @@ -import torch -import torch.nn as nn -from functools import partial -import clip -from einops import rearrange, repeat -from transformers import CLIPTokenizer, CLIPTextModel -import kornia - -from ldm.modules.x_transformer import Encoder, TransformerWrapper # TODO: can we directly rely on lucidrains code and simply add this as a reuirement? --> test - -def _expand_mask(mask, dtype, tgt_len = None): - """ - Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. 
- """ - bsz, src_len = mask.size() - tgt_len = tgt_len if tgt_len is not None else src_len - - expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) - - inverted_mask = 1.0 - expanded_mask - - return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) - -def _build_causal_attention_mask(bsz, seq_len, dtype): - # lazily create causal attention mask, with full attention between the vision tokens - # pytorch uses additive attention mask; fill with -inf - mask = torch.empty(bsz, seq_len, seq_len, dtype=dtype) - mask.fill_(torch.tensor(torch.finfo(dtype).min)) - mask.triu_(1) # zero out the lower diagonal - mask = mask.unsqueeze(1) # expand mask - return mask - -class AbstractEncoder(nn.Module): - def __init__(self): - super().__init__() - - def encode(self, *args, **kwargs): - raise NotImplementedError - - - -class ClassEmbedder(nn.Module): - def __init__(self, embed_dim, n_classes=1000, key='class'): - super().__init__() - self.key = key - self.embedding = nn.Embedding(n_classes, embed_dim) - - def forward(self, batch, key=None): - if key is None: - key = self.key - # this is for use in crossattn - c = batch[key][:, None] - c = self.embedding(c) - return c - - -class TransformerEmbedder(AbstractEncoder): - """Some transformer encoder layers""" - def __init__(self, n_embed, n_layer, vocab_size, max_seq_len=77, device="cuda"): - super().__init__() - self.device = device - self.transformer = TransformerWrapper(num_tokens=vocab_size, max_seq_len=max_seq_len, - attn_layers=Encoder(dim=n_embed, depth=n_layer)) - - def forward(self, tokens): - tokens = tokens.to(self.device) # meh - z = self.transformer(tokens, return_embeddings=True) - return z - - def encode(self, x): - return self(x) - - -class BERTTokenizer(AbstractEncoder): - """ Uses a pretrained BERT tokenizer by huggingface. 
Vocab size: 30522 (?)""" - def __init__(self, device="cuda", vq_interface=True, max_length=77): - super().__init__() - from transformers import BertTokenizerFast # TODO: add to reuquirements - self.tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased") - self.device = device - self.vq_interface = vq_interface - self.max_length = max_length - - def forward(self, text): - batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True, - return_overflowing_tokens=False, padding="max_length", return_tensors="pt") - tokens = batch_encoding["input_ids"].to(self.device) - return tokens - - @torch.no_grad() - def encode(self, text): - tokens = self(text) - if not self.vq_interface: - return tokens - return None, None, [None, None, tokens] - - def decode(self, text): - return text - - -class BERTEmbedder(AbstractEncoder): - """Uses the BERT tokenizr model and add some transformer encoder layers""" - def __init__(self, n_embed, n_layer, vocab_size=30522, max_seq_len=77, - device="cuda",use_tokenizer=True, embedding_dropout=0.0): - super().__init__() - self.use_tknz_fn = use_tokenizer - if self.use_tknz_fn: - self.tknz_fn = BERTTokenizer(vq_interface=False, max_length=max_seq_len) - self.device = device - self.transformer = TransformerWrapper(num_tokens=vocab_size, max_seq_len=max_seq_len, - attn_layers=Encoder(dim=n_embed, depth=n_layer), - emb_dropout=embedding_dropout) - - def forward(self, text, embedding_manager=None): - if self.use_tknz_fn: - tokens = self.tknz_fn(text)#.to(self.device) - else: - tokens = text - z = self.transformer(tokens, return_embeddings=True, embedding_manager=embedding_manager) - return z - - def encode(self, text, **kwargs): - # output of length 77 - return self(text, **kwargs) - -class SpatialRescaler(nn.Module): - def __init__(self, - n_stages=1, - method='bilinear', - multiplier=0.5, - in_channels=3, - out_channels=None, - bias=False): - super().__init__() - self.n_stages = n_stages - assert self.n_stages >= 0 - assert method in ['nearest','linear','bilinear','trilinear','bicubic','area'] - self.multiplier = multiplier - self.interpolator = partial(torch.nn.functional.interpolate, mode=method) - self.remap_output = out_channels is not None - if self.remap_output: - print(f'Spatial Rescaler mapping from {in_channels} to {out_channels} channels after resizing.') - self.channel_mapper = nn.Conv2d(in_channels,out_channels,1,bias=bias) - - def forward(self,x): - for stage in range(self.n_stages): - x = self.interpolator(x, scale_factor=self.multiplier) - - - if self.remap_output: - x = self.channel_mapper(x) - return x - - def encode(self, x): - return self(x) - -class FrozenCLIPEmbedder(AbstractEncoder): - """Uses the CLIP transformer encoder for text (from Hugging Face)""" - def __init__(self, version="openai/clip-vit-large-patch14", device="cuda", max_length=77): - super().__init__() - self.tokenizer = CLIPTokenizer.from_pretrained(version) - self.transformer = CLIPTextModel.from_pretrained(version) - self.device = device - self.max_length = max_length - - def embedding_forward( - self, - input_ids = None, - position_ids = None, - inputs_embeds = None, - embedding_manager = None, - ) -> torch.Tensor: - - seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2] - - if position_ids is None: - position_ids = self.position_ids[:, :seq_length] - - if inputs_embeds is None: - inputs_embeds = self.token_embedding(input_ids) - - if embedding_manager is not None: - inputs_embeds = 
embedding_manager(input_ids, inputs_embeds) - - - position_embeddings = self.position_embedding(position_ids) - embeddings = inputs_embeds + position_embeddings - - return embeddings - - self.transformer.text_model.embeddings.forward = embedding_forward.__get__(self.transformer.text_model.embeddings) - - def encoder_forward( - self, - inputs_embeds, - attention_mask = None, - causal_attention_mask = None, - output_attentions = None, - output_hidden_states = None, - return_dict = None, - ): - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - encoder_states = () if output_hidden_states else None - all_attentions = () if output_attentions else None - - hidden_states = inputs_embeds - for idx, encoder_layer in enumerate(self.layers): - if output_hidden_states: - encoder_states = encoder_states + (hidden_states,) - - layer_outputs = encoder_layer( - hidden_states, - attention_mask, - causal_attention_mask, - output_attentions=output_attentions, - ) - - hidden_states = layer_outputs[0] - - if output_attentions: - all_attentions = all_attentions + (layer_outputs[1],) - - if output_hidden_states: - encoder_states = encoder_states + (hidden_states,) - - return hidden_states - - self.transformer.text_model.encoder.forward = encoder_forward.__get__(self.transformer.text_model.encoder) - - - def text_encoder_forward( - self, - input_ids = None, - attention_mask = None, - position_ids = None, - output_attentions = None, - output_hidden_states = None, - return_dict = None, - embedding_manager = None, - ): - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - if input_ids is None: - raise ValueError("You have to specify either input_ids") - - input_shape = input_ids.size() - input_ids = input_ids.view(-1, input_shape[-1]) - - hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids, embedding_manager=embedding_manager) - - bsz, seq_len = input_shape - # CLIP's text model uses causal mask, prepare it here. 
- # https://github.com/openai/CLIP/blob/cfcffb90e69f37bf2ff1e988237a0fbe41f33c04/clip/model.py#L324 - causal_attention_mask = _build_causal_attention_mask(bsz, seq_len, hidden_states.dtype).to( - hidden_states.device - ) - - # expand attention_mask - if attention_mask is not None: - # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] - attention_mask = _expand_mask(attention_mask, hidden_states.dtype) - - last_hidden_state = self.encoder( - inputs_embeds=hidden_states, - attention_mask=attention_mask, - causal_attention_mask=causal_attention_mask, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - - last_hidden_state = self.final_layer_norm(last_hidden_state) - - return last_hidden_state - - self.transformer.text_model.forward = text_encoder_forward.__get__(self.transformer.text_model) - - def transformer_forward( - self, - input_ids = None, - attention_mask = None, - position_ids = None, - output_attentions = None, - output_hidden_states = None, - return_dict = None, - embedding_manager = None, - ): - return self.text_model( - input_ids=input_ids, - attention_mask=attention_mask, - position_ids=position_ids, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - embedding_manager = embedding_manager - ) - - self.transformer.forward = transformer_forward.__get__(self.transformer) - - - def freeze(self): - self.transformer = self.transformer.eval() - for param in self.parameters(): - param.requires_grad = False - - def forward(self, text, **kwargs): - batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True, - return_overflowing_tokens=False, padding="max_length", return_tensors="pt") - tokens = batch_encoding["input_ids"].to(self.device) - z = self.transformer(input_ids=tokens, **kwargs) - - return z - - def encode(self, text, **kwargs): - return self(text, **kwargs) - - -class FrozenCLIPTextEmbedder(nn.Module): - """ - Uses the CLIP transformer encoder for text. - """ - def __init__(self, version='ViT-L/14', device="cuda", max_length=77, n_repeat=1, normalize=True): - super().__init__() - self.model, _ = clip.load(version, jit=False, device="cpu") - self.device = device - self.max_length = max_length - self.n_repeat = n_repeat - self.normalize = normalize - - def freeze(self): - self.model = self.model.eval() - for param in self.parameters(): - param.requires_grad = False - - def forward(self, text): - tokens = clip.tokenize(text).to(self.device) - z = self.model.encode_text(tokens) - if self.normalize: - z = z / torch.linalg.norm(z, dim=1, keepdim=True) - return z - - def encode(self, text): - z = self(text) - if z.ndim==2: - z = z[:, None, :] - z = repeat(z, 'b 1 d -> b k d', k=self.n_repeat) - return z - - -class FrozenClipImageEmbedder(nn.Module): - """ - Uses the CLIP image encoder. - """ - def __init__( - self, - model, - jit=False, - device='cuda' if torch.cuda.is_available() else 'cpu', - antialias=False, - ): - super().__init__() - self.model, _ = clip.load(name=model, device=device, jit=jit) - - self.antialias = antialias - - self.register_buffer('mean', torch.Tensor([0.48145466, 0.4578275, 0.40821073]), persistent=False) - self.register_buffer('std', torch.Tensor([0.26862954, 0.26130258, 0.27577711]), persistent=False) - - def preprocess(self, x): - # normalize to [0,1] - x = kornia.geometry.resize(x, (224, 224), - interpolation='bicubic',align_corners=True, - antialias=self.antialias) - x = (x + 1.) / 2. 
- # renormalize according to clip - x = kornia.enhance.normalize(x, self.mean, self.std) - return x - - def forward(self, x): - # x is assumed to be in range [-1,1] - return self.model.encode_image(self.preprocess(x)) - - -if __name__ == "__main__": - from ldm.util import count_params - model = FrozenCLIPEmbedder() - count_params(model, verbose=True) \ No newline at end of file diff --git a/spaces/tom-beer/birds-israel/loggings.py b/spaces/tom-beer/birds-israel/loggings.py deleted file mode 100644 index 2411b7b568d0f0191d7f154ee144883921ffeffa..0000000000000000000000000000000000000000 --- a/spaces/tom-beer/birds-israel/loggings.py +++ /dev/null @@ -1,23 +0,0 @@ -import pytorch_lightning as pl -import torch -import wandb - - -class ImagePredictionLogger(pl.Callback): - def __init__(self, val_samples, num_samples=32): - super().__init__() - self.val_imgs, self.val_labels = val_samples - self.val_imgs = self.val_imgs[:num_samples] - self.val_labels = self.val_labels[:num_samples] - - def on_validation_epoch_end(self, trainer, pl_module): - val_imgs = self.val_imgs.to(device=pl_module.device) - - logits = pl_module(val_imgs) - preds = torch.argmax(logits, 1) - - trainer.logger.experiment.log({ - "examples": [wandb.Image(x, caption=f"Pred:{pred}, Label:{y}") - for x, pred, y in zip(val_imgs, preds, self.val_labels)], - "global_step": trainer.global_step - }) diff --git a/spaces/tommy24/chatGPT2/README.md b/spaces/tommy24/chatGPT2/README.md deleted file mode 100644 index 799948c169d953914e91d4e1bb867c5670e65ba7..0000000000000000000000000000000000000000 --- a/spaces/tommy24/chatGPT2/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: ChatGPT -emoji: 📊 -colorFrom: blue -colorTo: blue -sdk: gradio -sdk_version: 3.12.0 -app_file: app.py -pinned: false -duplicated_from: yizhangliu/chatGPT ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/tomofi/MMOCR/configs/_base_/det_pipelines/fcenet_pipeline.py b/spaces/tomofi/MMOCR/configs/_base_/det_pipelines/fcenet_pipeline.py deleted file mode 100644 index b1be6b22dace62ea8beb0c213bf138c93a2430e4..0000000000000000000000000000000000000000 --- a/spaces/tomofi/MMOCR/configs/_base_/det_pipelines/fcenet_pipeline.py +++ /dev/null @@ -1,118 +0,0 @@ -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) - -# for icdar2015 -leval_prop_range_icdar2015 = ((0, 0.4), (0.3, 0.7), (0.6, 1.0)) -train_pipeline_icdar2015 = [ - dict(type='LoadImageFromFile', color_type='color_ignore_orientation'), - dict( - type='LoadTextAnnotations', - with_bbox=True, - with_mask=True, - poly2mask=False), - dict( - type='ColorJitter', - brightness=32.0 / 255, - saturation=0.5, - contrast=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='RandomScaling', size=800, scale=(3. / 4, 5. 
/ 2)), - dict( - type='RandomCropFlip', crop_ratio=0.5, iter_num=1, min_area_ratio=0.2), - dict( - type='RandomCropPolyInstances', - instance_key='gt_masks', - crop_ratio=0.8, - min_side_ratio=0.3), - dict( - type='RandomRotatePolyInstances', - rotate_ratio=0.5, - max_angle=30, - pad_with_fixed_color=False), - dict(type='SquareResizePad', target_size=800, pad_ratio=0.6), - dict(type='RandomFlip', flip_ratio=0.5, direction='horizontal'), - dict(type='Pad', size_divisor=32), - dict( - type='FCENetTargets', - fourier_degree=5, - level_proportion_range=leval_prop_range_icdar2015), - dict( - type='CustomFormatBundle', - keys=['p3_maps', 'p4_maps', 'p5_maps'], - visualize=dict(flag=False, boundary_key=None)), - dict(type='Collect', keys=['img', 'p3_maps', 'p4_maps', 'p5_maps']) -] - -img_scale_icdar2015 = (2260, 2260) -test_pipeline_icdar2015 = [ - dict(type='LoadImageFromFile', color_type='color_ignore_orientation'), - dict( - type='MultiScaleFlipAug', - img_scale=img_scale_icdar2015, - flip=False, - transforms=[ - dict(type='Resize', img_scale=(1280, 800), keep_ratio=True), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] - -# for ctw1500 -leval_prop_range_ctw1500 = ((0, 0.25), (0.2, 0.65), (0.55, 1.0)) -train_pipeline_ctw1500 = [ - dict(type='LoadImageFromFile', color_type='color_ignore_orientation'), - dict( - type='LoadTextAnnotations', - with_bbox=True, - with_mask=True, - poly2mask=False), - dict( - type='ColorJitter', - brightness=32.0 / 255, - saturation=0.5, - contrast=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='RandomScaling', size=800, scale=(3. / 4, 5. / 2)), - dict( - type='RandomCropFlip', crop_ratio=0.5, iter_num=1, min_area_ratio=0.2), - dict( - type='RandomCropPolyInstances', - instance_key='gt_masks', - crop_ratio=0.8, - min_side_ratio=0.3), - dict( - type='RandomRotatePolyInstances', - rotate_ratio=0.5, - max_angle=30, - pad_with_fixed_color=False), - dict(type='SquareResizePad', target_size=800, pad_ratio=0.6), - dict(type='RandomFlip', flip_ratio=0.5, direction='horizontal'), - dict(type='Pad', size_divisor=32), - dict( - type='FCENetTargets', - fourier_degree=5, - level_proportion_range=leval_prop_range_ctw1500), - dict( - type='CustomFormatBundle', - keys=['p3_maps', 'p4_maps', 'p5_maps'], - visualize=dict(flag=False, boundary_key=None)), - dict(type='Collect', keys=['img', 'p3_maps', 'p4_maps', 'p5_maps']) -] - -img_scale_ctw1500 = (1080, 736) -test_pipeline_ctw1500 = [ - dict(type='LoadImageFromFile', color_type='color_ignore_orientation'), - dict( - type='MultiScaleFlipAug', - img_scale=img_scale_ctw1500, - flip=False, - transforms=[ - dict(type='Resize', img_scale=(1280, 800), keep_ratio=True), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/ld/ld_r18_gflv1_r101_fpn_coco_1x.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/ld/ld_r18_gflv1_r101_fpn_coco_1x.py deleted file mode 100644 index 7b8ce4a1caf95d7e66e79e14219d3d9a8f74321d..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/ld/ld_r18_gflv1_r101_fpn_coco_1x.py +++ /dev/null @@ -1,62 +0,0 @@ -_base_ = [ - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -teacher_ckpt 
= 'http://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_mstrain_2x_coco/gfl_r101_fpn_mstrain_2x_coco_20200629_200126-dd12f847.pth' # noqa -model = dict( - type='KnowledgeDistillationSingleStageDetector', - pretrained='torchvision://resnet18', - teacher_config='configs/gfl/gfl_r101_fpn_mstrain_2x_coco.py', - teacher_ckpt=teacher_ckpt, - backbone=dict( - type='ResNet', - depth=18, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch'), - neck=dict( - type='FPN', - in_channels=[64, 128, 256, 512], - out_channels=256, - start_level=1, - add_extra_convs='on_output', - num_outs=5), - bbox_head=dict( - type='LDHead', - num_classes=80, - in_channels=256, - stacked_convs=4, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - ratios=[1.0], - octave_base_scale=8, - scales_per_octave=1, - strides=[8, 16, 32, 64, 128]), - loss_cls=dict( - type='QualityFocalLoss', - use_sigmoid=True, - beta=2.0, - loss_weight=1.0), - loss_dfl=dict(type='DistributionFocalLoss', loss_weight=0.25), - loss_ld=dict( - type='KnowledgeDistillationKLDivLoss', loss_weight=0.25, T=10), - reg_max=16, - loss_bbox=dict(type='GIoULoss', loss_weight=2.0)), - # training and testing settings - train_cfg=dict( - assigner=dict(type='ATSSAssigner', topk=9), - allowed_border=-1, - pos_weight=-1, - debug=False), - test_cfg=dict( - nms_pre=1000, - min_bbox_size=0, - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.6), - max_per_img=100)) - -optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco.py deleted file mode 100644 index 1b48a2104baf0df935954897ae4a991b38684d78..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = './mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py' -# learning policy -lr_config = dict(step=[28, 34]) -runner = dict(type='EpochBasedRunner', max_epochs=36) diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/roi_heads/test_mixins.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/roi_heads/test_mixins.py deleted file mode 100644 index 930d73786b725ea29f82141efca705f5f3e9b7e8..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/roi_heads/test_mixins.py +++ /dev/null @@ -1,370 +0,0 @@ -import logging -import sys - -import torch - -from mmdet.core import (bbox2roi, bbox_mapping, merge_aug_bboxes, - merge_aug_masks, multiclass_nms) - -logger = logging.getLogger(__name__) - -if sys.version_info >= (3, 7): - from mmdet.utils.contextmanagers import completed - - -class BBoxTestMixin(object): - - if sys.version_info >= (3, 7): - - async def async_test_bboxes(self, - x, - img_metas, - proposals, - rcnn_test_cfg, - rescale=False, - bbox_semaphore=None, - global_lock=None): - """Asynchronized test for box head without augmentation.""" - rois = bbox2roi(proposals) - roi_feats = self.bbox_roi_extractor( - x[:len(self.bbox_roi_extractor.featmap_strides)], rois) - if self.with_shared_head: - roi_feats = self.shared_head(roi_feats) - sleep_interval = rcnn_test_cfg.get('async_sleep_interval', 0.017) - - async with 
completed( - __name__, 'bbox_head_forward', - sleep_interval=sleep_interval): - cls_score, bbox_pred = self.bbox_head(roi_feats) - - img_shape = img_metas[0]['img_shape'] - scale_factor = img_metas[0]['scale_factor'] - det_bboxes, det_labels = self.bbox_head.get_bboxes( - rois, - cls_score, - bbox_pred, - img_shape, - scale_factor, - rescale=rescale, - cfg=rcnn_test_cfg) - return det_bboxes, det_labels - - def simple_test_bboxes(self, - x, - img_metas, - proposals, - rcnn_test_cfg, - rescale=False): - """Test only det bboxes without augmentation. - - Args: - x (tuple[Tensor]): Feature maps of all scale level. - img_metas (list[dict]): Image meta info. - proposals (Tensor or List[Tensor]): Region proposals. - rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN. - rescale (bool): If True, return boxes in original image space. - Default: False. - - Returns: - tuple[list[Tensor], list[Tensor]]: The first list contains - the boxes of the corresponding image in a batch, each - tensor has the shape (num_boxes, 5) and last dimension - 5 represent (tl_x, tl_y, br_x, br_y, score). Each Tensor - in the second list is the labels with shape (num_boxes, ). - The length of both lists should be equal to batch_size. - """ - # get origin input shape to support onnx dynamic input shape - if torch.onnx.is_in_onnx_export(): - assert len( - img_metas - ) == 1, 'Only support one input image while in exporting to ONNX' - img_shapes = img_metas[0]['img_shape_for_onnx'] - else: - img_shapes = tuple(meta['img_shape'] for meta in img_metas) - scale_factors = tuple(meta['scale_factor'] for meta in img_metas) - - # The length of proposals of different batches may be different. - # In order to form a batch, a padding operation is required. - if isinstance(proposals, list): - # padding to form a batch - max_size = max([proposal.size(0) for proposal in proposals]) - for i, proposal in enumerate(proposals): - supplement = proposal.new_full( - (max_size - proposal.size(0), proposal.size(1)), 0) - proposals[i] = torch.cat((supplement, proposal), dim=0) - rois = torch.stack(proposals, dim=0) - else: - rois = proposals - - batch_index = torch.arange( - rois.size(0), device=rois.device).float().view(-1, 1, 1).expand( - rois.size(0), rois.size(1), 1) - rois = torch.cat([batch_index, rois[..., :4]], dim=-1) - batch_size = rois.shape[0] - num_proposals_per_img = rois.shape[1] - - # Eliminate the batch dimension - rois = rois.view(-1, 5) - bbox_results = self._bbox_forward(x, rois) - cls_score = bbox_results['cls_score'] - bbox_pred = bbox_results['bbox_pred'] - - # Recover the batch dimension - rois = rois.reshape(batch_size, num_proposals_per_img, rois.size(-1)) - cls_score = cls_score.reshape(batch_size, num_proposals_per_img, - cls_score.size(-1)) - - if not torch.onnx.is_in_onnx_export(): - # remove padding, ignore batch_index when calculating mask - supplement_mask = rois.abs()[..., 1:].sum(dim=-1) == 0 - cls_score[supplement_mask, :] = 0 - - # bbox_pred would be None in some detector when with_reg is False, - # e.g. Grid R-CNN. 
- if bbox_pred is not None: - # the bbox prediction of some detectors like SABL is not Tensor - if isinstance(bbox_pred, torch.Tensor): - bbox_pred = bbox_pred.reshape(batch_size, - num_proposals_per_img, - bbox_pred.size(-1)) - if not torch.onnx.is_in_onnx_export(): - bbox_pred[supplement_mask, :] = 0 - else: - # TODO: Looking forward to a better way - # For SABL - bbox_preds = self.bbox_head.bbox_pred_split( - bbox_pred, num_proposals_per_img) - # apply bbox post-processing to each image individually - det_bboxes = [] - det_labels = [] - for i in range(len(proposals)): - # remove padding - supplement_mask = proposals[i].abs().sum(dim=-1) == 0 - for bbox in bbox_preds[i]: - bbox[supplement_mask] = 0 - det_bbox, det_label = self.bbox_head.get_bboxes( - rois[i], - cls_score[i], - bbox_preds[i], - img_shapes[i], - scale_factors[i], - rescale=rescale, - cfg=rcnn_test_cfg) - det_bboxes.append(det_bbox) - det_labels.append(det_label) - return det_bboxes, det_labels - else: - bbox_pred = None - - return self.bbox_head.get_bboxes( - rois, - cls_score, - bbox_pred, - img_shapes, - scale_factors, - rescale=rescale, - cfg=rcnn_test_cfg) - - def aug_test_bboxes(self, feats, img_metas, proposal_list, rcnn_test_cfg): - """Test det bboxes with test time augmentation.""" - aug_bboxes = [] - aug_scores = [] - for x, img_meta in zip(feats, img_metas): - # only one image in the batch - img_shape = img_meta[0]['img_shape'] - scale_factor = img_meta[0]['scale_factor'] - flip = img_meta[0]['flip'] - flip_direction = img_meta[0]['flip_direction'] - # TODO more flexible - proposals = bbox_mapping(proposal_list[0][:, :4], img_shape, - scale_factor, flip, flip_direction) - rois = bbox2roi([proposals]) - bbox_results = self._bbox_forward(x, rois) - bboxes, scores = self.bbox_head.get_bboxes( - rois, - bbox_results['cls_score'], - bbox_results['bbox_pred'], - img_shape, - scale_factor, - rescale=False, - cfg=None) - aug_bboxes.append(bboxes) - aug_scores.append(scores) - # after merging, bboxes will be rescaled to the original image size - merged_bboxes, merged_scores = merge_aug_bboxes( - aug_bboxes, aug_scores, img_metas, rcnn_test_cfg) - det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores, - rcnn_test_cfg.score_thr, - rcnn_test_cfg.nms, - rcnn_test_cfg.max_per_img) - return det_bboxes, det_labels - - -class MaskTestMixin(object): - - if sys.version_info >= (3, 7): - - async def async_test_mask(self, - x, - img_metas, - det_bboxes, - det_labels, - rescale=False, - mask_test_cfg=None): - """Asynchronized test for mask head without augmentation.""" - # image shape of the first image in the batch (only one) - ori_shape = img_metas[0]['ori_shape'] - scale_factor = img_metas[0]['scale_factor'] - if det_bboxes.shape[0] == 0: - segm_result = [[] for _ in range(self.mask_head.num_classes)] - else: - if rescale and not isinstance(scale_factor, - (float, torch.Tensor)): - scale_factor = det_bboxes.new_tensor(scale_factor) - _bboxes = ( - det_bboxes[:, :4] * - scale_factor if rescale else det_bboxes) - mask_rois = bbox2roi([_bboxes]) - mask_feats = self.mask_roi_extractor( - x[:len(self.mask_roi_extractor.featmap_strides)], - mask_rois) - - if self.with_shared_head: - mask_feats = self.shared_head(mask_feats) - if mask_test_cfg and mask_test_cfg.get('async_sleep_interval'): - sleep_interval = mask_test_cfg['async_sleep_interval'] - else: - sleep_interval = 0.035 - async with completed( - __name__, - 'mask_head_forward', - sleep_interval=sleep_interval): - mask_pred = self.mask_head(mask_feats) - segm_result 
= self.mask_head.get_seg_masks( - mask_pred, _bboxes, det_labels, self.test_cfg, ori_shape, - scale_factor, rescale) - return segm_result - - def simple_test_mask(self, - x, - img_metas, - det_bboxes, - det_labels, - rescale=False): - """Simple test for mask head without augmentation.""" - # image shapes of images in the batch - ori_shapes = tuple(meta['ori_shape'] for meta in img_metas) - scale_factors = tuple(meta['scale_factor'] for meta in img_metas) - - if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes): - if torch.onnx.is_in_onnx_export(): - raise RuntimeError('[ONNX Error] Can not record MaskHead ' - 'as it has not been executed this time') - segm_results = [[[] for _ in range(self.mask_head.num_classes)] - for _ in range(len(det_bboxes))] - return segm_results - - # The length of proposals of different batches may be different. - # In order to form a batch, a padding operation is required. - if isinstance(det_bboxes, list): - # padding to form a batch - max_size = max([bboxes.size(0) for bboxes in det_bboxes]) - for i, (bbox, label) in enumerate(zip(det_bboxes, det_labels)): - supplement_bbox = bbox.new_full( - (max_size - bbox.size(0), bbox.size(1)), 0) - supplement_label = label.new_full((max_size - label.size(0), ), - 0) - det_bboxes[i] = torch.cat((supplement_bbox, bbox), dim=0) - det_labels[i] = torch.cat((supplement_label, label), dim=0) - det_bboxes = torch.stack(det_bboxes, dim=0) - det_labels = torch.stack(det_labels, dim=0) - - batch_size = det_bboxes.size(0) - num_proposals_per_img = det_bboxes.shape[1] - - # if det_bboxes is rescaled to the original image size, we need to - # rescale it back to the testing scale to obtain RoIs. - det_bboxes = det_bboxes[..., :4] - if rescale: - if not isinstance(scale_factors[0], float): - scale_factors = det_bboxes.new_tensor(scale_factors) - det_bboxes = det_bboxes * scale_factors.unsqueeze(1) - - batch_index = torch.arange( - det_bboxes.size(0), device=det_bboxes.device).float().view( - -1, 1, 1).expand(det_bboxes.size(0), det_bboxes.size(1), 1) - mask_rois = torch.cat([batch_index, det_bboxes], dim=-1) - mask_rois = mask_rois.view(-1, 5) - mask_results = self._mask_forward(x, mask_rois) - mask_pred = mask_results['mask_pred'] - - # Support get_seg_masks exporting to ONNX - if torch.onnx.is_in_onnx_export(): - max_shape = img_metas[0]['img_shape_for_onnx'] - num_det = det_bboxes.shape[1] - det_bboxes = det_bboxes.reshape(-1, 4) - det_labels = det_labels.reshape(-1) - segm_results = self.mask_head.get_seg_masks( - mask_pred, det_bboxes, det_labels, self.test_cfg, max_shape, - scale_factors[0], rescale) - segm_results = segm_results.reshape(batch_size, num_det, - max_shape[0], max_shape[1]) - return segm_results - # Recover the batch dimension - mask_preds = mask_pred.reshape(batch_size, num_proposals_per_img, - *mask_pred.shape[1:]) - - # apply mask post-processing to each image individually - segm_results = [] - for i in range(batch_size): - mask_pred = mask_preds[i] - det_bbox = det_bboxes[i] - det_label = det_labels[i] - - # remove padding - supplement_mask = det_bbox.abs().sum(dim=-1) != 0 - mask_pred = mask_pred[supplement_mask] - det_bbox = det_bbox[supplement_mask] - det_label = det_label[supplement_mask] - - if det_label.shape[0] == 0: - segm_results.append([[] - for _ in range(self.mask_head.num_classes) - ]) - else: - segm_result = self.mask_head.get_seg_masks( - mask_pred, det_bbox, det_label, self.test_cfg, - ori_shapes[i], scale_factors[i], rescale) - segm_results.append(segm_result) - return segm_results - - def 
aug_test_mask(self, feats, img_metas, det_bboxes, det_labels): - """Test for mask head with test time augmentation.""" - if det_bboxes.shape[0] == 0: - segm_result = [[] for _ in range(self.mask_head.num_classes)] - else: - aug_masks = [] - for x, img_meta in zip(feats, img_metas): - img_shape = img_meta[0]['img_shape'] - scale_factor = img_meta[0]['scale_factor'] - flip = img_meta[0]['flip'] - flip_direction = img_meta[0]['flip_direction'] - _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape, - scale_factor, flip, flip_direction) - mask_rois = bbox2roi([_bboxes]) - mask_results = self._mask_forward(x, mask_rois) - # convert to numpy array to save memory - aug_masks.append( - mask_results['mask_pred'].sigmoid().cpu().numpy()) - merged_masks = merge_aug_masks(aug_masks, img_metas, self.test_cfg) - - ori_shape = img_metas[0][0]['ori_shape'] - segm_result = self.mask_head.get_seg_masks( - merged_masks, - det_bboxes, - det_labels, - self.test_cfg, - ori_shape, - scale_factor=1.0, - rescale=False) - return segm_result diff --git a/spaces/tomsoderlund/rest-api-with-gradio/app.py b/spaces/tomsoderlund/rest-api-with-gradio/app.py deleted file mode 100644 index 7286778f96a6f8cd584d41733737d1458741c66f..0000000000000000000000000000000000000000 --- a/spaces/tomsoderlund/rest-api-with-gradio/app.py +++ /dev/null @@ -1,18 +0,0 @@ -import gradio - -def my_inference_function(name): - return "Hello " + name + "!" - -gradio_interface = gradio.Interface( - fn=my_inference_function, - inputs="text", - outputs="text", - examples=[ - ["Jill"], - ["Sam"] - ], - title="REST API with Gradio and Huggingface Spaces", - description="This is a demo of how to build an AI powered REST API with Gradio and Huggingface Spaces – for free! Based on [this article](https://www.tomsoderlund.com/ai/building-ai-powered-rest-api). 
See the **Use via API** link at the bottom of this page.", - article="© Tom Söderlund 2022" -) -gradio_interface.launch() diff --git a/spaces/trholding/SpeechCloning/app.py b/spaces/trholding/SpeechCloning/app.py deleted file mode 100644 index c33d98d9225a5f06eef0fd2bf9936477fd6ef7a3..0000000000000000000000000000000000000000 --- a/spaces/trholding/SpeechCloning/app.py +++ /dev/null @@ -1,276 +0,0 @@ -import os - -import gradio as gr -import numpy as np -import soundfile -import soundfile as sf -import torch -from tqdm import tqdm - -os.system("git clone https://github.com/DigitalPhonetics/IMS-Toucan.git toucan_codebase") -os.system("mv toucan_codebase/* .") - -from run_model_downloader import download_models - -download_models() - -from Preprocessing.TextFrontend import ArticulatoryCombinedTextFrontend -from Preprocessing.AudioPreprocessor import AudioPreprocessor -from TrainingInterfaces.Text_to_Spectrogram.AutoAligner.Aligner import Aligner -from TrainingInterfaces.Text_to_Spectrogram.FastSpeech2.DurationCalculator import DurationCalculator -from InferenceInterfaces.UtteranceCloner import UtteranceCloner -from Preprocessing.articulatory_features import get_feature_to_index_lookup - - -def float2pcm(sig, dtype='int16'): - """ - https://gist.github.com/HudsonHuang/fbdf8e9af7993fe2a91620d3fb86a182 - """ - sig = np.asarray(sig) - if sig.dtype.kind != 'f': - raise TypeError("'sig' must be a float array") - dtype = np.dtype(dtype) - if dtype.kind not in 'iu': - raise TypeError("'dtype' must be an integer type") - i = np.iinfo(dtype) - abs_max = 2 ** (i.bits - 1) - offset = i.min + abs_max - return (sig * abs_max + offset).clip(i.min, i.max).astype(dtype) - - -class TTS_Interface: - - def __init__(self): - self.device = "cuda" if torch.cuda.is_available() else "cpu" - - self.utterance_cloner = UtteranceCloner(model_id="Meta", device=self.device) - self.speaker_path_lookup = { - "Voice 1": "reference_audios/voice_1.flac", - "Voice 2": "reference_audios/voice_2.wav", - "Voice 3": "reference_audios/voice_3.wav", - } - self.acoustic_model = Aligner() - self.acoustic_model.load_state_dict(torch.load("Models/Aligner/aligner.pt", map_location='cpu')["asr_model"]) - self.acoustic_model = self.acoustic_model.to(self.device) - self.dc = DurationCalculator(reduction_factor=1) - self.tf = ArticulatoryCombinedTextFrontend(language="en") - example_audio, sr = soundfile.read("reference_audios/clone_me_5.wav") - self.ap = AudioPreprocessor(input_sr=sr, output_sr=16000, ) - - ## finetune aligner - steps = 10 - tokens = list() # we need an ID sequence for training rather than a sequence of phonological features - for vector in self.tf.string_to_tensor( - "Betty Botter bought some butter, but she said the butters bitter. If I put it in my batter, it will make my batter bitter. But a bit of better butter will make my batter better."): - if vector[get_feature_to_index_lookup()["word-boundary"]] == 0: - # we don't include word boundaries when performing alignment, since they are not always present in audio. 
- for phone in self.tf.phone_to_vector: - if vector.numpy().tolist()[13:] == self.tf.phone_to_vector[phone][13:]: - # the first 12 dimensions are for modifiers, so we ignore those when trying to find the phoneme in the ID lookup - tokens.append(self.tf.phone_to_id[phone]) - # this is terribly inefficient, but it's fine - break - tokens = torch.LongTensor(tokens).squeeze().to(self.device) - tokens_len = torch.LongTensor([len(tokens)]).to(self.device) - mel = self.ap.audio_to_mel_spec_tensor(example_audio, normalize=True).transpose(0, 1).unsqueeze(0).to(self.device) - mel.requires_grad = True - mel_len = torch.LongTensor([len(mel[0])]).to(self.device) - # actual fine-tuning starts here - optim_asr = torch.optim.SGD(self.acoustic_model.parameters(), lr=0.1) - self.acoustic_model.train() - for _ in tqdm(list(range(steps))): - pred = self.acoustic_model(mel) - loss = self.acoustic_model.ctc_loss(pred.transpose(0, 1).log_softmax(2), tokens, mel_len, tokens_len) - optim_asr.zero_grad() - loss.backward() - torch.nn.utils.clip_grad_norm_(self.acoustic_model.parameters(), 1.0) - optim_asr.step() - self.acoustic_model.eval() - ## done finetuning - - reference_audio = "reference_audios/clone_me_5.wav" - prompt = "Betty Botter bought some butter, but she said the butters bitter. If I put it in my batter, it will make my batter bitter. But a bit of better butter will make my batter better." - text_list = prompt.replace(".", ".|").replace("?", "?|").replace("!", "!|").split("|") - # we don't split on the punctuation marks because we want to retain them. - - self.split_audio(reference_audio, text_list) - # at this point, split_1.wav, split_2.wav and split_3.wav should exist. - - self.utterance_cloner.tts.set_utterance_embedding("reference_audios/voice_1.flac") - self.part_1_voice_1 = self.utterance_cloner.clone_utterance(path_to_reference_audio="split_1.wav", - reference_transcription=text_list[0], - clone_speaker_identity=False, - lang="en") - self.utterance_cloner.tts.set_utterance_embedding("reference_audios/voice_2.wav") - self.part_1_voice_2 = self.utterance_cloner.clone_utterance(path_to_reference_audio="split_1.wav", - reference_transcription=text_list[0], - clone_speaker_identity=False, - lang="en") - self.utterance_cloner.tts.set_utterance_embedding("reference_audios/voice_3.wav") - self.part_1_voice_3 = self.utterance_cloner.clone_utterance(path_to_reference_audio="split_1.wav", - reference_transcription=text_list[0], - clone_speaker_identity=False, - lang="en") - - self.utterance_cloner.tts.set_utterance_embedding("reference_audios/voice_1.flac") - self.part_2_voice_1 = self.utterance_cloner.clone_utterance(path_to_reference_audio="split_2.wav", - reference_transcription=text_list[1], - clone_speaker_identity=False, - lang="en") - self.utterance_cloner.tts.set_utterance_embedding("reference_audios/voice_2.wav") - self.part_2_voice_2 = self.utterance_cloner.clone_utterance(path_to_reference_audio="split_2.wav", - reference_transcription=text_list[1], - clone_speaker_identity=False, - lang="en") - self.utterance_cloner.tts.set_utterance_embedding("reference_audios/voice_3.wav") - self.part_2_voice_3 = self.utterance_cloner.clone_utterance(path_to_reference_audio="split_2.wav", - reference_transcription=text_list[1], - clone_speaker_identity=False, - lang="en") - - self.utterance_cloner.tts.set_utterance_embedding("reference_audios/voice_1.flac") - self.part_3_voice_1 = self.utterance_cloner.clone_utterance(path_to_reference_audio="split_3.wav", - reference_transcription=text_list[2], - 
clone_speaker_identity=False, - lang="en") - self.utterance_cloner.tts.set_utterance_embedding("reference_audios/voice_2.wav") - self.part_3_voice_2 = self.utterance_cloner.clone_utterance(path_to_reference_audio="split_3.wav", - reference_transcription=text_list[2], - clone_speaker_identity=False, - lang="en") - self.utterance_cloner.tts.set_utterance_embedding("reference_audios/voice_3.wav") - self.part_3_voice_3 = self.utterance_cloner.clone_utterance(path_to_reference_audio="split_3.wav", - reference_transcription=text_list[2], - clone_speaker_identity=False, - lang="en") - - def read(self, _, speaker_1, speaker_2, speaker_3): - reference_audio = "reference_audios/clone_me_5.wav" - - if speaker_1 == "Voice 1": - part_1 = self.part_1_voice_1 - elif speaker_1 == "Voice 2": - part_1 = self.part_1_voice_2 - elif speaker_1 == "Voice 3": - part_1 = self.part_1_voice_3 - - if speaker_2 == "Voice 1": - part_2 = self.part_2_voice_1 - elif speaker_2 == "Voice 2": - part_2 = self.part_2_voice_2 - elif speaker_2 == "Voice 3": - part_2 = self.part_2_voice_3 - - if speaker_3 == "Voice 1": - part_3 = self.part_3_voice_1 - elif speaker_3 == "Voice 2": - part_3 = self.part_3_voice_2 - elif speaker_3 == "Voice 3": - part_3 = self.part_3_voice_3 - - return "Utility/toucan.png", \ - reference_audio, \ - self.speaker_path_lookup["Voice 1"], \ - self.speaker_path_lookup["Voice 2"], \ - self.speaker_path_lookup["Voice 3"], \ - (24000, float2pcm(torch.cat([torch.tensor(part_1), torch.tensor(part_2), torch.tensor(part_3)], dim=0).numpy())) - - def split_audio(self, path_to_audio, text_list): - # extract audio - audio, sr = sf.read(path_to_audio) - ap = AudioPreprocessor(input_sr=sr, output_sr=16000, melspec_buckets=80, hop_length=256, n_fft=1024, cut_silence=False) - norm_wave = ap.audio_to_wave_tensor(normalize=True, audio=audio) - melspec = ap.audio_to_mel_spec_tensor(audio=norm_wave, normalize=False, explicit_sampling_rate=16000).transpose(0, 1) - - # extract phonemes - lines = list() - self.tf.use_word_boundaries = False # this causes problems when splitting otherwise - for segment in text_list: - if segment.strip() != "": - lines.append(self.tf.string_to_tensor(segment, handle_missing=False).squeeze()) - self.tf.use_word_boundaries = True - - # postprocess phonemes: [~ sentence ~ #] --> [sentence ~] except for the first one, which is [~ sentence ~] - processed_lines = list() - for index, line in enumerate(lines): - if index == 0: - processed_lines.append(line[:-1]) - else: - processed_lines.append(line[1:-1]) - lines = processed_lines - joined_phonemes = torch.cat(lines, dim=0) - - # get durations of each phone in audio as average of an ensemble - alignment_paths = list() - ensemble_of_durations = list() - for ensemble in range(1): - alignment_paths.append(self.acoustic_model.inference(mel=melspec.to(self.device), - tokens=joined_phonemes.to(self.device), - save_img_for_debug=None, - return_ctc=False)) - for alignment_path in alignment_paths: - ensemble_of_durations.append(self.dc(torch.LongTensor(alignment_path), vis=None).squeeze()) - durations = list() - for i, _ in enumerate(ensemble_of_durations[0]): - duration_of_phone = list() - for ensemble_member in ensemble_of_durations: - duration_of_phone.append(ensemble_member.squeeze()[i]) - durations.append(sum(duration_of_phone) / len(duration_of_phone)) - - # cut audio according to duration sum of each line in transcript - line_lens = [len(x) for x in lines] - index = 0 - segment_durations = list() - for num_phones in line_lens: - 
segment_durations.append(sum(durations[index: index + num_phones])) - index += num_phones - spec_to_wave_factor = len(norm_wave) / sum(segment_durations) - wave_segment_lens = [int(x * spec_to_wave_factor) for x in segment_durations] - start_index = 0 - wave_segments = list() - for index, segment_len in enumerate(wave_segment_lens): - if index == len(wave_segment_lens) - 1: - wave_segments.append(norm_wave[start_index:]) - else: - wave_segments.append(norm_wave[start_index: start_index + segment_len]) - start_index += segment_len - - # write the audio segments into new files - for index, wave_segment in enumerate(wave_segments): - sf.write(f"split_{index + 1}.wav", wave_segment, 16000) - - -meta_model = TTS_Interface() -article = "

                  This is still a work in progress; models will be exchanged for better ones as soon as they are done. More diverse training data can help with more exact cloning. For example, we are still trying to incorporate more singing data.

                  Click here to learn more about the IMS Toucan Speech Synthesis Toolkit

                  " - -iface = gr.Interface(fn=meta_model.read, - inputs=[gr.inputs.Dropdown( - [ - "Betty Botter bought some butter, but she said the butters bitter. If I put it in my batter, it will make my batter bitter. But a bit of better butter will make my batter better."], - type="value", - default="Betty Botter bought some butter, but she said the butters bitter. If I put it in my batter, it will make my batter bitter. But a bit of better butter will make my batter better.", - label="Select which utterance should be customized"), - gr.inputs.Dropdown(["Voice 1", - "Voice 2", - "Voice 3"], type="value", default="Voice 1", label="Speaker selection for the first sentence"), - gr.inputs.Dropdown(["Voice 1", - "Voice 2", - "Voice 3"], type="value", default="Voice 2", label="Speaker selection for the second sentence"), - gr.inputs.Dropdown(["Voice 1", - "Voice 2", - "Voice 3"], type="value", default="Voice 3", label="Speaker selection for the third sentence")], - outputs=[gr.outputs.Image(label="Alignment of Phonemes to Audio"), - gr.outputs.Audio(type="file", label="Original Audio"), - gr.outputs.Audio(type="file", label="Reference-Voice 1"), - gr.outputs.Audio(type="file", label="Reference-Voice 2"), - gr.outputs.Audio(type="file", label="Reference-Voice 3"), - gr.outputs.Audio(type="numpy", label="Customized Audio")], - layout="vertical", - title="Speech Customization", - thumbnail="Utility/toucan.png", - theme="default", - allow_flagging="never", - allow_screenshot=False, - description="In this demo, an audio is split automatically into individual sentences. Then each of the sentences is re-synthesized into speech with the exact same prosody, but with a voice that you can choose. This allows customizing any existing read speech while retaining as much from the original reading as possible. 
Unfortunately, we cannot show you the reference audio and the reference voices ahead of time, so they will be displayed together with the resulting cloned speech.", - article=article) -iface.launch(enable_queue=True) diff --git a/spaces/ttt246/brain/Brain/src/rising_plugin/pinecone_engine.py b/spaces/ttt246/brain/Brain/src/rising_plugin/pinecone_engine.py deleted file mode 100644 index cf0a2a895d6aefc047a282966cde9fbf716fc7a7..0000000000000000000000000000000000000000 --- a/spaces/ttt246/brain/Brain/src/rising_plugin/pinecone_engine.py +++ /dev/null @@ -1,93 +0,0 @@ -# initialize pinecone -import pinecone -from typing import Any - -from ..common.brain_exception import BrainException -from ..common.http_response_codes import responses -from ..common.utils import ( - PINECONE_INDEX_NAME, - PINECONE_NAMESPACE, -) -from ..model.req_model import ReqModel - -DIMENSION = 1536 -METRIC = "cosine" -POD_TYPE = "p1.x1" - - -# get the existing index in pinecone or create a new one -def init_pinecone(index_name, setting: ReqModel, flag=True): - try: - pinecone.init(api_key=setting.pinecone_key, environment=setting.pinecone_env) - if flag: - return pinecone.Index(index_name) - else: - # create a new index in pinecone - return pinecone.create_index( - index_name, dimension=DIMENSION, metric=METRIC, pod_type=POD_TYPE - ) - except Exception as ex: - raise BrainException(code=508, message=responses[508]) - - -"""add item in pinecone""" - - -def add_pinecone( - namespace: str, key: str, setting: ReqModel, value: list[float] -) -> Any: - index = init_pinecone(index_name=PINECONE_INDEX_NAME, setting=setting) - - upsert_response = index.upsert( - vectors=[{"id": key, "values": value}], - namespace=namespace, - ) - return upsert_response - - -"""update item in pinecone""" - - -def update_pinecone( - setting: ReqModel, namespace: str, key: str, value: list[float] -) -> Any: - index = init_pinecone(index_name=PINECONE_INDEX_NAME, setting=setting) - - upsert_response = index.update( - id=key, - values=value, - namespace=namespace, - ) - return upsert_response - - -"""delete item in pinecone""" - - -def delete_pinecone(setting: ReqModel, namespace: str, key: str) -> Any: - index = init_pinecone(index_name=PINECONE_INDEX_NAME, setting=setting) - delete_response = index.delete(ids=[key], namespace=namespace) - return delete_response - - -"""delete all item in the namespace""" - - -def delete_all_pinecone(setting: ReqModel, namespace: str) -> Any: - index = init_pinecone(index_name=PINECONE_INDEX_NAME, setting=setting) - delete_response = index.delete(delete_all=True, namespace=namespace) - return delete_response - - -"""generate index name of pinecone""" - - -def get_pinecone_index_name(uuid): - return PINECONE_INDEX_NAME + "-" + uuid - - -"""generate a namespace of pinecone""" - - -def get_pinecone_index_namespace(uuid): - return PINECONE_NAMESPACE + "-" + uuid diff --git a/spaces/ulysses115/diffsvc_test/network/vocoders/vocoder_utils.py b/spaces/ulysses115/diffsvc_test/network/vocoders/vocoder_utils.py deleted file mode 100644 index db5d5ca1765928e4b047db04435a8a39b52592ca..0000000000000000000000000000000000000000 --- a/spaces/ulysses115/diffsvc_test/network/vocoders/vocoder_utils.py +++ /dev/null @@ -1,15 +0,0 @@ -import librosa - -from utils.hparams import hparams -import numpy as np - - -def denoise(wav, v=0.1): - spec = librosa.stft(y=wav, n_fft=hparams['fft_size'], hop_length=hparams['hop_size'], - win_length=hparams['win_size'], pad_mode='constant') - spec_m = np.abs(spec) - spec_m = np.clip(spec_m - v, a_min=0, 
a_max=None) - spec_a = np.angle(spec) - - return librosa.istft(spec_m * np.exp(1j * spec_a), hop_length=hparams['hop_size'], - win_length=hparams['win_size']) diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/Adobe Photoshop CC 2019 19.1.6.5940 Mac os x Tips and Tricks for Professional Results.md b/spaces/usbethFlerru/sovits-modelsV2/example/Adobe Photoshop CC 2019 19.1.6.5940 Mac os x Tips and Tricks for Professional Results.md deleted file mode 100644 index 2e67e631a3bd7e5ea3a9d37e5051d5e387a1809b..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/Adobe Photoshop CC 2019 19.1.6.5940 Mac os x Tips and Tricks for Professional Results.md +++ /dev/null @@ -1,10 +0,0 @@ -
                  -

                  Adobe Photoshop CC 2018 19.1.6.5940, the updated release of the application for macOS, is available here as a free download. It is one of the best image-editing tools. Many other websites claim to offer free downloads of the software, but the main problem is the difficulty of actually downloading it. To solve this, we provide the download of Adobe Photoshop CC 2018 19.1.6.5940 for Mac. You can also download Adobe Premiere Pro CC 2019.

                  -

                  photoshop cc can edit and compose raster images in multiple layers and supports masks, alpha compositing, and several color models, including RGB (red, green, blue), CMYK (cyan, magenta, yellow, black), the CIELAB color space, spot color, and duotone, using its own PSD and PSB file formats to support these features. In addition to raster graphics, adobe photoshop free download has limited abilities to edit or render text and vector graphics (especially through clipping paths), as well as 3D graphics and video. Its feature set can be extended by plug-ins: programs developed and distributed independently of photoshop trial that run inside it and offer new or enhanced features.

                  -

                  Adobe Photoshop CC 2019 19.1.6.5940 Mac os x


                  DOWNLOAD ✺✺✺ https://urlcod.com/2uyWQk



                  -

                  Alongside adobe photoshop free download, Adobe also develops and publishes photoshop element 2022, Photoshop Lightroom, Photoshop Express, and Adobe Photoshop Sketch. As of November 2019, Adobe has also released a full version of Photoshop for the iPad, and although it was initially limited, Adobe plans to bring more features to Photoshop for iPad. Collectively, they are branded as the "Adobe Photoshop family".

                  -

                  Adobe Acrobat DC 2018 x32 2019.010.20069
                  Adobe After Effects CC 2018.1.2 15.1.2.69
                  Adobe Animate CC 2018.0.2 18.0.2.126
                  Adobe Audition CC 2018.1.1 11.1.1.3
                  Adobe Bridge CC 2018.1 8.1.0.383
                  Adobe Character Animator CC 2018 1.5.0.138
                  Adobe Dreamweaver CC 2018.2 18.2.0.10165
                  Adobe Encore CS6 6.0.2.004
                  Adobe ExtendScript Toolkit CC x32 4.0.0.1
                  Adobe Extension Manager CC x32 7.3.2
                  Adobe Fireworks CS6 x32 12.0.1.273
                  Adobe Illustrator CC 2018.1 22.1.0.312
                  Adobe InCopy CC 2018.1 13.1.0.76
                  Adobe InDesign CC 2018.1 13.1.0.76
                  Adobe Lightroom Classic CC 7.5.0.10
                  Adobe Media Encoder CC 2018.1.2 12.1.2.69
                  Adobe Photoshop CC 2018.1.6 19.1.6.5940
                  Adobe Photoshop Elements 2018 16.0.0
                  Adobe Prelude CC 2018.1.1 7.1.1.80
                  Adobe Premiere Pro CC 2018.1.2 12.1.2.69
                  Adobe Premiere Elements 2018 16.0.0
                  Adobe SpeedGrade CC 2015.1 9.1.0.0
                  Adobe Photoshop Camera Raw 10.5.32

                  -

                  Hello, the program I have is Adobe Photoshop CC 2018. I replaced the executable exactly as described and I get a message that a file xxxx.dll is missing and it won't open. I went back to the previous executable and it worked. But Adobe keeps sending me notices that it is going to deactivate Photoshop. Thanks. I await your comments.

                  -

                  aaccfb2cb3
                  -
                  -
                  \ No newline at end of file diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/Anticloud For Adobe Creative Cloud 2018 Rev.4 ! Crack !!LINK!!.md b/spaces/usbethFlerru/sovits-modelsV2/example/Anticloud For Adobe Creative Cloud 2018 Rev.4 ! Crack !!LINK!!.md deleted file mode 100644 index b5a22d8c37fa098fc2b2318b8deca00020bab310..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/Anticloud For Adobe Creative Cloud 2018 Rev.4 ! Crack !!LINK!!.md +++ /dev/null @@ -1,6 +0,0 @@ -

                  Anticloud For Adobe Creative Cloud 2018 Rev.4 ! Crack


                  Download Zip ❤❤❤ https://urlcod.com/2uyVpv



                  -
                  -Creative Cloud app saved my day 1) Go to Adobe website, register and ... Adobe CC 2018 all Anticloud Rev.4 Win software to crack patch is. 4d29de3e1b
                  -
                  -
                  -

                  diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/BS.Player PRO 2.68 Build 1099 Final Keys [ATOM] Full Version Features Benefits and Reviews.md b/spaces/usbethFlerru/sovits-modelsV2/example/BS.Player PRO 2.68 Build 1099 Final Keys [ATOM] Full Version Features Benefits and Reviews.md deleted file mode 100644 index 2f1bed1a480224c8b023ca48c360e5d0b9aa76a2..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/BS.Player PRO 2.68 Build 1099 Final Keys [ATOM] Full Version Features Benefits and Reviews.md +++ /dev/null @@ -1,6 +0,0 @@ -

                  BS.Player PRO 2.68 Build 1099 Final Keys [ATOM] Full Version


                  Download Zip >>>>> https://urlcod.com/2uyXaa



                  - - aaccfb2cb3
                  -
                  -
                  -

                  diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/Descarregar llibres en catal epub download 5 webs per descarregar llibres gratuts de manera legal[3].md b/spaces/usbethFlerru/sovits-modelsV2/example/Descarregar llibres en catal epub download 5 webs per descarregar llibres gratuts de manera legal[3].md deleted file mode 100644 index 88771177760f97659ba9f21a8073edd9b29d9bd8..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/Descarregar llibres en catal epub download 5 webs per descarregar llibres gratuts de manera legal[3].md +++ /dev/null @@ -1,6 +0,0 @@ -

                  descarregarllibresencatalanepubdownload


                  DOWNLOAD »»» https://urlcod.com/2uyVsX



                  -
                  - aaccfb2cb3
                  -
                  -
                  -

                  diff --git a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/reference/yolo/nas/model.md b/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/reference/yolo/nas/model.md deleted file mode 100644 index 9c34659690a0ce0c439f48ac3bb48e97db1c1c04..0000000000000000000000000000000000000000 --- a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/reference/yolo/nas/model.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -description: Learn about the Neural Architecture Search (NAS) feature available in Ultralytics YOLO. Find out how NAS can improve object detection models and increase accuracy. Get started today!. -keywords: Ultralytics YOLO, object detection, NAS, Neural Architecture Search, model optimization, accuracy improvement ---- - -## NAS ---- -### ::: ultralytics.yolo.nas.model.NAS -

                  diff --git a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/ultralytics/vit/rtdetr/predict.py b/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/ultralytics/vit/rtdetr/predict.py deleted file mode 100644 index 77c02c24d037076be6d39d6ccaf5148d8835bf66..0000000000000000000000000000000000000000 --- a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/ultralytics/vit/rtdetr/predict.py +++ /dev/null @@ -1,44 +0,0 @@ -# Ultralytics YOLO 🚀, AGPL-3.0 license - -import torch - -from ultralytics.yolo.data.augment import LetterBox -from ultralytics.yolo.engine.predictor import BasePredictor -from ultralytics.yolo.engine.results import Results -from ultralytics.yolo.utils import ops - - -class RTDETRPredictor(BasePredictor): - - def postprocess(self, preds, img, orig_imgs): - """Postprocess predictions and returns a list of Results objects.""" - nd = preds[0].shape[-1] - bboxes, scores = preds[0].split((4, nd - 4), dim=-1) - results = [] - for i, bbox in enumerate(bboxes): # (300, 4) - bbox = ops.xywh2xyxy(bbox) - score, cls = scores[i].max(-1, keepdim=True) # (300, 1) - idx = score.squeeze(-1) > self.args.conf # (300, ) - if self.args.classes is not None: - idx = (cls == torch.tensor(self.args.classes, device=cls.device)).any(1) & idx - pred = torch.cat([bbox, score, cls], dim=-1)[idx] # filter - orig_img = orig_imgs[i] if isinstance(orig_imgs, list) else orig_imgs - oh, ow = orig_img.shape[:2] - if not isinstance(orig_imgs, torch.Tensor): - pred[..., [0, 2]] *= ow - pred[..., [1, 3]] *= oh - path = self.batch[0] - img_path = path[i] if isinstance(path, list) else path - results.append(Results(orig_img=orig_img, path=img_path, names=self.model.names, boxes=pred)) - return results - - def pre_transform(self, im): - """Pre-transform input image before inference. - - Args: - im (List(np.ndarray)): (N, 3, h, w) for tensor, [(h, w, 3) x N] for list. - - Return: A list of transformed imgs. - """ - # The size must be square(640) and scaleFilled. - return [LetterBox(self.imgsz, auto=False, scaleFill=True)(image=x) for x in im] diff --git a/spaces/vict0rsch/climateGAN/apply_events.py b/spaces/vict0rsch/climateGAN/apply_events.py deleted file mode 100644 index f7df616f9af7a3c8877af50b017f1bbc0df78889..0000000000000000000000000000000000000000 --- a/spaces/vict0rsch/climateGAN/apply_events.py +++ /dev/null @@ -1,642 +0,0 @@ -import argparse - - -def parse_args(): - parser = argparse.ArgumentParser() - parser.add_argument( - "-b", - "--batch_size", - type=int, - default=4, - help="Batch size to process input images to events. Defaults to 4", - ) - parser.add_argument( - "-i", - "--images_paths", - type=str, - required=True, - help="Path to a directory with image files", - ) - parser.add_argument( - "-o", - "--output_path", - type=str, - default=None, - help="Path to a directory were events should be written. " - + "Will NOT write anything to disk if this flag is not used.", - ) - parser.add_argument( - "-s", - "--save_input", - action="store_true", - default=False, - help="Binary flag to include the input image to the model (after crop and" - + " resize) in the images written or uploaded (depending on saving options.)", - ) - parser.add_argument( - "-r", - "--resume_path", - type=str, - default=None, - help="Path to a directory containing the trainer to resume." - + " In particular it must contain `opts.yam` and `checkpoints/`." 
- + " Typically this points to a Masker, which holds the path to a" - + " Painter in its opts", - ) - parser.add_argument( - "--no_time", - action="store_true", - default=False, - help="Binary flag to prevent the timing of operations.", - ) - parser.add_argument( - "-f", - "--flood_mask_binarization", - type=float, - default=0.5, - help="Value to use to binarize masks (mask > value). " - + "Set to -1 to use soft masks (not binarized). Defaults to 0.5.", - ) - parser.add_argument( - "-t", - "--target_size", - type=int, - default=640, - help="Output image size (when not using `keep_ratio_128`): images are resized" - + " such that their smallest side is `target_size` then cropped in the middle" - + " of the largest side such that the resulting input image (and output images)" - + " has height and width `target_size x target_size`. **Must** be a multiple of" - + " 2^7=128 (up/downscaling inside the models). Defaults to 640.", - ) - parser.add_argument( - "--half", - action="store_true", - default=False, - help="Binary flag to use half precision (float16). Defaults to False.", - ) - parser.add_argument( - "-n", - "--n_images", - default=-1, - type=int, - help="Limit the number of images processed (if you have 100 images in " - + "a directory but n is 10 then only the first 10 images will be loaded" - + " for processing)", - ) - parser.add_argument( - "--no_conf", - action="store_true", - default=False, - help="disable writing the apply_events hash and command in the output folder", - ) - parser.add_argument( - "--overwrite", - action="store_true", - default=False, - help="Do not check for existing outdir, i.e. force overwrite" - + " potentially existing files in the output path", - ) - parser.add_argument( - "--no_cloudy", - action="store_true", - default=False, - help="Prevent the use of the cloudy intermediate" - + " image to create the flood image. Rendering will" - + " be more colorful but may seem less realistic", - ) - parser.add_argument( - "--keep_ratio_128", - action="store_true", - default=False, - help="When loading the input images, resize and crop them in order for their " - + "dimensions to match the closest multiples" - + " of 128. Will force a batch size of 1 since images" - + " now have different dimensions. " - + "Use --max_im_width to cap the resulting dimensions.", - ) - parser.add_argument( - "--fuse", - action="store_true", - default=False, - help="Use batch norm fusion to speed up inference", - ) - parser.add_argument( - "--save_masks", - action="store_true", - default=False, - help="Save output masks along events", - ) - parser.add_argument( - "-m", - "--max_im_width", - type=int, - default=-1, - help="When using --keep_ratio_128, some images may still be too large. Use " - + "--max_im_width to cap the resized image's width. 
Defaults to -1 (no cap).", - ) - parser.add_argument( - "--upload", - action="store_true", - help="Upload to comet.ml in a project called `climategan-apply`", - ) - parser.add_argument( - "--zip_outdir", - "-z", - action="store_true", - help="Zip the output directory as '{outdir.parent}/{outdir.name}.zip'", - ) - return parser.parse_args() - - -args = parse_args() - - -print("\n• Imports\n") -import time - -import_time = time.time() -import sys -import shutil -from collections import OrderedDict -from pathlib import Path - -import comet_ml # noqa: F401 -import torch -import numpy as np -import skimage.io as io -from skimage.color import rgba2rgb -from skimage.transform import resize -from tqdm import tqdm - -from climategan.trainer import Trainer -from climategan.bn_fusion import bn_fuse -from climategan.tutils import print_num_parameters -from climategan.utils import Timer, find_images, get_git_revision_hash, to_128, resolve - -import_time = time.time() - import_time - - -def to_m1_p1(img, i): - """ - rescales a [0, 1] image to [-1, +1] - - Args: - img (np.array): float32 numpy array of an image in [0, 1] - i (int): Index of the image being rescaled - - Raises: - ValueError: If the image is not in [0, 1] - - Returns: - np.array(np.float32): array in [-1, +1] - """ - if img.min() >= 0 and img.max() <= 1: - return (img.astype(np.float32) - 0.5) * 2 - raise ValueError(f"Data range mismatch for image {i} : ({img.min()}, {img.max()})") - - -def uint8(array): - """ - convert an array to np.uint8 (does not rescale or anything else than changing dtype) - - Args: - array (np.array): array to modify - - Returns: - np.array(np.uint8): converted array - """ - return array.astype(np.uint8) - - -def resize_and_crop(img, to=640): - """ - Resizes an image so that it keeps the aspect ratio and the smallest dimensions - is `to`, then crops this resized image in its center so that the output is `to x to` - without aspect ratio distortion - - Args: - img (np.array): np.uint8 255 image - - Returns: - np.array: [0, 1] np.float32 image - """ - # resize keeping aspect ratio: smallest dim is 640 - h, w = img.shape[:2] - if h < w: - size = (to, int(to * w / h)) - else: - size = (int(to * h / w), to) - - r_img = resize(img, size, preserve_range=True, anti_aliasing=True) - r_img = uint8(r_img) - - # crop in the center - H, W = r_img.shape[:2] - - top = (H - to) // 2 - left = (W - to) // 2 - - rc_img = r_img[top : top + to, left : left + to, :] - - return rc_img / 255.0 - - -def print_time(text, time_series, purge=-1): - """ - Print a timeseries's mean and std with a label - - Args: - text (str): label of the time series - time_series (list): list of timings - purge (int, optional): ignore first n values of time series. Defaults to -1. - """ - if not time_series: - return - - if purge > 0 and len(time_series) > purge: - time_series = time_series[purge:] - - m = np.mean(time_series) - s = np.std(time_series) - - print( - f"{text.capitalize() + ' ':.<26} {m:.5f}" - + (f" +/- {s:.5f}" if len(time_series) > 1 else "") - ) - - -def print_store(store, purge=-1): - """ - Pretty-print time series store - - Args: - store (dict): maps string keys to lists of times - purge (int, optional): ignore first n values of time series. Defaults to -1. 
- """ - singles = OrderedDict({k: v for k, v in store.items() if len(v) == 1}) - multiples = OrderedDict({k: v for k, v in store.items() if len(v) > 1}) - empties = {k: v for k, v in store.items() if len(v) == 0} - - if empties: - print("Ignoring empty stores ", ", ".join(empties.keys())) - print() - - for k in singles: - print_time(k, singles[k], purge) - - print() - print("Unit: s/batch") - for k in multiples: - print_time(k, multiples[k], purge) - print() - - -def write_apply_config(out): - """ - Saves the args to `apply_events.py` in a text file for future reference - """ - cwd = Path.cwd().expanduser().resolve() - command = f"cd {str(cwd)}\n" - command += " ".join(sys.argv) - git_hash = get_git_revision_hash() - with (out / "command.txt").open("w") as f: - f.write(command) - with (out / "hash.txt").open("w") as f: - f.write(git_hash) - - -def get_outdir_name(half, keep_ratio, max_im_width, target_size, bin_value, cloudy): - """ - Create the output directory's name based on uer-provided arguments - """ - name_items = [] - if half: - name_items.append("half") - if keep_ratio: - name_items.append("AR") - if max_im_width and keep_ratio: - name_items.append(f"{max_im_width}") - if target_size and not keep_ratio: - name_items.append("S") - name_items.append(f"{target_size}") - if bin_value != 0.5: - name_items.append(f"bin{bin_value}") - if not cloudy: - name_items.append("no_cloudy") - - return "-".join(name_items) - - -def make_outdir( - outdir, overwrite, half, keep_ratio, max_im_width, target_size, bin_value, cloudy -): - """ - Creates the output directory if it does not exist. If it does exist, - prompts the user for confirmation (except if `overwrite` is True). - If the output directory's name is "_auto_" then it is created as: - outdir.parent / get_outdir_name(...) - """ - if outdir.name == "_auto_": - outdir = outdir.parent / get_outdir_name( - half, keep_ratio, max_im_width, target_size, bin_value, cloudy - ) - if outdir.exists() and not overwrite: - print( - f"\nWARNING: outdir ({str(outdir)}) already exists." - + " Files with existing names will be overwritten" - ) - if "n" in input(">>> Continue anyway? 
[y / n] (default: y) : "): - print("Interrupting execution from user input.") - sys.exit() - print() - outdir.mkdir(exist_ok=True, parents=True) - return outdir - - -def get_time_stores(import_time): - return OrderedDict( - { - "imports": [import_time], - "setup": [], - "data pre-processing": [], - "encode": [], - "mask": [], - "flood": [], - "depth": [], - "segmentation": [], - "smog": [], - "wildfire": [], - "all events": [], - "numpy": [], - "inference on all images": [], - "write": [], - } - ) - - -if __name__ == "__main__": - - # ----------------------------------------- - # ----- Initialize script variables ----- - # ----------------------------------------- - print( - "• Using args\n\n" - + "\n".join(["{:25}: {}".format(k, v) for k, v in vars(args).items()]), - ) - - batch_size = args.batch_size - bin_value = args.flood_mask_binarization - cloudy = not args.no_cloudy - fuse = args.fuse - half = args.half - save_masks = args.save_masks - images_paths = resolve(args.images_paths) - keep_ratio = args.keep_ratio_128 - max_im_width = args.max_im_width - n_images = args.n_images - outdir = resolve(args.output_path) if args.output_path is not None else None - resume_path = args.resume_path - target_size = args.target_size - time_inference = not args.no_time - upload = args.upload - zip_outdir = args.zip_outdir - - # ------------------------------------- - # ----- Validate size arguments ----- - # ------------------------------------- - if keep_ratio: - if target_size != 640: - print( - "\nWARNING: using --keep_ratio_128 overwrites target_size" - + " which is ignored." - ) - if batch_size != 1: - print("\nWARNING: batch_size overwritten to 1 when using keep_ratio_128") - batch_size = 1 - if max_im_width > 0 and max_im_width % 128 != 0: - new_im_width = int(max_im_width / 128) * 128 - print("\nWARNING: max_im_width should be <0 or a multiple of 128.") - print( - " Was {} but is now overwritten to {}".format( - max_im_width, new_im_width - ) - ) - max_im_width = new_im_width - else: - if target_size % 128 != 0: - print(f"\nWarning: target size {target_size} is not a multiple of 128.") - target_size = target_size - (target_size % 128) - print(f"Setting target_size to {target_size}.") - - # ------------------------------------- - # ----- Create output directory ----- - # ------------------------------------- - if outdir is not None: - outdir = make_outdir( - outdir, - args.overwrite, - half, - keep_ratio, - max_im_width, - target_size, - bin_value, - cloudy, - ) - - # ------------------------------- - # ----- Create time store ----- - # ------------------------------- - stores = get_time_stores(import_time) - - # ----------------------------------- - # ----- Load Trainer instance ----- - # ----------------------------------- - with Timer(store=stores.get("setup", []), ignore=time_inference): - print("\n• Initializing trainer\n") - torch.set_grad_enabled(False) - trainer = Trainer.resume_from_path( - resume_path, - setup=True, - inference=True, - new_exp=None, - ) - print() - print_num_parameters(trainer, True) - if fuse: - trainer.G = bn_fuse(trainer.G) - if half: - trainer.G.half() - - # -------------------------------------------- - # ----- Read data from input directory ----- - # -------------------------------------------- - print("\n• Reading & Pre-processing Data\n") - - # find all images - data_paths = find_images(images_paths) - base_data_paths = data_paths - # filter images - if 0 < n_images < len(data_paths): - data_paths = data_paths[:n_images] - # repeat data - elif n_images > 
len(data_paths): - repeats = n_images // len(data_paths) + 1 - data_paths = base_data_paths * repeats - data_paths = data_paths[:n_images] - - with Timer(store=stores.get("data pre-processing", []), ignore=time_inference): - # read images to numpy arrays - data = [io.imread(str(d)) for d in data_paths] - # rgba to rgb - data = [im if im.shape[-1] == 3 else uint8(rgba2rgb(im) * 255) for im in data] - # resize images to target_size or - if keep_ratio: - # to closest multiples of 128 <= max_im_width, keeping aspect ratio - new_sizes = [to_128(d, max_im_width) for d in data] - data = [resize(d, ns, anti_aliasing=True) for d, ns in zip(data, new_sizes)] - else: - # to args.target_size - data = [resize_and_crop(d, target_size) for d in data] - new_sizes = [(target_size, target_size) for _ in data] - # resize() produces [0, 1] images, rescale to [-1, 1] - data = [to_m1_p1(d, i) for i, d in enumerate(data)] - - n_batchs = len(data) // batch_size - if len(data) % batch_size != 0: - n_batchs += 1 - - print("Found", len(base_data_paths), "images. Inferring on", len(data), "images.") - - # -------------------------------------------- - # ----- Batch-process images to events ----- - # -------------------------------------------- - print(f"\n• Using device {str(trainer.device)}\n") - - all_events = [] - - with Timer(store=stores.get("inference on all images", []), ignore=time_inference): - for b in tqdm(range(n_batchs), desc="Infering events", unit="batch"): - - images = data[b * batch_size : (b + 1) * batch_size] - if not images: - continue - - # concatenate images in a batch batch_size x height x width x 3 - images = np.stack(images) - # Retreive numpy events as a dict {event: array[BxHxWxC]} - events = trainer.infer_all( - images, - numpy=True, - stores=stores, - bin_value=bin_value, - half=half, - cloudy=cloudy, - return_masks=save_masks, - ) - - # save resized and cropped image - if args.save_input: - events["input"] = uint8((images + 1) / 2 * 255) - - # store events to write after inference loop - all_events.append(events) - - # -------------------------------------------- - # ----- Save (write/upload) inferences ----- - # -------------------------------------------- - if outdir is not None or upload: - - if upload: - print("\n• Creating comet Experiment") - exp = comet_ml.Experiment(project_name="climategan-apply") - exp.log_parameters(vars(args)) - - # -------------------------------------------------------------- - # ----- Change inferred data structure to a list of dicts ----- - # -------------------------------------------------------------- - to_write = [] - events_names = list(all_events[0].keys()) - for events_data in all_events: - n_ims = len(events_data[events_names[0]]) - for i in range(n_ims): - item = {event: events_data[event][i] for event in events_names} - to_write.append(item) - - progress_bar_desc = "" - if outdir is not None: - print("\n• Output directory:\n") - print(str(outdir), "\n") - if upload: - progress_bar_desc = "Writing & Uploading events" - else: - progress_bar_desc = "Writing events" - else: - if upload: - progress_bar_desc = "Uploading events" - - # ------------------------------------ - # ----- Save individual images ----- - # ------------------------------------ - with Timer(store=stores.get("write", []), ignore=time_inference): - - # for each image - for t, event_dict in tqdm( - enumerate(to_write), - desc=progress_bar_desc, - unit="input image", - total=len(to_write), - ): - - idx = t % len(base_data_paths) - stem = Path(data_paths[idx]).stem - width = 
new_sizes[idx][1] - - if keep_ratio: - ar = "_AR" - else: - ar = "" - - # for each event type - event_bar = tqdm( - enumerate(event_dict.items()), - leave=False, - total=len(events_names), - unit="event", - ) - for e, (event, im_data) in event_bar: - event_bar.set_description( - f" {event.capitalize():<{len(progress_bar_desc) - 2}}" - ) - - if args.no_cloudy: - suffix = ar + "_no_cloudy" - else: - suffix = ar - - im_path = Path(f"{stem}_{event}_{width}{suffix}.png") - - if outdir is not None: - im_path = outdir / im_path - io.imsave(im_path, im_data) - - if upload: - exp.log_image(im_data, name=im_path.name) - if zip_outdir: - print("\n• Zipping output directory... ", end="", flush=True) - archive_path = Path(shutil.make_archive(outdir.name, "zip", root_dir=outdir)) - archive_path = archive_path.rename(outdir.parent / archive_path.name) - print("Done:\n") - print(str(archive_path)) - - # --------------------------- - # ----- Print timings ----- - # --------------------------- - if time_inference: - print("\n• Timings\n") - print_store(stores) - - # --------------------------------------------- - # ----- Save apply_events.py run config ----- - # --------------------------------------------- - if not args.no_conf and outdir is not None: - write_apply_config(outdir) diff --git a/spaces/victorbahlangene/Star-wars-app/README.md b/spaces/victorbahlangene/Star-wars-app/README.md deleted file mode 100644 index 459528b5f94836e7e2889b21a6867d82f5041145..0000000000000000000000000000000000000000 --- a/spaces/victorbahlangene/Star-wars-app/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Star Wars App -emoji: 📊 -colorFrom: pink -colorTo: indigo -sdk: streamlit -sdk_version: 1.17.0 -app_file: app.py -pinned: false ---- - -# Star-wars-app -classify star wars character, summarize facts about that character, and make a quiz about the character diff --git a/spaces/vivien/trompeloeil/src/World/components/objects/objects.js b/spaces/vivien/trompeloeil/src/World/components/objects/objects.js deleted file mode 100644 index f198619f960369c651e7643329e2a95d14b0321d..0000000000000000000000000000000000000000 --- a/spaces/vivien/trompeloeil/src/World/components/objects/objects.js +++ /dev/null @@ -1,30 +0,0 @@ -import { GLTFLoader } from 'https://unpkg.com/three@0.117.0/examples/jsm/loaders/GLTFLoader.js'; - -import { setupModel } from './setupModel.js'; - -async function loadObjects() { - const objectID = parseInt(document.querySelector('#object').value); - const loader = new GLTFLoader(); - let object; - - if (objectID == 0) { - const data = await loader.loadAsync('https://vivien000.github.io/trompeloeil/models/tie.glb'); - object = setupModel(data); - object.scale.set(0.00002, 0.00002, 0.00002); - object.position.set(0, 0, -0.05); - } else if (objectID == 1) { - const data = await loader.loadAsync('https://vivien000.github.io/trompeloeil/models/parrot.glb'); - object = setupModel(data); - object.scale.set(0.15, 0.15, 0.15); - object.position.set(0, 0, -0.02); - } else { - const data = await loader.loadAsync('https://vivien000.github.io/trompeloeil/models/vangogh.glb'); - object = setupModel(data); - object.scale.set(0.2, 0.2, 0.2); - object.position.set(0, -0.3, -0.5); - } - - return object; -} - -export { loadObjects }; diff --git a/spaces/vivym/image-matting-app/ppmatting/metrics/metric.py b/spaces/vivym/image-matting-app/ppmatting/metrics/metric.py deleted file mode 100644 index 2784dcf20fcffeadc326ad00d9b6a74d07ad58cf..0000000000000000000000000000000000000000 --- 
a/spaces/vivym/image-matting-app/ppmatting/metrics/metric.py +++ /dev/null @@ -1,278 +0,0 @@ -# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Grad and Conn is refer to https://github.com/yucornetto/MGMatting/blob/main/code-base/utils/evaluate.py -# Output of `Grad` is sightly different from the MATLAB version provided by Adobe (less than 0.1%) -# Output of `Conn` is smaller than the MATLAB version (~5%, maybe MATLAB has a different algorithm) -# So do not report results calculated by these functions in your paper. -# Evaluate your inference with the MATLAB file `DIM_evaluation_code/evaluate.m`. - -import cv2 -import numpy as np -from scipy.ndimage import convolve -from scipy.special import gamma -from skimage.measure import label - - -class MSE: - """ - Only calculate the unknown region if trimap provided. - """ - - def __init__(self): - self.mse_diffs = 0 - self.count = 0 - - def update(self, pred, gt, trimap=None): - """ - update metric. - Args: - pred (np.ndarray): The value range is [0., 255.]. - gt (np.ndarray): The value range is [0, 255]. - trimap (np.ndarray, optional) The value is in {0, 128, 255}. Default: None. - """ - if trimap is None: - trimap = np.ones_like(gt) * 128 - if not (pred.shape == gt.shape == trimap.shape): - raise ValueError( - 'The shape of `pred`, `gt` and `trimap` should be equal. ' - 'but they are {}, {} and {}'.format(pred.shape, gt.shape, - trimap.shape)) - pred[trimap == 0] = 0 - pred[trimap == 255] = 255 - - mask = trimap == 128 - pixels = float(mask.sum()) - pred = pred / 255. - gt = gt / 255. - diff = (pred - gt) * mask - mse_diff = (diff**2).sum() / pixels if pixels > 0 else 0 - - self.mse_diffs += mse_diff - self.count += 1 - - return mse_diff - - def evaluate(self): - mse = self.mse_diffs / self.count if self.count > 0 else 0 - return mse - - -class SAD: - """ - Only calculate the unknown region if trimap provided. - """ - - def __init__(self): - self.sad_diffs = 0 - self.count = 0 - - def update(self, pred, gt, trimap=None): - """ - update metric. - Args: - pred (np.ndarray): The value range is [0., 255.]. - gt (np.ndarray): The value range is [0., 255.]. - trimap (np.ndarray, optional)L The value is in {0, 128, 255}. Default: None. - """ - if trimap is None: - trimap = np.ones_like(gt) * 128 - if not (pred.shape == gt.shape == trimap.shape): - raise ValueError( - 'The shape of `pred`, `gt` and `trimap` should be equal. ' - 'but they are {}, {} and {}'.format(pred.shape, gt.shape, - trimap.shape)) - pred[trimap == 0] = 0 - pred[trimap == 255] = 255 - - mask = trimap == 128 - pred = pred / 255. - gt = gt / 255. - diff = (pred - gt) * mask - sad_diff = (np.abs(diff)).sum() - - sad_diff /= 1000 - self.sad_diffs += sad_diff - self.count += 1 - - return sad_diff - - def evaluate(self): - sad = self.sad_diffs / self.count if self.count > 0 else 0 - return sad - - -class Grad: - """ - Only calculate the unknown region if trimap provided. 
- Refer to: https://github.com/open-mlab/mmediting/blob/master/mmedit/core/evaluation/metrics.py - """ - - def __init__(self): - self.grad_diffs = 0 - self.count = 0 - - def gaussian(self, x, sigma): - return np.exp(-x**2 / (2 * sigma**2)) / (sigma * np.sqrt(2 * np.pi)) - - def dgaussian(self, x, sigma): - return -x * self.gaussian(x, sigma) / sigma**2 - - def gauss_filter(self, sigma, epsilon=1e-2): - half_size = np.ceil( - sigma * np.sqrt(-2 * np.log(np.sqrt(2 * np.pi) * sigma * epsilon))) - size = int(2 * half_size + 1) - - # create filter in x axis - filter_x = np.zeros((size, size)) - for i in range(size): - for j in range(size): - filter_x[i, j] = self.gaussian( - i - half_size, sigma) * self.dgaussian(j - half_size, sigma) - - # normalize filter - norm = np.sqrt((filter_x**2).sum()) - filter_x = filter_x / norm - filter_y = np.transpose(filter_x) - - return filter_x, filter_y - - def gauss_gradient(self, img, sigma): - filter_x, filter_y = self.gauss_filter(sigma) - img_filtered_x = cv2.filter2D( - img, -1, filter_x, borderType=cv2.BORDER_REPLICATE) - img_filtered_y = cv2.filter2D( - img, -1, filter_y, borderType=cv2.BORDER_REPLICATE) - return np.sqrt(img_filtered_x**2 + img_filtered_y**2) - - def update(self, pred, gt, trimap=None, sigma=1.4): - """ - update metric. - Args: - pred (np.ndarray): The value range is [0., 1.]. - gt (np.ndarray): The value range is [0, 255]. - trimap (np.ndarray, optional)L The value is in {0, 128, 255}. Default: None. - sigma (float, optional): Standard deviation of the gaussian kernel. Default: 1.4. - """ - if trimap is None: - trimap = np.ones_like(gt) * 128 - if not (pred.shape == gt.shape == trimap.shape): - raise ValueError( - 'The shape of `pred`, `gt` and `trimap` should be equal. ' - 'but they are {}, {} and {}'.format(pred.shape, gt.shape, - trimap.shape)) - pred[trimap == 0] = 0 - pred[trimap == 255] = 255 - - gt = gt.squeeze() - pred = pred.squeeze() - gt = gt.astype(np.float64) - pred = pred.astype(np.float64) - gt_normed = np.zeros_like(gt) - pred_normed = np.zeros_like(pred) - cv2.normalize(gt, gt_normed, 1., 0., cv2.NORM_MINMAX) - cv2.normalize(pred, pred_normed, 1., 0., cv2.NORM_MINMAX) - - gt_grad = self.gauss_gradient(gt_normed, sigma).astype(np.float32) - pred_grad = self.gauss_gradient(pred_normed, sigma).astype(np.float32) - - grad_diff = ((gt_grad - pred_grad)**2 * (trimap == 128)).sum() - - grad_diff /= 1000 - self.grad_diffs += grad_diff - self.count += 1 - - return grad_diff - - def evaluate(self): - grad = self.grad_diffs / self.count if self.count > 0 else 0 - return grad - - -class Conn: - """ - Only calculate the unknown region if trimap provided. - Refer to: Refer to: https://github.com/open-mlab/mmediting/blob/master/mmedit/core/evaluation/metrics.py - """ - - def __init__(self): - self.conn_diffs = 0 - self.count = 0 - - def update(self, pred, gt, trimap=None, step=0.1): - """ - update metric. - Args: - pred (np.ndarray): The value range is [0., 1.]. - gt (np.ndarray): The value range is [0, 255]. - trimap (np.ndarray, optional)L The value is in {0, 128, 255}. Default: None. - step (float, optional): Step of threshold when computing intersection between - `gt` and `pred`. Default: 0.1. - """ - if trimap is None: - trimap = np.ones_like(gt) * 128 - if not (pred.shape == gt.shape == trimap.shape): - raise ValueError( - 'The shape of `pred`, `gt` and `trimap` should be equal. 
' - 'but they are {}, {} and {}'.format(pred.shape, gt.shape, - trimap.shape)) - pred[trimap == 0] = 0 - pred[trimap == 255] = 255 - - gt = gt.squeeze() - pred = pred.squeeze() - gt = gt.astype(np.float32) / 255 - pred = pred.astype(np.float32) / 255 - - thresh_steps = np.arange(0, 1 + step, step) - round_down_map = -np.ones_like(gt) - for i in range(1, len(thresh_steps)): - gt_thresh = gt >= thresh_steps[i] - pred_thresh = pred >= thresh_steps[i] - intersection = (gt_thresh & pred_thresh).astype(np.uint8) - - # connected components - _, output, stats, _ = cv2.connectedComponentsWithStats( - intersection, connectivity=4) - # start from 1 in dim 0 to exclude background - size = stats[1:, -1] - - # largest connected component of the intersection - omega = np.zeros_like(gt) - if len(size) != 0: - max_id = np.argmax(size) - # plus one to include background - omega[output == max_id + 1] = 1 - - mask = (round_down_map == -1) & (omega == 0) - round_down_map[mask] = thresh_steps[i - 1] - round_down_map[round_down_map == -1] = 1 - - gt_diff = gt - round_down_map - pred_diff = pred - round_down_map - # only calculate difference larger than or equal to 0.15 - gt_phi = 1 - gt_diff * (gt_diff >= 0.15) - pred_phi = 1 - pred_diff * (pred_diff >= 0.15) - - conn_diff = np.sum(np.abs(gt_phi - pred_phi) * (trimap == 128)) - - conn_diff /= 1000 - self.conn_diffs += conn_diff - self.count += 1 - - return conn_diff - - def evaluate(self): - conn = self.conn_diffs / self.count if self.count > 0 else 0 - return conn diff --git a/spaces/vrajeshbhatt/Job-Title-Prediction/README.md b/spaces/vrajeshbhatt/Job-Title-Prediction/README.md deleted file mode 100644 index 75c4a990545cbc2328a0c058a3165c5969dcee76..0000000000000000000000000000000000000000 --- a/spaces/vrajeshbhatt/Job-Title-Prediction/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Job Title Prediction -emoji: 🌖 -colorFrom: gray -colorTo: red -sdk: gradio -sdk_version: 3.24.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/wanghuoto/gogoai/src/lib/hooks/use-enter-submit.tsx b/spaces/wanghuoto/gogoai/src/lib/hooks/use-enter-submit.tsx deleted file mode 100644 index d66b2d3253baff164235d4ca791aae6d84721835..0000000000000000000000000000000000000000 --- a/spaces/wanghuoto/gogoai/src/lib/hooks/use-enter-submit.tsx +++ /dev/null @@ -1,23 +0,0 @@ -import { useRef, type RefObject } from 'react' - -export function useEnterSubmit(): { - formRef: RefObject - onKeyDown: (event: React.KeyboardEvent) => void -} { - const formRef = useRef(null) - - const handleKeyDown = ( - event: React.KeyboardEvent - ): void => { - if ( - event.key === 'Enter' && - !event.shiftKey && - !event.nativeEvent.isComposing - ) { - formRef.current?.requestSubmit() - event.preventDefault() - } - } - - return { formRef, onKeyDown: handleKeyDown } -} diff --git a/spaces/wzhouxiff/RestoreFormerPlusPlus/README.md b/spaces/wzhouxiff/RestoreFormerPlusPlus/README.md deleted file mode 100644 index ef849d6fc77c51d67a22851684dcd269b7c2bbc3..0000000000000000000000000000000000000000 --- a/spaces/wzhouxiff/RestoreFormerPlusPlus/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: RestoreFormerPlusPlus -emoji: 🏢 -colorFrom: indigo -colorTo: indigo -sdk: gradio -sdk_version: 3.44.2 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git 
a/spaces/wzq10314/VITS-Umamusume-voice-synthesizer1/hubert_model.py b/spaces/wzq10314/VITS-Umamusume-voice-synthesizer1/hubert_model.py deleted file mode 100644 index 6c7f8716c268d0f371f5a9f7995f59bd4b9082d1..0000000000000000000000000000000000000000 --- a/spaces/wzq10314/VITS-Umamusume-voice-synthesizer1/hubert_model.py +++ /dev/null @@ -1,221 +0,0 @@ -import copy -from typing import Optional, Tuple -import random - -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch.nn.modules.utils import consume_prefix_in_state_dict_if_present - -class Hubert(nn.Module): - def __init__(self, num_label_embeddings: int = 100, mask: bool = True): - super().__init__() - self._mask = mask - self.feature_extractor = FeatureExtractor() - self.feature_projection = FeatureProjection() - self.positional_embedding = PositionalConvEmbedding() - self.norm = nn.LayerNorm(768) - self.dropout = nn.Dropout(0.1) - self.encoder = TransformerEncoder( - nn.TransformerEncoderLayer( - 768, 12, 3072, activation="gelu", batch_first=True - ), - 12, - ) - self.proj = nn.Linear(768, 256) - - self.masked_spec_embed = nn.Parameter(torch.FloatTensor(768).uniform_()) - self.label_embedding = nn.Embedding(num_label_embeddings, 256) - - def mask(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: - mask = None - if self.training and self._mask: - mask = _compute_mask((x.size(0), x.size(1)), 0.8, 10, x.device, 2) - x[mask] = self.masked_spec_embed.to(x.dtype) - return x, mask - - def encode( - self, x: torch.Tensor, layer: Optional[int] = None - ) -> Tuple[torch.Tensor, torch.Tensor]: - x = self.feature_extractor(x) - x = self.feature_projection(x.transpose(1, 2)) - x, mask = self.mask(x) - x = x + self.positional_embedding(x) - x = self.dropout(self.norm(x)) - x = self.encoder(x, output_layer=layer) - return x, mask - - def logits(self, x: torch.Tensor) -> torch.Tensor: - logits = torch.cosine_similarity( - x.unsqueeze(2), - self.label_embedding.weight.unsqueeze(0).unsqueeze(0), - dim=-1, - ) - return logits / 0.1 - - def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: - x, mask = self.encode(x) - x = self.proj(x) - logits = self.logits(x) - return logits, mask - - -class HubertSoft(Hubert): - def __init__(self): - super().__init__() - - @torch.inference_mode() - def units(self, wav: torch.Tensor) -> torch.Tensor: - wav = F.pad(wav, ((400 - 320) // 2, (400 - 320) // 2)) - x, _ = self.encode(wav) - return self.proj(x) - - -class FeatureExtractor(nn.Module): - def __init__(self): - super().__init__() - self.conv0 = nn.Conv1d(1, 512, 10, 5, bias=False) - self.norm0 = nn.GroupNorm(512, 512) - self.conv1 = nn.Conv1d(512, 512, 3, 2, bias=False) - self.conv2 = nn.Conv1d(512, 512, 3, 2, bias=False) - self.conv3 = nn.Conv1d(512, 512, 3, 2, bias=False) - self.conv4 = nn.Conv1d(512, 512, 3, 2, bias=False) - self.conv5 = nn.Conv1d(512, 512, 2, 2, bias=False) - self.conv6 = nn.Conv1d(512, 512, 2, 2, bias=False) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - x = F.gelu(self.norm0(self.conv0(x))) - x = F.gelu(self.conv1(x)) - x = F.gelu(self.conv2(x)) - x = F.gelu(self.conv3(x)) - x = F.gelu(self.conv4(x)) - x = F.gelu(self.conv5(x)) - x = F.gelu(self.conv6(x)) - return x - - -class FeatureProjection(nn.Module): - def __init__(self): - super().__init__() - self.norm = nn.LayerNorm(512) - self.projection = nn.Linear(512, 768) - self.dropout = nn.Dropout(0.1) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - x = self.norm(x) - x = self.projection(x) - x = self.dropout(x) - 
return x - - -class PositionalConvEmbedding(nn.Module): - def __init__(self): - super().__init__() - self.conv = nn.Conv1d( - 768, - 768, - kernel_size=128, - padding=128 // 2, - groups=16, - ) - self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - x = self.conv(x.transpose(1, 2)) - x = F.gelu(x[:, :, :-1]) - return x.transpose(1, 2) - - -class TransformerEncoder(nn.Module): - def __init__( - self, encoder_layer: nn.TransformerEncoderLayer, num_layers: int - ) -> None: - super(TransformerEncoder, self).__init__() - self.layers = nn.ModuleList( - [copy.deepcopy(encoder_layer) for _ in range(num_layers)] - ) - self.num_layers = num_layers - - def forward( - self, - src: torch.Tensor, - mask: torch.Tensor = None, - src_key_padding_mask: torch.Tensor = None, - output_layer: Optional[int] = None, - ) -> torch.Tensor: - output = src - for layer in self.layers[:output_layer]: - output = layer( - output, src_mask=mask, src_key_padding_mask=src_key_padding_mask - ) - return output - - -def _compute_mask( - shape: Tuple[int, int], - mask_prob: float, - mask_length: int, - device: torch.device, - min_masks: int = 0, -) -> torch.Tensor: - batch_size, sequence_length = shape - - if mask_length < 1: - raise ValueError("`mask_length` has to be bigger than 0.") - - if mask_length > sequence_length: - raise ValueError( - f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and `sequence_length`: {sequence_length}`" - ) - - # compute number of masked spans in batch - num_masked_spans = int(mask_prob * sequence_length / mask_length + random.random()) - num_masked_spans = max(num_masked_spans, min_masks) - - # make sure num masked indices <= sequence_length - if num_masked_spans * mask_length > sequence_length: - num_masked_spans = sequence_length // mask_length - - # SpecAugment mask to fill - mask = torch.zeros((batch_size, sequence_length), device=device, dtype=torch.bool) - - # uniform distribution to sample from, make sure that offset samples are < sequence_length - uniform_dist = torch.ones( - (batch_size, sequence_length - (mask_length - 1)), device=device - ) - - # get random indices to mask - mask_indices = torch.multinomial(uniform_dist, num_masked_spans) - - # expand masked indices to masked spans - mask_indices = ( - mask_indices.unsqueeze(dim=-1) - .expand((batch_size, num_masked_spans, mask_length)) - .reshape(batch_size, num_masked_spans * mask_length) - ) - offsets = ( - torch.arange(mask_length, device=device)[None, None, :] - .expand((batch_size, num_masked_spans, mask_length)) - .reshape(batch_size, num_masked_spans * mask_length) - ) - mask_idxs = mask_indices + offsets - - # scatter indices to mask - mask = mask.scatter(1, mask_idxs, True) - - return mask - - -def hubert_soft( - path: str -) -> HubertSoft: - r"""HuBERT-Soft from `"A Comparison of Discrete and Soft Speech Units for Improved Voice Conversion"`. 
- Args: - path (str): path of a pretrained model - """ - hubert = HubertSoft() - checkpoint = torch.load(path) - consume_prefix_in_state_dict_if_present(checkpoint, "module.") - hubert.load_state_dict(checkpoint) - hubert.eval() - return hubert diff --git a/spaces/xfys/yolov5_tracking/trackers/strong_sort/deep/reid/docs/conf.py b/spaces/xfys/yolov5_tracking/trackers/strong_sort/deep/reid/docs/conf.py deleted file mode 100644 index 4d27eedbac1d47dd6d1226b37e846a07364a0f1c..0000000000000000000000000000000000000000 --- a/spaces/xfys/yolov5_tracking/trackers/strong_sort/deep/reid/docs/conf.py +++ /dev/null @@ -1,181 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Configuration file for the Sphinx documentation builder. -# -# This file does only contain a selection of the most common options. For a -# full list see the documentation: -# http://www.sphinx-doc.org/en/master/config - -# -- Path setup -------------------------------------------------------------- - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# -import os -import sys - -sys.path.insert(0, os.path.abspath('..')) - -# -- Project information ----------------------------------------------------- - -project = u'torchreid' -copyright = u'2019, Kaiyang Zhou' -author = u'Kaiyang Zhou' - -version_file = '../torchreid/__init__.py' -with open(version_file, 'r') as f: - exec(compile(f.read(), version_file, 'exec')) -__version__ = locals()['__version__'] - -# The short X.Y version -version = __version__ -# The full version, including alpha/beta/rc tags -release = __version__ - -# -- General configuration --------------------------------------------------- - -# If your documentation needs a minimal Sphinx version, state it here. -# -# needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - 'sphinx.ext.autodoc', - 'sphinxcontrib.napoleon', - 'sphinx.ext.viewcode', - 'sphinx.ext.githubpages', - 'sphinx_markdown_tables', -] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix(es) of source filenames. -# You can specify multiple suffix as a list of string: -# -source_suffix = ['.rst', '.md'] -# source_suffix = '.rst' -source_parsers = {'.md': 'recommonmark.parser.CommonMarkParser'} - -# The master toctree document. -master_doc = 'index' - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. -# Usually you set "language" from the command line for these cases. -language = None - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -# This pattern also affects html_static_path and html_extra_path. -exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store'] - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = None - -# -- Options for HTML output ------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -# -html_theme = 'sphinx_rtd_theme' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. 
For a list of options available for each theme, see the -# documentation. -# -# html_theme_options = {} - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - -# Custom sidebar templates, must be a dictionary that maps document names -# to template names. -# -# The default sidebars (for documents that don't match any pattern) are -# defined by theme itself. Builtin themes are using these templates by -# default: ``['localtoc.html', 'relations.html', 'sourcelink.html', -# 'searchbox.html']``. -# -# html_sidebars = {} - -# -- Options for HTMLHelp output --------------------------------------------- - -# Output file base name for HTML help builder. -htmlhelp_basename = 'torchreiddoc' - -# -- Options for LaTeX output ------------------------------------------------ - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # - # 'papersize': 'letterpaper', - - # The font size ('10pt', '11pt' or '12pt'). - # - # 'pointsize': '10pt', - - # Additional stuff for the LaTeX preamble. - # - # 'preamble': '', - - # Latex figure (float) alignment - # - # 'figure_align': 'htbp', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - ( - master_doc, 'torchreid.tex', u'torchreid Documentation', - u'Kaiyang Zhou', 'manual' - ), -] - -# -- Options for manual page output ------------------------------------------ - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - (master_doc, 'torchreid', u'torchreid Documentation', [author], 1) -] - -# -- Options for Texinfo output ---------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ( - master_doc, 'torchreid', u'torchreid Documentation', author, - 'torchreid', 'One line description of project.', 'Miscellaneous' - ), -] - -# -- Options for Epub output ------------------------------------------------- - -# Bibliographic Dublin Core info. -epub_title = project - -# The unique identifier of the text. This can be a ISBN number -# or the project homepage. -# -# epub_identifier = '' - -# A unique identification for the text. -# -# epub_uid = '' - -# A list of files that should not be packed into the epub file. 
-epub_exclude_files = ['search.html']
-
-# -- Extension configuration -------------------------------------------------
diff --git a/spaces/xfys/yolov5_tracking/trackers/strong_sort/deep/reid/torchreid/utils/GPU-Re-Ranking/extension/adjacency_matrix/setup.py b/spaces/xfys/yolov5_tracking/trackers/strong_sort/deep/reid/torchreid/utils/GPU-Re-Ranking/extension/adjacency_matrix/setup.py
deleted file mode 100644
index abd3e3a4e1fbd1db7b4bdd1d080d48381082c60a..0000000000000000000000000000000000000000
--- a/spaces/xfys/yolov5_tracking/trackers/strong_sort/deep/reid/torchreid/utils/GPU-Re-Ranking/extension/adjacency_matrix/setup.py
+++ /dev/null
@@ -1,36 +0,0 @@
-"""
-    Understanding Image Retrieval Re-Ranking: A Graph Neural Network Perspective
-
-    Xuanmeng Zhang, Minyue Jiang, Zhedong Zheng, Xiao Tan, Errui Ding, Yi Yang
-
-    Project Page : https://github.com/Xuanmeng-Zhang/gnn-re-ranking
-
-    Paper: https://arxiv.org/abs/2012.07620v2
-
-    ======================================================================
-
-    On the Market-1501 dataset, we accelerate the re-ranking processing from 89.2s to 9.4ms
-    with one K40m GPU, facilitating the real-time post-processing. Similarly, we observe
-    that our method achieves comparable or even better retrieval results on the other four
-    image retrieval benchmarks, i.e., VeRi-776, Oxford-5k, Paris-6k and University-1652,
-    with limited time cost.
-"""
-
-from setuptools import Extension, setup
-import torch
-import torch.nn as nn
-from torch.autograd import Function
-from torch.utils.cpp_extension import CUDAExtension, BuildExtension
-
-setup(
-    name='build_adjacency_matrix',
-    ext_modules=[
-        CUDAExtension(
-            'build_adjacency_matrix', [
-                'build_adjacency_matrix.cpp',
-                'build_adjacency_matrix_kernel.cu',
-            ]
-        ),
-    ],
-    cmdclass={'build_ext': BuildExtension}
-)
diff --git a/spaces/xl2533/FinDoc/build_index/parser/pdf_parser.py b/spaces/xl2533/FinDoc/build_index/parser/pdf_parser.py
deleted file mode 100644
index 44605c01946fe4c071d09e408d6b5bfeab3f146f..0000000000000000000000000000000000000000
--- a/spaces/xl2533/FinDoc/build_index/parser/pdf_parser.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# -*-coding:utf-8 -*-
-import PyPDF2
-from build_index.parser.base import BaseParser
-
-
-class PDFParser(BaseParser):
-    def header_remove(self):
-        # remove the page header of the research report
-        pass
-
-    def footnote_remove(self):
-        # remove the page footer of the research report
-        pass
-
-    def parse_file(self, file):
-        # store the extracted text of each page
-        text_list = []
-
-        with open(file, "rb") as fp:
-            pdf = PyPDF2.PdfReader(fp)
-            num_pages = len(pdf.pages)
-            for page in range(num_pages-1):
-                page_text = pdf.pages[page].extract_text()
-                text_list.append(page_text)
-            text = '\n'.join(text_list)
-            metadata = {'source': file, 'pages': num_pages}
-        return text, metadata
diff --git a/spaces/xxx1/VQA_CAP_GPT/models/VLE/__init__.py b/spaces/xxx1/VQA_CAP_GPT/models/VLE/__init__.py
deleted file mode 100644
index 61ef85599b93b4ea46ec87ab6a0cd733e591c94e..0000000000000000000000000000000000000000
--- a/spaces/xxx1/VQA_CAP_GPT/models/VLE/__init__.py
+++ /dev/null
@@ -1,11 +0,0 @@
-from .modeling_vle import (
-    VLEModel,
-    VLEForVQA,
-    VLEForITM,
-    VLEForMLM,
-    VLEForPBC
-)
-
-from .configuration_vle import VLEConfig
-from .processing_vle import VLEProcessor
-from .pipeline_vle import VLEForVQAPipeline, VLEForITMPipeline, VLEForPBCPipeline
diff --git a/spaces/yangheng/Super-Resolution-Anime-Diffusion/RealESRGANv030/scripts/generate_meta_info.py b/spaces/yangheng/Super-Resolution-Anime-Diffusion/RealESRGANv030/scripts/generate_meta_info.py
deleted file mode 100644
index
081cd085b917b114a97673d3ee900bf578104e28..0000000000000000000000000000000000000000 --- a/spaces/yangheng/Super-Resolution-Anime-Diffusion/RealESRGANv030/scripts/generate_meta_info.py +++ /dev/null @@ -1,65 +0,0 @@ -import argparse -import cv2 -import glob -import os - - -def main(args): - txt_file = open(args.meta_info, "w") - for folder, root in zip(args.input, args.root): - img_paths = sorted(glob.glob(os.path.join(folder, "*"))) - for img_path in img_paths: - status = True - if args.check: - # read the image once for check, as some images may have errors - try: - img = cv2.imread(img_path) - except (IOError, OSError) as error: - print(f"Read {img_path} error: {error}") - status = False - if img is None: - status = False - print(f"Img is None: {img_path}") - if status: - # get the relative path - img_name = os.path.relpath(img_path, root) - print(img_name) - txt_file.write(f"{img_name}\n") - - -if __name__ == "__main__": - """Generate meta info (txt file) for only Ground-Truth images. - - It can also generate meta info from several folders into one txt file. - """ - parser = argparse.ArgumentParser() - parser.add_argument( - "--input", - nargs="+", - default=["datasets/DF2K/DF2K_HR", "datasets/DF2K/DF2K_multiscale"], - help="Input folder, can be a list", - ) - parser.add_argument( - "--root", - nargs="+", - default=["datasets/DF2K", "datasets/DF2K"], - help="Folder root, should have the length as input folders", - ) - parser.add_argument( - "--meta_info", - type=str, - default="datasets/DF2K/meta_info/meta_info_DF2Kmultiscale.txt", - help="txt path for meta info", - ) - parser.add_argument( - "--check", action="store_true", help="Read image to check whether it is ok" - ) - args = parser.parse_args() - - assert len(args.input) == len(args.root), ( - "Input folder and folder root should have the same length, but got " - f"{len(args.input)} and {len(args.root)}." - ) - os.makedirs(os.path.dirname(args.meta_info), exist_ok=True) - - main(args) diff --git a/spaces/yangheng/Waifu2X-Image-Scale/app.py b/spaces/yangheng/Waifu2X-Image-Scale/app.py deleted file mode 100644 index e44f2dc5c397b3860d83972ef1c13523f83ac9f5..0000000000000000000000000000000000000000 --- a/spaces/yangheng/Waifu2X-Image-Scale/app.py +++ /dev/null @@ -1,69 +0,0 @@ -import os - - -import autocuda -from pyabsa.utils.pyabsa_utils import fprint - -import gradio as gr -import torch -import time -from Waifu2x.magnify import ImageMagnifier - -magnifier = ImageMagnifier() - -start_time = time.time() - -CUDA_VISIBLE_DEVICES = '' -device = autocuda.auto_cuda() - -dtype = torch.float16 if device != 'cpu' else torch.float32 - -def magnify_image(image, scale_factor=2): - start_time = time.time() - try: - if image.size[0] > 500 or image.size[1] > 500: - message = 'Failed! Image too large, please resize to <1000x1000 or clone the repo and code to allow larger images on your local machine.' - else: - image = magnifier.magnify(image, scale_factor=scale_factor) - fprint(f'Inference time: {time.time() - start_time:.2f}s') - message = f'Success! Processed image with scale factor {scale_factor}...' 
- except Exception as e: - message = f'Error: {e}' - return image, message - -with gr.Blocks() as demo: - if not os.path.exists('imgs'): - os.mkdir('imgs') - - gr.Markdown('# Free Anime Image Scale Up Demo (CPU)') - gr.Markdown('## 免费动漫插图图片分辨率放大 (最大支持500x500,更大尺寸请clone repo本地运行)') - gr.Markdown('## Powered by Waifu2x') - gr.Markdown("## Author: [yangheng95](https://github.com/yangheng95) Github:[Github](https://github.com/yangheng95/SuperResolutionAnimeDiffusion)") - - with gr.Row(): - with gr.Column(scale=40): - with gr.Group(): - image_in = gr.Image(label="Image", height=512, tool="editor", type="pil") - - with gr.Row(): - scale_factor = gr.Slider(1, 8, label='Scale factor (to magnify image) (1, 2, 4, 8)', - value=2, - step=1) - message = gr.TextArea(label='message', lines=1, default='') - with gr.Row(): - generate = gr.Button(value="Magnify", label="Magnify") - - error_output = gr.Markdown() - - with gr.Column(scale=60): - gr.Markdown('## Click the right button to save the magnified image') - gr.Markdown('## 右键点击图片保存放大后的图片') - with gr.Group(): - image_out = gr.Image(height=512) - inputs = [image_in, scale_factor] - outputs = [image_out, message] - generate.click(magnify_image, inputs=inputs, outputs=outputs, api_name="magnify_image") - -print(f"Space built in {time.time() - start_time:.2f} seconds") - -demo.launch(share=False) diff --git a/spaces/ybelkada/interfacegan_pp/models/model_settings.py b/spaces/ybelkada/interfacegan_pp/models/model_settings.py deleted file mode 100644 index 65554100918908e5735e8a757439fb77ecc38e0e..0000000000000000000000000000000000000000 --- a/spaces/ybelkada/interfacegan_pp/models/model_settings.py +++ /dev/null @@ -1,102 +0,0 @@ -# python3.7 -"""Contains basic configurations for models used in this project. - -Please download the public released models from the following two repositories -OR train your own models, and then put them into `pretrain` folder. - -ProgressiveGAN: https://github.com/tkarras/progressive_growing_of_gans -StyleGAN: https://github.com/NVlabs/stylegan -StyleGAN: - -NOTE: Any new model should be registered in `MODEL_POOL` before using. 
-""" - -import os.path - -BASE_DIR = os.path.dirname(os.path.relpath(__file__)) - -MODEL_DIR = BASE_DIR + '/pretrain' - -MODEL_POOL = { - 'pggan_celebahq': { - 'tf_model_path': MODEL_DIR + '/karras2018iclr-celebahq-1024x1024.pkl', - 'model_path': MODEL_DIR + '/pggan_celebahq.pth', - 'gan_type': 'pggan', - 'dataset_name': 'celebahq', - 'latent_space_dim': 512, - 'resolution': 1024, - 'min_val': -1.0, - 'max_val': 1.0, - 'output_channels': 3, - 'channel_order': 'RGB', - 'fused_scale': False, - }, - 'stylegan_celebahq': { - 'tf_model_path': - MODEL_DIR + '/karras2019stylegan-celebahq-1024x1024.pkl', - 'model_path': MODEL_DIR + '/stylegan_celebahq.pth', - 'gan_type': 'stylegan', - 'dataset_name': 'celebahq', - 'latent_space_dim': 512, - 'w_space_dim': 512, - 'resolution': 1024, - 'min_val': -1.0, - 'max_val': 1.0, - 'output_channels': 3, - 'channel_order': 'RGB', - 'fused_scale': 'auto', - }, - 'stylegan_ffhq': { - 'tf_model_path': MODEL_DIR + '/karras2019stylegan-ffhq-1024x1024.pkl', - 'model_path': MODEL_DIR + '/stylegan_ffhq.pth', - 'gan_type': 'stylegan', - 'dataset_name': 'ffhq', - 'latent_space_dim': 512, - 'w_space_dim': 512, - 'resolution': 1024, - 'min_val': -1.0, - 'max_val': 1.0, - 'output_channels': 3, - 'channel_order': 'RGB', - 'fused_scale': 'auto', - }, - 'stylegan2_ffhq': { - 'tf_model_path': MODEL_DIR + '/karras2019stylegan-ffhq-1024x1024.pkl', - 'model_path': MODEL_DIR + '/stylegan2-ffhq-1024x1024.pkl', - 'gan_type': 'stylegan2', - 'dataset_name': 'ffhq', - 'latent_space_dim': 512, - 'w_space_dim': 512, - 'c_space_dim': 512, - 'resolution': 1024, - 'min_val': -1.0, - 'max_val': 1.0, - 'output_channels': 3, - 'channel_order': 'RGB', - 'fused_scale': 'auto', - }, - 'stylegan3_ffhq': { - 'model_path': MODEL_DIR + '/stylegan3-t-ffhq-1024x1024.pkl', - 'gan_type': 'stylegan3', - 'dataset_name': 'ffhq', - 'latent_space_dim': 512, - 'w_space_dim': 512, - 'c_space_dim': 512, - 'resolution': 1024, - 'min_val': -1.0, - 'max_val': 1.0, - 'output_channels': 3, - 'channel_order': 'RGB', - 'fused_scale': 'auto', - }, -} - -# Settings for StyleGAN. -STYLEGAN_TRUNCATION_PSI = 0.7 # 1.0 means no truncation -STYLEGAN_TRUNCATION_LAYERS = 8 # 0 means no truncation -STYLEGAN_RANDOMIZE_NOISE = False - -# Settings for model running. -USE_CUDA = False - -MAX_IMAGES_ON_DEVICE = 8 diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/dpt/image_processing_dpt.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/dpt/image_processing_dpt.py deleted file mode 100644 index 93374dbd92596ef3bae4ba9cd474ca7629536792..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/dpt/image_processing_dpt.py +++ /dev/null @@ -1,387 +0,0 @@ -# coding=utf-8 -# Copyright 2022 The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-"""Image processor class for DPT.""" - -import math -from typing import Dict, Iterable, List, Optional, Tuple, Union - -import numpy as np - -from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict -from ...image_transforms import resize, to_channel_dimension_format -from ...image_utils import ( - IMAGENET_STANDARD_MEAN, - IMAGENET_STANDARD_STD, - ChannelDimension, - ImageInput, - PILImageResampling, - get_image_size, - infer_channel_dimension_format, - is_scaled_image, - is_torch_available, - is_torch_tensor, - make_list_of_images, - to_numpy_array, - valid_images, -) -from ...utils import TensorType, is_vision_available, logging - - -if is_torch_available(): - import torch - -if is_vision_available(): - import PIL - - -logger = logging.get_logger(__name__) - - -def get_resize_output_image_size( - input_image: np.ndarray, - output_size: Union[int, Iterable[int]], - keep_aspect_ratio: bool, - multiple: int, - input_data_format: Optional[Union[str, ChannelDimension]] = None, -) -> Tuple[int, int]: - def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None): - x = round(val / multiple) * multiple - - if max_val is not None and x > max_val: - x = math.floor(val / multiple) * multiple - - if x < min_val: - x = math.ceil(val / multiple) * multiple - - return x - - output_size = (output_size, output_size) if isinstance(output_size, int) else output_size - - input_height, input_width = get_image_size(input_image, input_data_format) - output_height, output_width = output_size - - # determine new height and width - scale_height = output_height / input_height - scale_width = output_width / input_width - - if keep_aspect_ratio: - # scale as little as possible - if abs(1 - scale_width) < abs(1 - scale_height): - # fit width - scale_height = scale_width - else: - # fit height - scale_width = scale_height - - new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple) - new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple) - - return (new_height, new_width) - - -class DPTImageProcessor(BaseImageProcessor): - r""" - Constructs a DPT image processor. - - Args: - do_resize (`bool`, *optional*, defaults to `True`): - Whether to resize the image's (height, width) dimensions. Can be overidden by `do_resize` in `preprocess`. - size (`Dict[str, int]` *optional*, defaults to `{"height": 384, "width": 384}`): - Size of the image after resizing. Can be overidden by `size` in `preprocess`. - resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`): - Defines the resampling filter to use if resizing the image. Can be overidden by `resample` in `preprocess`. - keep_aspect_ratio (`bool`, *optional*, defaults to `False`): - If `True`, the image is resized to the largest possible size such that the aspect ratio is preserved. Can - be overidden by `keep_aspect_ratio` in `preprocess`. - ensure_multiple_of (`int`, *optional*, defaults to 1): - If `do_resize` is `True`, the image is resized to a size that is a multiple of this value. Can be overidden - by `ensure_multiple_of` in `preprocess`. - do_rescale (`bool`, *optional*, defaults to `True`): - Whether to rescale the image by the specified scale `rescale_factor`. Can be overidden by `do_rescale` in - `preprocess`. - rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): - Scale factor to use if rescaling the image. Can be overidden by `rescale_factor` in `preprocess`. 
- do_normalize (`bool`, *optional*, defaults to `True`): - Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` - method. - image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`): - Mean to use if normalizing the image. This is a float or list of floats the length of the number of - channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. - image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`): - Standard deviation to use if normalizing the image. This is a float or list of floats the length of the - number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. - """ - - model_input_names = ["pixel_values"] - - def __init__( - self, - do_resize: bool = True, - size: Dict[str, int] = None, - resample: PILImageResampling = PILImageResampling.BILINEAR, - keep_aspect_ratio: bool = False, - ensure_multiple_of: int = 1, - do_rescale: bool = True, - rescale_factor: Union[int, float] = 1 / 255, - do_normalize: bool = True, - image_mean: Optional[Union[float, List[float]]] = None, - image_std: Optional[Union[float, List[float]]] = None, - **kwargs, - ) -> None: - super().__init__(**kwargs) - size = size if size is not None else {"height": 384, "width": 384} - size = get_size_dict(size) - self.do_resize = do_resize - self.size = size - self.keep_aspect_ratio = keep_aspect_ratio - self.ensure_multiple_of = ensure_multiple_of - self.resample = resample - self.do_rescale = do_rescale - self.rescale_factor = rescale_factor - self.do_normalize = do_normalize - self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN - self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD - - def resize( - self, - image: np.ndarray, - size: Dict[str, int], - keep_aspect_ratio: bool = False, - ensure_multiple_of: int = 1, - resample: PILImageResampling = PILImageResampling.BICUBIC, - data_format: Optional[Union[str, ChannelDimension]] = None, - input_data_format: Optional[Union[str, ChannelDimension]] = None, - **kwargs, - ) -> np.ndarray: - """ - Resize an image to target size `(size["height"], size["width"])`. If `keep_aspect_ratio` is `True`, the image - is resized to the largest possible size such that the aspect ratio is preserved. If `ensure_multiple_of` is - set, the image is resized to a size that is a multiple of this value. - - Args: - image (`np.ndarray`): - Image to resize. - size (`Dict[str, int]`): - Target size of the output image. - keep_aspect_ratio (`bool`, *optional*, defaults to `False`): - If `True`, the image is resized to the largest possible size such that the aspect ratio is preserved. - ensure_multiple_of (`int`, *optional*, defaults to 1): - The image is resized to a size that is a multiple of this value. - resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): - Defines the resampling filter to use if resizing the image. Otherwise, the image is resized to size - specified in `size`. - resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): - Resampling filter to use when resiizing the image. - data_format (`str` or `ChannelDimension`, *optional*): - The channel dimension format of the image. If not provided, it will be the same as the input image. - input_data_format (`str` or `ChannelDimension`, *optional*): - The channel dimension format of the input image. 
If not provided, it will be inferred. - """ - size = get_size_dict(size) - if "height" not in size or "width" not in size: - raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}") - output_size = get_resize_output_image_size( - image, - output_size=(size["height"], size["width"]), - keep_aspect_ratio=keep_aspect_ratio, - multiple=ensure_multiple_of, - input_data_format=input_data_format, - ) - return resize( - image, - size=output_size, - resample=resample, - data_format=data_format, - input_data_format=input_data_format, - **kwargs, - ) - - def preprocess( - self, - images: ImageInput, - do_resize: bool = None, - size: int = None, - keep_aspect_ratio: bool = None, - ensure_multiple_of: int = None, - resample: PILImageResampling = None, - do_rescale: bool = None, - rescale_factor: float = None, - do_normalize: bool = None, - image_mean: Optional[Union[float, List[float]]] = None, - image_std: Optional[Union[float, List[float]]] = None, - return_tensors: Optional[Union[str, TensorType]] = None, - data_format: ChannelDimension = ChannelDimension.FIRST, - input_data_format: Optional[Union[str, ChannelDimension]] = None, - **kwargs, - ) -> PIL.Image.Image: - """ - Preprocess an image or batch of images. - - Args: - images (`ImageInput`): - Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If - passing in images with pixel values between 0 and 1, set `do_rescale=False`. - do_resize (`bool`, *optional*, defaults to `self.do_resize`): - Whether to resize the image. - size (`Dict[str, int]`, *optional*, defaults to `self.size`): - Size of the image after reszing. If `keep_aspect_ratio` is `True`, the image is resized to the largest - possible size such that the aspect ratio is preserved. If `ensure_multiple_of` is set, the image is - resized to a size that is a multiple of this value. - keep_aspect_ratio (`bool`, *optional*, defaults to `self.keep_aspect_ratio`): - Whether to keep the aspect ratio of the image. If False, the image will be resized to (size, size). If - True, the image will be resized to keep the aspect ratio and the size will be the maximum possible. - ensure_multiple_of (`int`, *optional*, defaults to `self.ensure_multiple_of`): - Ensure that the image size is a multiple of this value. - resample (`int`, *optional*, defaults to `self.resample`): - Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`, Only - has an effect if `do_resize` is set to `True`. - do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): - Whether to rescale the image values between [0 - 1]. - rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): - Rescale factor to rescale the image by if `do_rescale` is set to `True`. - do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): - Whether to normalize the image. - image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): - Image mean. - image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): - Image standard deviation. - return_tensors (`str` or `TensorType`, *optional*): - The type of tensors to return. Can be one of: - - Unset: Return a list of `np.ndarray`. - - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. 
- - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. - data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): - The channel dimension format for the output image. Can be one of: - - `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - - `ChannelDimension.LAST`: image in (height, width, num_channels) format. - input_data_format (`ChannelDimension` or `str`, *optional*): - The channel dimension format for the input image. If unset, the channel dimension format is inferred - from the input image. Can be one of: - - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. - """ - do_resize = do_resize if do_resize is not None else self.do_resize - size = size if size is not None else self.size - size = get_size_dict(size) - keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio - ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of - resample = resample if resample is not None else self.resample - do_rescale = do_rescale if do_rescale is not None else self.do_rescale - rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor - do_normalize = do_normalize if do_normalize is not None else self.do_normalize - image_mean = image_mean if image_mean is not None else self.image_mean - image_std = image_std if image_std is not None else self.image_std - - images = make_list_of_images(images) - - if not valid_images(images): - raise ValueError( - "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " - "torch.Tensor, tf.Tensor or jax.ndarray." - ) - - if do_resize and size is None or resample is None: - raise ValueError("Size and resample must be specified if do_resize is True.") - - if do_rescale and rescale_factor is None: - raise ValueError("Rescale factor must be specified if do_rescale is True.") - - if do_normalize and (image_mean is None or image_std is None): - raise ValueError("Image mean and std must be specified if do_normalize is True.") - - # All transformations expect numpy arrays. - images = [to_numpy_array(image) for image in images] - - if is_scaled_image(images[0]) and do_rescale: - logger.warning_once( - "It looks like you are trying to rescale already rescaled images. If the input" - " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." - ) - - if input_data_format is None: - # We assume that all images have the same channel dimension format. 
- input_data_format = infer_channel_dimension_format(images[0]) - - if do_resize: - images = [ - self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) - for image in images - ] - - if do_rescale: - images = [ - self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) - for image in images - ] - - if do_normalize: - images = [ - self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) - for image in images - ] - - images = [ - to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images - ] - - data = {"pixel_values": images} - return BatchFeature(data=data, tensor_type=return_tensors) - - # Copied from transformers.models.beit.image_processing_beit.BeitImageProcessor.post_process_semantic_segmentation with Beit->DPT - def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None): - """ - Converts the output of [`DPTForSemanticSegmentation`] into semantic segmentation maps. Only supports PyTorch. - - Args: - outputs ([`DPTForSemanticSegmentation`]): - Raw outputs of the model. - target_sizes (`List[Tuple]` of length `batch_size`, *optional*): - List of tuples corresponding to the requested final size (height, width) of each prediction. If unset, - predictions will not be resized. - - Returns: - semantic_segmentation: `List[torch.Tensor]` of length `batch_size`, where each item is a semantic - segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is - specified). Each entry of each `torch.Tensor` correspond to a semantic class id. - """ - # TODO: add support for other frameworks - logits = outputs.logits - - # Resize logits and compute semantic segmentation maps - if target_sizes is not None: - if len(logits) != len(target_sizes): - raise ValueError( - "Make sure that you pass in as many target sizes as the batch dimension of the logits" - ) - - if is_torch_tensor(target_sizes): - target_sizes = target_sizes.numpy() - - semantic_segmentation = [] - - for idx in range(len(logits)): - resized_logits = torch.nn.functional.interpolate( - logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False - ) - semantic_map = resized_logits[0].argmax(dim=0) - semantic_segmentation.append(semantic_map) - else: - semantic_segmentation = logits.argmax(dim=1) - semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])] - - return semantic_segmentation diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/gpt_sw3/__init__.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/gpt_sw3/__init__.py deleted file mode 100644 index e7c08f0e27e747ea5468e0f9f014df4225dbd424..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/gpt_sw3/__init__.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -from typing import TYPE_CHECKING - -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available - - -_import_structure = {} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"] - - -if TYPE_CHECKING: - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_gpt_sw3 import GPTSw3Tokenizer - -else: - import sys - - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/pix2struct/convert_pix2struct_original_pytorch_to_hf.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/pix2struct/convert_pix2struct_original_pytorch_to_hf.py deleted file mode 100644 index 457c2236694ad1367fada658a10905400e537da1..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/pix2struct/convert_pix2struct_original_pytorch_to_hf.py +++ /dev/null @@ -1,155 +0,0 @@ -# coding=utf-8 -# Copyright 2023 The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import argparse -import os -import re - -import torch -from flax.traverse_util import flatten_dict -from t5x import checkpoints - -from transformers import ( - AutoTokenizer, - Pix2StructConfig, - Pix2StructForConditionalGeneration, - Pix2StructImageProcessor, - Pix2StructProcessor, - Pix2StructTextConfig, - Pix2StructVisionConfig, -) - - -def get_flax_param(t5x_checkpoint_path): - flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path) - flax_params = flatten_dict(flax_params) - return flax_params - - -def rename_and_convert_flax_params(flax_dict): - converted_dict = {} - - CONVERSION_MAPPING = { - "token_embedder": "embeddings", - "encoder_norm": "layernorm", - "kernel": "weight", - ".out": ".output", - "scale": "weight", - "embedders_0.pos_embedding": "row_embedder.weight", - "embedders_1.pos_embedding": "column_embedder.weight", - } - - DECODER_CONVERSION_MAPPING = { - "query": "attention.query", - "key": "attention.key", - "value": "attention.value", - "output.dense": "output", - "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o", - "pre_self_attention_layer_norm": "self_attention.layer_norm", - "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm", - "mlp.": "mlp.DenseReluDense.", - "pre_mlp_layer_norm": "mlp.layer_norm", - "self_attention.o": "self_attention.attention.o", - "decoder.embeddings.embedding": "decoder.embed_tokens.weight", - "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight", - "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight", - "decoder.logits_dense.weight": "decoder.lm_head.weight", - } - - for key in flax_dict.keys(): - if "target" in key: - # remove the first prefix from the key - new_key = ".".join(key[1:]) - - # rename the key - for old, new in CONVERSION_MAPPING.items(): - new_key = new_key.replace(old, new) - - if "decoder" in new_key: - for old, new in DECODER_CONVERSION_MAPPING.items(): - new_key = new_key.replace(old, new) - - if "layers" in new_key and "decoder" not in new_key: - # use regex to replace the layer number - new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key) - new_key = new_key.replace("encoder", "encoder.encoder") - - elif "layers" in new_key and "decoder" in new_key: - # use regex to replace the layer number - new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key) - - converted_dict[new_key] = flax_dict[key] - - converted_torch_dict = {} - # convert converted_dict into torch format - for key in converted_dict.keys(): - if ("embed_tokens" not in key) and ("embedder" not in key): - converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T) - else: - converted_torch_dict[key] = torch.from_numpy(converted_dict[key]) - - return converted_torch_dict - - -def convert_pix2struct_original_pytorch_checkpoint_to_hf( - t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False -): - flax_params = get_flax_param(t5x_checkpoint_path) - - if not use_large: - encoder_config = Pix2StructVisionConfig() - decoder_config = Pix2StructTextConfig() - else: - encoder_config = Pix2StructVisionConfig( - hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18 - ) - decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18) - config = Pix2StructConfig( - vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa - ) - - model = Pix2StructForConditionalGeneration(config) - - torch_params = 
rename_and_convert_flax_params(flax_params) - model.load_state_dict(torch_params) - - tok = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer") - image_processor = Pix2StructImageProcessor() - processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tok) - - if use_large: - processor.image_processor.max_patches = 4096 - - processor.image_processor.is_vqa = True - - # mkdir if needed - os.makedirs(pytorch_dump_folder_path, exist_ok=True) - - model.save_pretrained(pytorch_dump_folder_path) - processor.save_pretrained(pytorch_dump_folder_path) - - print("Model saved in {}".format(pytorch_dump_folder_path)) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.") - parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") - parser.add_argument("--use_large", action="store_true", help="Use large model.") - parser.add_argument("--is_vqa", action="store_true", help="Use large model.") - args = parser.parse_args() - - convert_pix2struct_original_pytorch_checkpoint_to_hf( - args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large - ) diff --git a/spaces/ykilcher/apes/interface.py b/spaces/ykilcher/apes/interface.py deleted file mode 100644 index a131cdc7c200007d257a58ce0f1114da7a650c68..0000000000000000000000000000000000000000 --- a/spaces/ykilcher/apes/interface.py +++ /dev/null @@ -1,70 +0,0 @@ -#!/usr/bin/env python3 - -import gradio as gr - -import numpy as np -import torch -import pickle -import types - -from huggingface_hub import hf_hub_url, cached_download - -# with open('../models/gamma500/network-snapshot-010000.pkl', 'rb') as f: -with open(cached_download(hf_hub_url('ykilcher/apes', 'gamma500/network-snapshot-010000.pkl')), 'rb') as f: - G = pickle.load(f)['G_ema']# torch.nn.Module - -device = torch.device("cpu") -if torch.cuda.is_available(): - device = torch.device("cuda") - G = G.to(device) -else: - _old_forward = G.forward - - def _new_forward(self, *args, **kwargs): - kwargs["force_fp32"] = True - return _old_forward(*args, **kwargs) - - G.forward = types.MethodType(_new_forward, G) - - _old_synthesis_forward = G.synthesis.forward - - def _new_synthesis_forward(self, *args, **kwargs): - kwargs["force_fp32"] = True - return _old_synthesis_forward(*args, **kwargs) - - G.synthesis.forward = types.MethodType(_new_synthesis_forward, G.synthesis) - - -def generate(num_images, interpolate): - if interpolate: - z1 = torch.randn([1, G.z_dim])# latent codes - z2 = torch.randn([1, G.z_dim])# latent codes - zs = torch.cat([z1 + (z2 - z1) * i / (num_images-1) for i in range(num_images)], 0) - else: - zs = torch.randn([num_images, G.z_dim])# latent codes - with torch.no_grad(): - zs = zs.to(device) - img = G(zs, None, force_fp32=True, noise_mode='const') - img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8) - return img.cpu().numpy() - -def greet(num_images, interpolate): - img = generate(round(num_images), interpolate) - imgs = list(img) - if len(imgs) == 1: - return imgs[0] - grid_len = int(np.ceil(np.sqrt(len(imgs)))) * 2 - grid_height = int(np.ceil(len(imgs) / grid_len)) - grid = np.zeros((grid_height * imgs[0].shape[0], grid_len * imgs[0].shape[1], 3), dtype=np.uint8) - for i, img in enumerate(imgs): - y = (i // grid_len) * img.shape[0] - x = (i % grid_len) * img.shape[1] - grid[y:y+img.shape[0], x:x+img.shape[1], :] = img - return grid 
- - -iface = gr.Interface(fn=greet, inputs=[ - gr.inputs.Slider(default=1, label="Num Images", minimum=1, maximum=9, step=1), - gr.inputs.Checkbox(default=False, label="Interpolate") - ], outputs="image") -iface.launch() diff --git a/spaces/yl12053/so-vits-4.1-Grass-Wonder/resample.py b/spaces/yl12053/so-vits-4.1-Grass-Wonder/resample.py deleted file mode 100644 index 301e2924ec588ec61a55a2a6f2b2f68726dfda5b..0000000000000000000000000000000000000000 --- a/spaces/yl12053/so-vits-4.1-Grass-Wonder/resample.py +++ /dev/null @@ -1,49 +0,0 @@ -import os -import argparse -import librosa -import numpy as np -from multiprocessing import Pool, cpu_count -from scipy.io import wavfile -from tqdm import tqdm - - -def process(item): - spkdir, wav_name, args = item - # speaker 's5', 'p280', 'p315' are excluded, - speaker = spkdir.replace("\\", "/").split("/")[-1] - wav_path = os.path.join(args.in_dir, speaker, wav_name) - if os.path.exists(wav_path) and '.wav' in wav_path: - os.makedirs(os.path.join(args.out_dir2, speaker), exist_ok=True) - wav, sr = librosa.load(wav_path, sr=None) - wav, _ = librosa.effects.trim(wav, top_db=40) - peak = np.abs(wav).max() - if peak > 1.0: - wav = 0.98 * wav / peak - wav2 = librosa.resample(wav, orig_sr=sr, target_sr=args.sr2) - if not args.skip_loudnorm: - wav2 /= max(wav2.max(), -wav2.min()) - save_name = wav_name - save_path2 = os.path.join(args.out_dir2, speaker, save_name) - wavfile.write( - save_path2, - args.sr2, - (wav2 * np.iinfo(np.int16).max).astype(np.int16) - ) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--sr2", type=int, default=44100, help="sampling rate") - parser.add_argument("--in_dir", type=str, default="./dataset_raw", help="path to source dir") - parser.add_argument("--out_dir2", type=str, default="./dataset/44k", help="path to target dir") - parser.add_argument("--skip_loudnorm", action="store_true", help="Skip loudness matching if you have done it") - args = parser.parse_args() - processs = 30 if cpu_count() > 60 else (cpu_count()-2 if cpu_count() > 4 else 1) - pool = Pool(processes=processs) - - for speaker in os.listdir(args.in_dir): - spk_dir = os.path.join(args.in_dir, speaker) - if os.path.isdir(spk_dir): - print(spk_dir) - for _ in tqdm(pool.imap_unordered(process, [(spk_dir, i, args) for i in os.listdir(spk_dir) if i.endswith("wav")])): - pass diff --git a/spaces/yl12053/so-vits-4.1-Kitasan-Black/cluster/train_cluster.py b/spaces/yl12053/so-vits-4.1-Kitasan-Black/cluster/train_cluster.py deleted file mode 100644 index 8644566388a4107c4442da14c0de090bcd4a91b8..0000000000000000000000000000000000000000 --- a/spaces/yl12053/so-vits-4.1-Kitasan-Black/cluster/train_cluster.py +++ /dev/null @@ -1,84 +0,0 @@ -import time,pdb -import tqdm -from time import time as ttime -import os -from pathlib import Path -import logging -import argparse -from kmeans import KMeansGPU -import torch -import numpy as np -from sklearn.cluster import KMeans,MiniBatchKMeans - -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger(__name__) -from time import time as ttime -import pynvml,torch - -def train_cluster(in_dir, n_clusters, use_minibatch=True, verbose=False,use_gpu=False):#gpu_minibatch真拉,虽然库支持但是也不考虑 - logger.info(f"Loading features from {in_dir}") - features = [] - nums = 0 - for path in tqdm.tqdm(in_dir.glob("*.soft.pt")): - # for name in os.listdir(in_dir): - # path="%s/%s"%(in_dir,name) - features.append(torch.load(path,map_location="cpu").squeeze(0).numpy().T) - # print(features[-1].shape) - 
features = np.concatenate(features, axis=0) - print(nums, features.nbytes/ 1024**2, "MB , shape:",features.shape, features.dtype) - features = features.astype(np.float32) - logger.info(f"Clustering features of shape: {features.shape}") - t = time.time() - if(use_gpu==False): - if use_minibatch: - kmeans = MiniBatchKMeans(n_clusters=n_clusters,verbose=verbose, batch_size=4096, max_iter=80).fit(features) - else: - kmeans = KMeans(n_clusters=n_clusters,verbose=verbose).fit(features) - else: - kmeans = KMeansGPU(n_clusters=n_clusters, mode='euclidean', verbose=2 if verbose else 0,max_iter=500,tol=1e-2)# - features=torch.from_numpy(features)#.to(device) - labels = kmeans.fit_predict(features)# - - print(time.time()-t, "s") - - x = { - "n_features_in_": kmeans.n_features_in_ if use_gpu==False else features.shape[1], - "_n_threads": kmeans._n_threads if use_gpu==False else 4, - "cluster_centers_": kmeans.cluster_centers_ if use_gpu==False else kmeans.centroids.cpu().numpy(), - } - print("end") - - return x - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument('--dataset', type=Path, default="./dataset/44k", - help='path of training data directory') - parser.add_argument('--output', type=Path, default="logs/44k", - help='path of model output directory') - parser.add_argument('--gpu',action='store_true', default=False , - help='to use GPU') - - - args = parser.parse_args() - - checkpoint_dir = args.output - dataset = args.dataset - use_gpu = args.gpu - n_clusters = 10000 - - ckpt = {} - for spk in os.listdir(dataset): - if os.path.isdir(dataset/spk): - print(f"train kmeans for {spk}...") - in_dir = dataset/spk - x = train_cluster(in_dir, n_clusters,use_minibatch=False,verbose=False,use_gpu=use_gpu) - ckpt[spk] = x - - checkpoint_path = checkpoint_dir / f"kmeans_{n_clusters}.pt" - checkpoint_path.parent.mkdir(exist_ok=True, parents=True) - torch.save( - ckpt, - checkpoint_path, - ) - diff --git a/spaces/ynhe/AskAnything/models/grit_src/grit/modeling/meta_arch/grit.py b/spaces/ynhe/AskAnything/models/grit_src/grit/modeling/meta_arch/grit.py deleted file mode 100644 index 101725fd455e723360eaafc26db37beb226a9233..0000000000000000000000000000000000000000 --- a/spaces/ynhe/AskAnything/models/grit_src/grit/modeling/meta_arch/grit.py +++ /dev/null @@ -1,66 +0,0 @@ -from typing import Dict, List, Optional, Tuple -import torch -from detectron2.config import configurable -from detectron2.structures import ImageList, Instances, Boxes -from detectron2.modeling.meta_arch.build import META_ARCH_REGISTRY -from detectron2.modeling.meta_arch.rcnn import GeneralizedRCNN - - -@META_ARCH_REGISTRY.register() -class GRiT(GeneralizedRCNN): - @configurable - def __init__( - self, - **kwargs): - super().__init__(**kwargs) - assert self.proposal_generator is not None - - @classmethod - def from_config(cls, cfg): - ret = super().from_config(cfg) - return ret - - def inference( - self, - batched_inputs: Tuple[Dict[str, torch.Tensor]], - detected_instances: Optional[List[Instances]] = None, - do_postprocess: bool = True, - ): - assert not self.training - assert detected_instances is None - - images = self.preprocess_image(batched_inputs) - features = self.backbone(images.tensor) - proposals, _ = self.proposal_generator(images, features, None) - results, _ = self.roi_heads(features, proposals) - if do_postprocess: - assert not torch.jit.is_scripting(), \ - "Scripting is not supported for postprocess." 
- return GRiT._postprocess( - results, batched_inputs, images.image_sizes) - else: - return results - - def forward(self, batched_inputs: List[Dict[str, torch.Tensor]]): - if not self.training: - return self.inference(batched_inputs) - - images = self.preprocess_image(batched_inputs) - - gt_instances = [x["instances"].to(self.device) for x in batched_inputs] - - targets_task = batched_inputs[0]['task'] - for anno_per_image in batched_inputs: - assert targets_task == anno_per_image['task'] - - features = self.backbone(images.tensor) - proposals, proposal_losses = self.proposal_generator( - images, features, gt_instances) - proposals, roihead_textdecoder_losses = self.roi_heads( - features, proposals, gt_instances, targets_task=targets_task) - - losses = {} - losses.update(roihead_textdecoder_losses) - losses.update(proposal_losses) - - return losses \ No newline at end of file diff --git a/spaces/yoimiya/White-box-Cartoonization/README.md b/spaces/yoimiya/White-box-Cartoonization/README.md deleted file mode 100644 index 9860239cf42c94e385faaaa75a85311e010d64f7..0000000000000000000000000000000000000000 --- a/spaces/yoimiya/White-box-Cartoonization/README.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -python_version: 3.7 -title: White Box Cartoonization -emoji: 📚 -colorFrom: purple -colorTo: green -sdk: gradio -sdk_version: 2.9.4 -app_file: app.py -pinned: false -license: apache-2.0 -duplicated_from: hylee/White-box-Cartoonization ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/younker/chatgpt-turbo/client/node_modules/autoprefixer/lib/hacks/break-props.js b/spaces/younker/chatgpt-turbo/client/node_modules/autoprefixer/lib/hacks/break-props.js deleted file mode 100644 index 45252cd1d965b7c88777fc0c54830f02ce9d6e17..0000000000000000000000000000000000000000 --- a/spaces/younker/chatgpt-turbo/client/node_modules/autoprefixer/lib/hacks/break-props.js +++ /dev/null @@ -1,63 +0,0 @@ -let Declaration = require('../declaration') - -class BreakProps extends Declaration { - /** - * Change name for -webkit- and -moz- prefix - */ - prefixed(prop, prefix) { - return `${prefix}column-${prop}` - } - - /** - * Return property name by final spec - */ - normalize(prop) { - if (prop.includes('inside')) { - return 'break-inside' - } - if (prop.includes('before')) { - return 'break-before' - } - return 'break-after' - } - - /** - * Change prefixed value for avoid-column and avoid-page - */ - set(decl, prefix) { - if ( - (decl.prop === 'break-inside' && decl.value === 'avoid-column') || - decl.value === 'avoid-page' - ) { - decl.value = 'avoid' - } - return super.set(decl, prefix) - } - - /** - * Don’t prefix some values - */ - insert(decl, prefix, prefixes) { - if (decl.prop !== 'break-inside') { - return super.insert(decl, prefix, prefixes) - } - if (/region/i.test(decl.value) || /page/i.test(decl.value)) { - return undefined - } - return super.insert(decl, prefix, prefixes) - } -} - -BreakProps.names = [ - 'break-inside', - 'page-break-inside', - 'column-break-inside', - 'break-before', - 'page-break-before', - 'column-break-before', - 'break-after', - 'page-break-after', - 'column-break-after' -] - -module.exports = BreakProps diff --git a/spaces/yuhangzang/ContextDet-Demo/models/deformable_detr/ms_deform_attn.py b/spaces/yuhangzang/ContextDet-Demo/models/deformable_detr/ms_deform_attn.py deleted file mode 100644 index a28bb1bc106ca86fc659deb8bde164b91091b8c6..0000000000000000000000000000000000000000 --- 
a/spaces/yuhangzang/ContextDet-Demo/models/deformable_detr/ms_deform_attn.py +++ /dev/null @@ -1,412 +0,0 @@ -# ------------------------------------------------------------------------ -# Grounding DINO -# url: https://github.com/IDEA-Research/GroundingDINO -# Copyright (c) 2023 IDEA. All Rights Reserved. -# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------ -# Deformable DETR -# Copyright (c) 2020 SenseTime. All Rights Reserved. -# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------------------------------ -# Modified from: -# https://github.com/fundamentalvision/Deformable-DETR/blob/main/models/ops/functions/ms_deform_attn_func.py -# https://github.com/fundamentalvision/Deformable-DETR/blob/main/models/ops/modules/ms_deform_attn.py -# https://github.com/open-mmlab/mmcv/blob/master/mmcv/ops/multi_scale_deform_attn.py -# ------------------------------------------------------------------------------------------------ - -import math -import warnings -from typing import Optional - -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch.autograd import Function -from torch.autograd.function import once_differentiable -from torch.nn.init import constant_, xavier_uniform_ - -try: - from csrc import _C -except: - warnings.warn("Failed to load custom C++ ops. Running on CPU mode Only!") - - -# helpers -def _is_power_of_2(n): - if (not isinstance(n, int)) or (n < 0): - raise ValueError("invalid input for _is_power_of_2: {} (type: {})".format(n, type(n))) - return (n & (n - 1) == 0) and n != 0 - - -class MultiScaleDeformableAttnFunction(Function): - @staticmethod - def forward( - ctx, - value, - value_spatial_shapes, - value_level_start_index, - sampling_locations, - attention_weights, - im2col_step, - ): - ctx.im2col_step = im2col_step - output = _C.ms_deform_attn_forward( - value, - value_spatial_shapes, - value_level_start_index, - sampling_locations, - attention_weights, - ctx.im2col_step, - ) - ctx.save_for_backward( - value, - value_spatial_shapes, - value_level_start_index, - sampling_locations, - attention_weights, - ) - return output - - @staticmethod - @once_differentiable - def backward(ctx, grad_output): - ( - value, - value_spatial_shapes, - value_level_start_index, - sampling_locations, - attention_weights, - ) = ctx.saved_tensors - grad_value, grad_sampling_loc, grad_attn_weight = _C.ms_deform_attn_backward( - value, - value_spatial_shapes, - value_level_start_index, - sampling_locations, - attention_weights, - grad_output, - ctx.im2col_step, - ) - - return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None - - -def multi_scale_deformable_attn_pytorch( - value: torch.Tensor, - value_spatial_shapes: torch.Tensor, - sampling_locations: torch.Tensor, - attention_weights: torch.Tensor, -) -> torch.Tensor: - - bs, _, num_heads, embed_dims = value.shape - _, num_queries, num_heads, num_levels, num_points, _ = sampling_locations.shape - value_list = value.split([H_ * W_ for H_, W_ in value_spatial_shapes], dim=1) - sampling_grids = 2 * sampling_locations - 1 - sampling_value_list = [] - for level, (H_, W_) in enumerate(value_spatial_shapes): - # bs, H_*W_, num_heads, embed_dims -> - # bs, H_*W_, num_heads*embed_dims -> - # bs, num_heads*embed_dims, H_*W_ -> - # bs*num_heads, embed_dims, H_, W_ - value_l_ = ( - value_list[level].flatten(2).transpose(1, 
2).reshape(bs * num_heads, embed_dims, H_, W_)
-        )
-        # bs, num_queries, num_heads, num_points, 2 ->
-        # bs, num_heads, num_queries, num_points, 2 ->
-        # bs*num_heads, num_queries, num_points, 2
-        sampling_grid_l_ = sampling_grids[:, :, :, level].transpose(1, 2).flatten(0, 1)
-        # bs*num_heads, embed_dims, num_queries, num_points
-        sampling_value_l_ = F.grid_sample(
-            value_l_, sampling_grid_l_, mode="bilinear", padding_mode="zeros", align_corners=False
-        )
-        sampling_value_list.append(sampling_value_l_)
-    # (bs, num_queries, num_heads, num_levels, num_points) ->
-    # (bs, num_heads, num_queries, num_levels, num_points) ->
-    # (bs, num_heads, 1, num_queries, num_levels*num_points)
-    attention_weights = attention_weights.transpose(1, 2).reshape(
-        bs * num_heads, 1, num_queries, num_levels * num_points
-    )
-    output = (
-        (torch.stack(sampling_value_list, dim=-2).flatten(-2) * attention_weights)
-        .sum(-1)
-        .view(bs, num_heads * embed_dims, num_queries)
-    )
-    return output.transpose(1, 2).contiguous()
-
-
-class MultiScaleDeformableAttention(nn.Module):
-    """Multi-Scale Deformable Attention Module used in Deformable-DETR
-
-    `Deformable DETR: Deformable Transformers for End-to-End Object Detection.
-    <https://arxiv.org/abs/2010.04159>`_.
-
-    Args:
-        embed_dim (int): The embedding dimension of Attention. Default: 256.
-        num_heads (int): The number of attention heads. Default: 8.
-        num_levels (int): The number of feature maps used in Attention. Default: 4.
-        num_points (int): The number of sampling points for each query
-            in each head. Default: 4.
-        img2col_step (int): The step used in image_to_column. Default: 64.
-        batch_first (bool): if ``True``, the input and output tensors are provided
-            as `(bs, n, embed_dim)`; otherwise as `(n, bs, embed_dim)`. Default: False.
-    """
-
-    def __init__(
-        self,
-        embed_dim: int = 256,
-        num_levels: int = 4,
-        num_heads: int = 8,
-        num_points: int = 4,
-        img2col_step: int = 64,
-        batch_first: bool = False,
-    ):
-        super().__init__()
-        if embed_dim % num_heads != 0:
-            raise ValueError(
-                "embed_dim must be divisible by num_heads, but got {} and {}".format(
-                    embed_dim, num_heads
-                )
-            )
-        head_dim = embed_dim // num_heads
-
-        self.batch_first = batch_first
-
-        if not _is_power_of_2(head_dim):
-            warnings.warn(
-                "You'd better set embed_dim in MultiScaleDeformableAttention so that "
-                "the dimension of each attention head is a power of 2, which is more "
-                "efficient in the CUDA implementation."
-            )
-
-        self.im2col_step = img2col_step
-        self.embed_dim = embed_dim
-        self.num_heads = num_heads
-        self.num_levels = num_levels
-        self.num_points = num_points
-        self.sampling_offsets = nn.Linear(embed_dim, num_heads * num_levels * num_points * 2)
-        self.attention_weights = nn.Linear(embed_dim, num_heads * num_levels * num_points)
-        self.value_proj = nn.Linear(embed_dim, embed_dim)
-        self.output_proj = nn.Linear(embed_dim, embed_dim)
-
-        self.init_weights()
-
-    def _reset_parameters(self):
-        return self.init_weights()
-
-    def init_weights(self):
-        """
-        Default initialization for Parameters of Module.
-        """
-        constant_(self.sampling_offsets.weight.data, 0.0)
-        thetas = torch.arange(self.num_heads, dtype=torch.float32) * (
-            2.0 * math.pi / self.num_heads
-        )
-        grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)
-        grid_init = (
-            (grid_init / grid_init.abs().max(-1, keepdim=True)[0])
-            .view(self.num_heads, 1, 1, 2)
-            .repeat(1, self.num_levels, self.num_points, 1)
-        )
-        for i in range(self.num_points):
-            grid_init[:, :, i, :] *= i + 1
-        with torch.no_grad():
-            self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1))
-        constant_(self.attention_weights.weight.data, 0.0)
-        constant_(self.attention_weights.bias.data, 0.0)
-        xavier_uniform_(self.value_proj.weight.data)
-        constant_(self.value_proj.bias.data, 0.0)
-        xavier_uniform_(self.output_proj.weight.data)
-        constant_(self.output_proj.bias.data, 0.0)
-
-    def freeze_sampling_offsets(self):
-        print("Freeze sampling offsets")
-        self.sampling_offsets.weight.requires_grad = False
-        self.sampling_offsets.bias.requires_grad = False
-
-    def freeze_attention_weights(self):
-        print("Freeze attention weights")
-        self.attention_weights.weight.requires_grad = False
-        self.attention_weights.bias.requires_grad = False
-
-    def forward(
-        self,
-        query: torch.Tensor,
-        key: Optional[torch.Tensor] = None,
-        value: Optional[torch.Tensor] = None,
-        query_pos: Optional[torch.Tensor] = None,
-        key_padding_mask: Optional[torch.Tensor] = None,
-        reference_points: Optional[torch.Tensor] = None,
-        spatial_shapes: Optional[torch.Tensor] = None,
-        level_start_index: Optional[torch.Tensor] = None,
-        **kwargs
-    ) -> torch.Tensor:
-
-        """Forward Function of MultiScaleDeformableAttention
-
-        Args:
-            query (torch.Tensor): Query embeddings with shape
-                `(num_query, bs, embed_dim)`
-            key (torch.Tensor): Key embeddings with shape
-                `(num_key, bs, embed_dim)`
-            value (torch.Tensor): Value embeddings with shape
-                `(num_key, bs, embed_dim)`
-            query_pos (torch.Tensor): The position embedding for `query`. Default: None.
-            key_padding_mask (torch.Tensor): ByteTensor with shape `(bs, num_key)`,
-                indicating which elements within `value` should be ignored in attention.
-            reference_points (torch.Tensor): The normalized reference points
-                with shape `(bs, num_query, num_levels, 2)`,
-                all elements are in range [0, 1], top-left (0, 0),
-                bottom-right (1, 1), including the padding area;
-                or `(bs, num_query, num_levels, 4)`, with two additional
-                dimensions `(w, h)` to form reference boxes.
-            spatial_shapes (torch.Tensor): Spatial shape of features in different levels.
-                With shape `(num_levels, 2)`, last dimension represents `(h, w)`.
-            level_start_index (torch.Tensor): The start index of each level. A tensor with
-                shape `(num_levels, )` which can be represented as
-                `[0, h_0 * w_0, h_0 * w_0 + h_1 * w_1, ...]`.
- - Returns: - torch.Tensor: forward results with shape `(num_query, bs, embed_dim)` - """ - if value is None: - value = query - - if query_pos is not None: - query = query + query_pos - - if not self.batch_first: - # change to (bs, num_query ,embed_dims) - query = query.permute(1, 0, 2) - value = value.permute(1, 0, 2) - - bs, num_query, _ = query.shape - bs, num_value, _ = value.shape - - assert (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() == num_value - - value = self.value_proj(value) - if key_padding_mask is not None: - value = value.masked_fill(key_padding_mask[..., None], float(0)) - value = value.view(bs, num_value, self.num_heads, -1) - sampling_offsets = self.sampling_offsets(query).view( - bs, num_query, self.num_heads, self.num_levels, self.num_points, 2 - ) - attention_weights = self.attention_weights(query).view( - bs, num_query, self.num_heads, self.num_levels * self.num_points - ) - attention_weights = attention_weights.softmax(-1) - attention_weights = attention_weights.view( - bs, - num_query, - self.num_heads, - self.num_levels, - self.num_points, - ) - - # bs, num_query, num_heads, num_levels, num_points, 2 - if reference_points.shape[-1] == 2: - offset_normalizer = torch.stack([spatial_shapes[..., 1], spatial_shapes[..., 0]], -1) - sampling_locations = ( - reference_points[:, :, None, :, None, :] - + sampling_offsets / offset_normalizer[None, None, None, :, None, :] - ) - elif reference_points.shape[-1] == 4: - sampling_locations = ( - reference_points[:, :, None, :, None, :2] - + sampling_offsets - / self.num_points - * reference_points[:, :, None, :, None, 2:] - * 0.5 - ) - else: - raise ValueError( - "Last dim of reference_points must be 2 or 4, but get {} instead.".format( - reference_points.shape[-1] - ) - ) - - if torch.cuda.is_available() and value.is_cuda: - halffloat = False - if value.dtype == torch.float16: - halffloat = True - value = value.float() - sampling_locations = sampling_locations.float() - attention_weights = attention_weights.float() - - output = MultiScaleDeformableAttnFunction.apply( - value, - spatial_shapes, - level_start_index, - sampling_locations, - attention_weights, - self.im2col_step, - ) - - if halffloat: - output = output.half() - else: - output = multi_scale_deformable_attn_pytorch( - value, spatial_shapes, sampling_locations, attention_weights - ) - - output = self.output_proj(output) - - if not self.batch_first: - output = output.permute(1, 0, 2) - - return output - - -def create_dummy_class(klass, dependency, message=""): - """ - When a dependency of a class is not available, create a dummy class which throws ImportError - when used. - - Args: - klass (str): name of the class. - dependency (str): name of the dependency. - message: extra message to print - Returns: - class: a class object - """ - err = "Cannot import '{}', therefore '{}' is not available.".format(dependency, klass) - if message: - err = err + " " + message - - class _DummyMetaClass(type): - # throw error on class attribute access - def __getattr__(_, __): # noqa: B902 - raise ImportError(err) - - class _Dummy(object, metaclass=_DummyMetaClass): - # throw error on constructor - def __init__(self, *args, **kwargs): - raise ImportError(err) - - return _Dummy - - -def create_dummy_func(func, dependency, message=""): - """ - When a dependency of a function is not available, create a dummy function which throws - ImportError when used. - - Args: - func (str): name of the function. - dependency (str or list[str]): name(s) of the dependency. 
- message: extra message to print - Returns: - function: a function object - """ - err = "Cannot import '{}', therefore '{}' is not available.".format(dependency, func) - if message: - err = err + " " + message - - if isinstance(dependency, (list, tuple)): - dependency = ",".join(dependency) - - def _dummy(*args, **kwargs): - raise ImportError(err) - - return _dummy diff --git a/spaces/zoeozone/mrm8488-Alpacoom/style.css b/spaces/zoeozone/mrm8488-Alpacoom/style.css deleted file mode 100644 index 114adf441e9032febb46bc056b2a8bb651075f0d..0000000000000000000000000000000000000000 --- a/spaces/zoeozone/mrm8488-Alpacoom/style.css +++ /dev/null @@ -1,28 +0,0 @@ -body { - padding: 2rem; - font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif; -} - -h1 { - font-size: 16px; - margin-top: 0; -} - -p { - color: rgb(107, 114, 128); - font-size: 15px; - margin-bottom: 10px; - margin-top: 5px; -} - -.card { - max-width: 620px; - margin: 0 auto; - padding: 16px; - border: 1px solid lightgray; - border-radius: 16px; -} - -.card p:last-child { - margin-bottom: 0; -} diff --git a/spaces/zxcgqq/nsfw/README.md b/spaces/zxcgqq/nsfw/README.md deleted file mode 100644 index d40697c7d8b24b51353a84ff19253b9d2e5c737d..0000000000000000000000000000000000000000 --- a/spaces/zxcgqq/nsfw/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Nsfw -emoji: 😻 -colorFrom: blue -colorTo: green -sdk: gradio -sdk_version: 3.29.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/zxy666/bingo-chatai666/src/components/settings.tsx b/spaces/zxy666/bingo-chatai666/src/components/settings.tsx deleted file mode 100644 index 80b8a2d3b252b875f5b6f7dfc2f6e3ad9cdfb22a..0000000000000000000000000000000000000000 --- a/spaces/zxy666/bingo-chatai666/src/components/settings.tsx +++ /dev/null @@ -1,157 +0,0 @@ -import { useEffect, useState } from 'react' -import { useAtom } from 'jotai' -import { Switch } from '@headlessui/react' -import { toast } from 'react-hot-toast' -import { hashAtom, voiceAtom } from '@/state' -import { - Dialog, - DialogContent, - DialogDescription, - DialogFooter, - DialogHeader, - DialogTitle -} from '@/components/ui/dialog' -import { Button } from './ui/button' -import { Input } from './ui/input' -import { ChunkKeys, parseCookies, extraCurlFromCookie, encodeHeadersToCookie, getCookie, setCookie } from '@/lib/utils' -import { ExternalLink } from './external-link' -import { useCopyToClipboard } from '@/lib/hooks/use-copy-to-clipboard' - - -export function Settings() { - const { isCopied, copyToClipboard } = useCopyToClipboard({ timeout: 2000 }) - const [loc, setLoc] = useAtom(hashAtom) - const [curlValue, setCurlValue] = useState(extraCurlFromCookie(parseCookies(document.cookie, ChunkKeys))) - const [imageOnly, setImageOnly] = useState(getCookie('IMAGE_ONLY') !== '0') - const [enableTTS, setEnableTTS] = useAtom(voiceAtom) - - useEffect(() => { - if (isCopied) { - toast.success('复制成功') - } - }, [isCopied]) - - if (loc === 'settings') { - return ( - setLoc('')} modal> - - - 设置你的用户信息 - - 请使用 Edge 浏览器 - - 打开并登录 Bing - - ,然后再打开 - Challenge 接口 - 右键 》检查。打开开发者工具,在网络里面找到 Create 接口 》右键复制》复制为 cURL(bash),粘贴到此处,然后保存。 -
-                  {/* NOTE: the JSX markup of this dialog body was stripped during extraction;
-                      only the surviving text and handlers of the deleted component are kept. */}
-                  {/* 图文示例:如何获取 BING_HEADER (illustrated guide: how to obtain BING_HEADER) */}
-                  {/* cURL paste box, bound via setCurlValue(e.target.value) */}
-                  {/* 身份信息仅用于画图(推荐) (identity info used for image generation only — recommended),
-                      a Switch toggled via setImageOnly(checked) */}
-                  {/* dialog footer action buttons (markup lost) */}
-    )
-  } else if (loc === 'voice') {
-    return (
-      {/* voice settings dialog, closed via setLoc(''):
-          语音设置 (voice settings) — 目前仅支持 PC 端 Edge 及 Chrome 浏览器
-          (currently only desktop Edge and Chrome are supported);
-          启用语音回答 (enable spoken answers), a Switch toggled via setEnableTTS(checked);
-          confirm button (markup lost) */}
-    )
-  }
-  return null
-}
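
For reference, a minimal CPU-only sketch of how the MultiScaleDeformableAttention module deleted above is typically driven. It exercises the pure-PyTorch fallback (multi_scale_deformable_attn_pytorch), so the custom csrc CUDA extension is not required; the import path mirrors the deleted file's location, and the batch size, feature-level shapes, and query count are illustrative assumptions rather than values taken from the repository.

```python
import torch

# Assumes the deleted module layout: models/deformable_detr/ms_deform_attn.py
from models.deformable_detr.ms_deform_attn import MultiScaleDeformableAttention

bs, embed_dim, num_query = 2, 256, 100
# (num_levels, 2) spatial shapes as (H, W); four feature levels are assumed here.
spatial_shapes = torch.tensor([[32, 32], [16, 16], [8, 8], [4, 4]])
level_start_index = torch.cat(
    (spatial_shapes.new_zeros(1), (spatial_shapes[:, 0] * spatial_shapes[:, 1]).cumsum(0)[:-1])
)
num_value = int((spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum())  # 1360 tokens over all levels

attn = MultiScaleDeformableAttention(
    embed_dim=embed_dim, num_levels=4, num_heads=8, num_points=4, batch_first=True
)

query = torch.rand(bs, num_query, embed_dim)
value = torch.rand(bs, num_value, embed_dim)         # flattened multi-scale feature map
reference_points = torch.rand(bs, num_query, 4, 2)   # normalized (x, y) per query per level

out = attn(
    query=query,
    value=value,
    reference_points=reference_points,
    spatial_shapes=spatial_shapes,
    level_start_index=level_start_index,
)
print(out.shape)  # torch.Size([2, 100, 256]) with batch_first=True
```

With CUDA tensors and the compiled csrc extension available, the same call instead dispatches to MultiScaleDeformableAttnFunction; the fallback path shown here is what runs when the extension fails to import.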