diff --git a/spaces/101-5/gpt4free/g4f/.v1/testing/test_main.py b/spaces/101-5/gpt4free/g4f/.v1/testing/test_main.py deleted file mode 100644 index 7c28f1d2b7e8c6da449ac6e47358881de7ea4fe5..0000000000000000000000000000000000000000 --- a/spaces/101-5/gpt4free/g4f/.v1/testing/test_main.py +++ /dev/null @@ -1,27 +0,0 @@ -import gpt4free -from gpt4free import Provider, quora, forefront - -# usage You -response = gpt4free.Completion.create(Provider.You, prompt='Write a poem on Lionel Messi') -print(response) - -# usage Poe -token = quora.Account.create(logging=False) -response = gpt4free.Completion.create(Provider.Poe, prompt='Write a poem on Lionel Messi', token=token, model='ChatGPT') -print(response) - -# usage forefront -token = forefront.Account.create(logging=False) -response = gpt4free.Completion.create( - Provider.ForeFront, prompt='Write a poem on Lionel Messi', model='gpt-4', token=token -) -print(response) -print(f'END') - -# usage theb -response = gpt4free.Completion.create(Provider.Theb, prompt='Write a poem on Lionel Messi') -print(response) - -# usage cocalc -response = gpt4free.Completion.create(Provider.CoCalc, prompt='Write a poem on Lionel Messi', cookie_input='') -print(response) diff --git a/spaces/17TheWord/RealESRGAN/realesrgan/archs/discriminator_arch.py b/spaces/17TheWord/RealESRGAN/realesrgan/archs/discriminator_arch.py deleted file mode 100644 index 4b66ab1226d6793de846bc9828bbe427031a0e2d..0000000000000000000000000000000000000000 --- a/spaces/17TheWord/RealESRGAN/realesrgan/archs/discriminator_arch.py +++ /dev/null @@ -1,67 +0,0 @@ -from basicsr.utils.registry import ARCH_REGISTRY -from torch import nn as nn -from torch.nn import functional as F -from torch.nn.utils import spectral_norm - - -@ARCH_REGISTRY.register() -class UNetDiscriminatorSN(nn.Module): - """Defines a U-Net discriminator with spectral normalization (SN) - - It is used in Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data. - - Arg: - num_in_ch (int): Channel number of inputs. Default: 3. - num_feat (int): Channel number of base intermediate features. Default: 64. - skip_connection (bool): Whether to use skip connections between U-Net. Default: True. 
- """ - - def __init__(self, num_in_ch, num_feat=64, skip_connection=True): - super(UNetDiscriminatorSN, self).__init__() - self.skip_connection = skip_connection - norm = spectral_norm - # the first convolution - self.conv0 = nn.Conv2d(num_in_ch, num_feat, kernel_size=3, stride=1, padding=1) - # downsample - self.conv1 = norm(nn.Conv2d(num_feat, num_feat * 2, 4, 2, 1, bias=False)) - self.conv2 = norm(nn.Conv2d(num_feat * 2, num_feat * 4, 4, 2, 1, bias=False)) - self.conv3 = norm(nn.Conv2d(num_feat * 4, num_feat * 8, 4, 2, 1, bias=False)) - # upsample - self.conv4 = norm(nn.Conv2d(num_feat * 8, num_feat * 4, 3, 1, 1, bias=False)) - self.conv5 = norm(nn.Conv2d(num_feat * 4, num_feat * 2, 3, 1, 1, bias=False)) - self.conv6 = norm(nn.Conv2d(num_feat * 2, num_feat, 3, 1, 1, bias=False)) - # extra convolutions - self.conv7 = norm(nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=False)) - self.conv8 = norm(nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=False)) - self.conv9 = nn.Conv2d(num_feat, 1, 3, 1, 1) - - def forward(self, x): - # downsample - x0 = F.leaky_relu(self.conv0(x), negative_slope=0.2, inplace=True) - x1 = F.leaky_relu(self.conv1(x0), negative_slope=0.2, inplace=True) - x2 = F.leaky_relu(self.conv2(x1), negative_slope=0.2, inplace=True) - x3 = F.leaky_relu(self.conv3(x2), negative_slope=0.2, inplace=True) - - # upsample - x3 = F.interpolate(x3, scale_factor=2, mode='bilinear', align_corners=False) - x4 = F.leaky_relu(self.conv4(x3), negative_slope=0.2, inplace=True) - - if self.skip_connection: - x4 = x4 + x2 - x4 = F.interpolate(x4, scale_factor=2, mode='bilinear', align_corners=False) - x5 = F.leaky_relu(self.conv5(x4), negative_slope=0.2, inplace=True) - - if self.skip_connection: - x5 = x5 + x1 - x5 = F.interpolate(x5, scale_factor=2, mode='bilinear', align_corners=False) - x6 = F.leaky_relu(self.conv6(x5), negative_slope=0.2, inplace=True) - - if self.skip_connection: - x6 = x6 + x0 - - # extra convolutions - out = F.leaky_relu(self.conv7(x6), negative_slope=0.2, inplace=True) - out = F.leaky_relu(self.conv8(out), negative_slope=0.2, inplace=True) - out = self.conv9(out) - - return out diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Aayirathil Oruvan 2010 HD Full Movie Uncut Version with English Subtitles - The Unforgettable Saga of Love and War.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Aayirathil Oruvan 2010 HD Full Movie Uncut Version with English Subtitles - The Unforgettable Saga of Love and War.md deleted file mode 100644 index b49aa2b09b3b3ea41cac992cc03ad3470d7b7f4a..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Aayirathil Oruvan 2010 HD Full Movie Uncut Version with English Subtitles - The Unforgettable Saga of Love and War.md +++ /dev/null @@ -1,99 +0,0 @@ - -

Aayirathil Oruvan 2010 HD Full Movie Uncut Version with English Subtitles

-

If you are a fan of Tamil cinema, you might have heard of Aayirathil Oruvan, a 2010 epic fantasy adventure film directed by Selvaraghavan. The film was a commercial failure when it was released, but over the years, it has gained a cult following among the audience and critics alike. The film is praised for its ambitious vision, original story, stunning visuals, and haunting music. It is also known for its controversial and violent scenes, which were censored in the theatrical release. However, in 2021, the director released an uncut version of the film on an OTT platform, which restored the original footage and added more depth and clarity to the film. In this article, we will explore what makes Aayirathil Oruvan a masterpiece of Tamil cinema, and why you should watch the uncut version with English subtitles.

-

Introduction

-

What is Aayirathil Oruvan?

-

Aayirathil Oruvan (English: One Man in a Thousand) is a 2010 Tamil-language film written and directed by Selvaraghavan, starring Karthi, Reemma Sen, Andrea Jeremiah, Parthiban, and Pratap Pothan. The film is loosely based on the historical novel Ponniyin Selvan by Kalki Krishnamurthy, which narrates the story of the Chola dynasty in medieval India. However, Selvaraghavan adds his own twist to the tale by setting it in a fictional world where the Chola kingdom has been lost for centuries and is rediscovered by a group of explorers in the present day. The film blends elements of fantasy, adventure, horror, mystery, and romance to create a unique cinematic experience.

-

aayirathil oruvan 2010 hd full movie uncut version with english subtitles


Download https://byltly.com/2uKwsc



-

Why is it a cult classic?

-

Aayirathil Oruvan was one of the most expensive and ambitious films ever made in Tamil cinema at that time. It took more than three years to complete, with extensive research, pre-production, shooting, and post-production. The film was shot in various locations across India, Thailand, Vietnam, and Jordan. The film also featured elaborate sets, costumes, props, and special effects to create a realistic and immersive world. The film had a grand musical score composed by G.V. Prakash Kumar, with lyrics by Vairamuthu. The film also had some of the most talented actors in Tamil cinema who gave memorable performances.

-

However, on release the film drew criticism for its complex plot, its graphic violence, and its lack of commercial appeal. The film also faced several controversies and legal issues, such as plagiarism allegations, censorship cuts, and distribution problems. The film was a box office flop and was soon forgotten by the mainstream audience.

-

However, over the years, the film started to gain a loyal fan base who appreciated the film for its artistic vision, innovative storytelling, and technical brilliance. The film also received positive reviews from some critics who re-evaluated the film and recognized its merits. The film became a cult classic and a benchmark for Tamil cinema. The film also inspired many filmmakers and artists who were influenced by its style and themes. The film also generated a lot of curiosity and speculation among the fans who wanted to know more about the film's world and characters.

-

What is the uncut version and why is it different?

-

The uncut version of Aayirathil Oruvan is the director's original cut of the film, which was never released in theatres due to censorship issues. The uncut version has more than 30 minutes of additional footage that was either trimmed or deleted from the theatrical release. The uncut version also has improved sound design, color grading, and subtitles. The uncut version reveals more details and explanations about the film's plot, characters, and themes. The uncut version also has more graphic and disturbing scenes that showcase the brutality and horror of the film's world. The uncut version is considered to be the definitive version of the film by the director and the fans.

-

The uncut version of Aayirathil Oruvan was released on an OTT platform in January 2021, coinciding with the 11th anniversary of the film's release. The uncut version received an overwhelming response from the fans and critics who watched it for the first time or revisited it after a long time. The uncut version also attracted new viewers who were curious about the film's hype and reputation. The uncut version created a huge buzz on social media and online forums, where people discussed and debated about the film's various aspects. The uncut version also received appreciation from celebrities and industry insiders who praised the film's vision and quality.

-

Plot Summary

-

The expedition to find the lost Chola kingdom

-

The film begins with a prologue that shows how the Chola kingdom was attacked by the Pandya kingdom in 1279 CE. The Chola king and his people fled to a secret location to escape from their enemies. However, their whereabouts were never known to anyone.

-

watch aayirathil oruvan 2010 hd uncut movie online with subtitles
-aayirathil oruvan 2010 full hd movie download uncut version english subs
-how to stream aayirathil oruvan 2010 hd movie uncut version with subtitles
-aayirathil oruvan 2010 hd uncut movie review and ratings with english subtitles
-where to find aayirathil oruvan 2010 hd full movie uncut version with subs
-aayirathil oruvan 2010 hd movie uncut version english subtitles cast and crew
-aayirathil oruvan 2010 hd full movie uncut version with subtitles plot and summary
-aayirathil oruvan 2010 hd movie uncut version with english subs trailer and teaser
-aayirathil oruvan 2010 hd full movie uncut version with subtitles songs and soundtrack
-aayirathil oruvan 2010 hd movie uncut version english subtitles trivia and facts
-aayirathil oruvan 2010 hd full movie uncut version with subtitles awards and nominations
-aayirathil oruvan 2010 hd movie uncut version with english subs box office and budget
-aayirathil oruvan 2010 hd full movie uncut version with subtitles behind the scenes and making
-aayirathil oruvan 2010 hd movie uncut version english subtitles analysis and interpretation
-aayirathil oruvan 2010 hd full movie uncut version with subtitles comparison and contrast
-aayirathil oruvan 2010 hd movie uncut version with english subs fan theories and speculations
-aayirathil oruvan 2010 hd full movie uncut version with subtitles memes and jokes
-aayirathil oruvan 2010 hd movie uncut version english subtitles quotes and dialogues
-aayirathil oruvan 2010 hd full movie uncut version with subtitles references and easter eggs
-aayirathil oruvan 2010 hd movie uncut version with english subs controversies and criticisms
-aayirathil oruvan 2010 hd full movie uncut version with subtitles sequel and prequel
-aayirathil oruvan 2010 hd movie uncut version english subtitles remake and reboot
-aayirathil oruvan 2010 hd full movie uncut version with subtitles adaptation and inspiration
-aayirathil oruvan 2010 hd movie uncut version with english subs genre and theme
-aayirathil oruvan 2010 hd full movie uncut version with subtitles symbolism and imagery
-aayirathil oruvan 2010 hd movie uncut version english subtitles style and tone
-aayirathil oruvan 2010 hd full movie uncut version with subtitles message and moral
-aayirathil oruvan 2010 hd movie uncut version with english subs history and background
-aayirathil oruvan 2010 hd full movie uncut version with subtitles influence and impact
-aayirathil oruvan 2010 hd movie uncut version english subtitles best and worst scenes
-aayirathil oruvan 2010 hd full movie uncut version with subtitles favorite and least favorite characters
-aayirathil oruvan 2010 hd movie uncut version with english subs recommendations and suggestions
-aayirathil oruvan 2010 hd full movie uncut version with subtitles opinions and feedbacks
-aayirathil oruvan 2010 hd movie uncut version english subtitles questions and answers
-aayirathil oruvan 2010 hd full movie uncut version with subtitles challenges and quizzes
-aayirathil oruvan 2010 hd movie uncut version with english subs facts and myths
-aayirathil oruvan 2010 hd full movie uncut version with subtitles secrets and surprises
-aayirathil oruvan 2010 hd movie uncut version english subtitles mistakes and errors
-aayirathil oruvan 2010 hd full movie uncut version with subtitles tips and tricks
-aayirathil oruvan 2010 hd movie uncut version with english subs fun facts and trivia

-

In the present day, an explorer named Anitha (Reemma Sen) obtains an ancient map that leads to the lost kingdom through a sea route. Anitha decides to follow the map and hires a team of mercenaries led by a man named Ravichandran (Parthiban) to escort her. She also recruits a local guide named Muthu (Karthi), who is a coolie and a smuggler. Muthu agrees to join the expedition for money and also to impress his love interest Lavanya (Andrea Jeremiah), who is a part of Anitha's team.

-

The challenges and dangers faced by the team

-

The team sets sail on a ship and follows the map. Along the way, they encounter various obstacles and threats, such as storms, pirates, sea monsters, and hostile tribes. The team also faces internal conflicts and mistrust, as some of the members have ulterior motives and hidden agendas. The team also learns that they are not the only ones looking for the kingdom, as there is another rival team led by a mysterious man named Chidambaram (Pratap Pothan), who claims to be a descendant of the Chola king.

-

The secrets and mysteries of the ancient civilization

-

After facing many hardships and losses, the team finally reaches the island where the kingdom is supposed to be located. However, they are shocked to find that the kingdom is not a normal human civilization, but a bizarre and twisted world where the Chola people have devolved into primitive and savage beings. The Chola people live in fear and worship a tyrannical king who rules over them with an iron fist. The king also has a secret weapon that can destroy anyone who opposes him.

-

The team realizes that they have entered a dangerous and deadly place, where they have to fight for their survival and sanity. They also discover many secrets and mysteries about the kingdom, such as its history, culture, religion, and technology. They also learn that the kingdom is connected to an ancient prophecy that involves Anitha and Muthu.

-

The climax and the twist ending

-

The film reaches its climax when the team confronts the king and his weapon in his palace. The king reveals his identity and his connection to Chidambaram. He also reveals his plan to use his weapon to destroy the Pandya kingdom and reclaim his glory. However, his plan is thwarted by Anitha and Muthu, who manage to stop him and his weapon with their courage and intelligence. The film ends with a twist that changes everything about the film's story and characters.

-

Analysis and Review

-

The themes and messages of the film

-

The film questions the notion of history and how it is written and rewritten by those in power, and the notion of identity and how it is influenced by one's culture and heritage. The film also questions the notions of civilization and how it is defined by one's values and morals. The film also questions the notions of power and how it is used and abused by those who have it. The film also questions the notions of loyalty and love and how they are tested and proven by one's actions and choices. The film also questions the notions of destiny and how it is shaped by one's will and fate.

-

The film conveys these themes and messages through its story, characters, and visuals. The film shows how history is not always what it seems, and how it can be manipulated and distorted by those who want to control it. The film shows how identity is not always fixed, and how it can change and evolve over time and circumstances. The film shows how civilization is not always superior, and how it can degrade and decay over generations and environments. The film shows how power is not always righteous, and how it can corrupt and destroy those who possess it. The film shows how loyalty and love are not always easy, and how they can be challenged and betrayed by those who claim them. The film shows how destiny is not always predetermined, and how it can be altered and fulfilled by those who pursue it.

-

The performances and characters of the film

-

Aayirathil Oruvan features some of the finest performances and characters in Tamil cinema. The film has a diverse and dynamic cast of actors who bring their characters to life with their skills and expressions. The film has three main protagonists who have their own arcs and motivations. Anitha is a strong and determined woman who is passionate about her work and her mission. She is also a compassionate and caring person who values human life and dignity. Muthu is a witty and charming man who is loyal to his friends and his love. He is also a brave and clever person who uses his wit and humor to overcome his challenges. Ravichandran is a ruthless and cunning leader who is loyal to his team and his duty. He is also a pragmatic and realistic person who does not hesitate to make tough decisions.

-

Chidambaram is a mysterious and ambitious person who wants to restore his ancestral glory and power. The king is a cruel and tyrannical ruler who oppresses his people and enemies with his weapon. He is also a delusional and paranoid person who believes that he is a god and the chosen one.

-

The film also has some supporting characters who have their own roles and personalities. Lavanya is a sweet and innocent girl who loves Muthu and supports him in his journey. She is also a brave and loyal person who sacrifices herself for him. The Chola people are a group of primitive and savage beings who live in fear and misery under the king's rule. They are also a group of loyal and proud beings who follow their traditions and customs. The Pandya people are a group of civilized and cultured beings who live in peace and harmony in their kingdom. They are also a group of brave and noble beings who fight for their freedom and justice.

-

The visuals and music of the film

-

Aayirathil Oruvan is a film that showcases some of the most stunning visuals and music in Tamil cinema. The film has a rich and diverse visual style that creates a realistic and immersive world. The film has a variety of locations, such as forests, deserts, islands, caves, temples, palaces, and cities. The film also has a variety of sets, costumes, props, and special effects that create a authentic and impressive world. The film also has a variety of shots, angles, lighting, and colors that create a dynamic and expressive world.

-

The film also has a grand and haunting musical score that enhances the mood and emotion of the film. The film has a variety of songs, such as folk songs, rock songs, classical songs, and theme songs. The film also has a variety of instruments, such as drums, guitars, flutes, violins, and trumpets. The film also has a variety of vocals, such as male vocals, female vocals, chorus vocals, and tribal vocals. The film also has a variety of lyrics, such as Tamil lyrics, English lyrics, Sanskrit lyrics, and gibberish lyrics.

-

The strengths and weaknesses of the film

-

Aayirathil Oruvan has both strengths and weaknesses. Its strengths are its originality, vision, and quality; its weaknesses are its complexity, confusion, and controversy. The film dares to be different and innovative in an industry that is often dominated by formulaic and commercial films. It has a clear and strong vision of what it wants to say and show, and it does not compromise on its artistic integrity and values. It also has a high level of quality in its technical aspects, such as its production design, cinematography, editing, sound design, music, and special effects.

-

On the other hand, the film is too complex and confusing for the average viewer to understand and appreciate. It has plot holes, inconsistencies, and contradictions that make it hard to follow and believe. It has controversial and violent scenes that make it hard to watch and enjoy. And it received a lot of negative reviews and feedback from the audience and critics who did not like or appreciate it.

-

Conclusion

-

Why you should watch Aayirathil Oruvan 2010 HD Full Movie Uncut Version with English Subtitles

-

In conclusion, Aayirathil Oruvan 2010 HD Full Movie Uncut Version with English Subtitles is a film that you should watch if you are looking for a different and unique cinematic experience. It is a film that will challenge your mind and senses with its story, characters, visuals, and music. It is a film that will make you think and feel with its themes and messages. It is a film that will surprise and shock you with its twists and turns. It is a film that will inspire and impress you with its vision and quality. It is a film that will make you appreciate the art and craft of filmmaking.

-

Aayirathil Oruvan 2010 HD Full Movie Uncut Version with English Subtitles is not a perfect film, but it is a masterpiece of Tamil cinema. It is a film that deserves your attention and respect. It is a film that you should not miss.

-

FAQs

-

Here are some frequently asked questions about Aayirathil Oruvan 2010 HD Full Movie Uncut Version with English Subtitles:

-
    -
  1. Where can I watch Aayirathil Oruvan 2010 HD Full Movie Uncut Version with English Subtitles?
  2. -

    You can watch Aayirathil Oruvan 2010 HD Full Movie Uncut Version with English Subtitles on the OTT platform Zee5, where it was released in January 2021.

    -
  3. What is the difference between the theatrical release and the uncut version of Aayirathil Oruvan?
  4. -

The theatrical release was shortened by censorship cuts, while the uncut version restores more than 30 minutes of trimmed or deleted footage, along with improved sound design, color grading, and subtitles, and adds more clarity and depth to the film.

    -
  5. What is the meaning of the title Aayirathil Oruvan?
  6. -

    The title Aayirathil Oruvan means One Man in a Thousand in Tamil. It refers to the protagonist Muthu, who is a common man who becomes a hero in his journey. It also refers to the antagonist Chidambaram, who is a rare man who claims to be a king in his lineage. It also refers to the director Selvaraghavan, who is a unique man who made a film like no other.

    -
  7. What is the genre of Aayirathil Oruvan?
  8. -

    Aayirathil Oruvan is a film that does not fit into one genre, but rather combines elements of various genres, such as fantasy, adventure, horror, mystery, and romance. The film can be considered as a historical fantasy adventure film with a touch of horror and mystery.

    -
  9. What is the message of Aayirathil Oruvan?
  10. -

Aayirathil Oruvan is a film that has many messages and interpretations, depending on the viewer's perspective and understanding. Some of the possible messages are:
- History is not always what it seems, and it can be rewritten by those who have the power and the will to do so.
- Identity is not always fixed, and it can change and evolve over time and circumstances.
- Civilization is not always superior, and it can degrade and decay over generations and environments.
- Power is not always righteous, and it can corrupt and destroy those who possess it.
- Loyalty and love are not always easy, and they can be challenged and betrayed by those who claim them.
- Destiny is not always predetermined, and it can be altered and fulfilled by those who pursue it.

    -

    0a6ba089eb
    -
    -
    \ No newline at end of file diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Email Extractor 14 Serial Key.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Email Extractor 14 Serial Key.md deleted file mode 100644 index 7a6114e6d32a96b30b160b520e798e5ae4a55545..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Email Extractor 14 Serial Key.md +++ /dev/null @@ -1,142 +0,0 @@ - -

    Email Extractor 14 Serial Key: How to Get It and Why You Need It

    -

    If you are looking for a powerful and easy-to-use tool to extract email addresses from various sources, such as websites, search engines, files, folders, and email accounts, then you might have heard of Email Extractor 14. This software is designed to help you build your own email list for marketing, sales, or communication purposes. But how can you get the most out of this software? And how can you get a valid serial key to activate it? In this article, we will answer these questions and more.

    -

    email extractor 14 serial key


    Download ——— https://byltly.com/2uKznp



    -

    What is Email Extractor 14?

    -

    Email Extractor 14 is a software program that allows you to extract email addresses from various sources in a fast and efficient way. You can use it to find email addresses of your potential customers, clients, partners, or competitors. You can also use it to verify and clean your existing email list, removing duplicates and invalid emails. With Email Extractor 14, you can create your own targeted and customized email list that suits your needs and goals.

    -

    Features and Benefits of Email Extractor 14

    -

    Some of the features and benefits of Email Extractor 14 are:

    -
      -
    • It can extract email addresses from websites, search engines, files, folders, and email accounts.
    • -
    • It can extract email addresses based on keywords, domains, or specific criteria.
    • -
    • It can filter out unwanted email addresses, such as spam, junk, or disposable emails.
    • -
    • It can save the extracted email addresses in various formats, such as TXT, CSV, XLS, or XML.
    • -
    • It can export the extracted email addresses to other software programs, such as Outlook, Gmail, or MailChimp.
    • -
    • It can update itself automatically to ensure optimal performance and compatibility.
    • -
    • It has a user-friendly interface that is easy to navigate and operate.
    • -
    • It has a high speed and accuracy that can process thousands of email addresses per minute.
    • -
    • It has a low system requirement that can run on any Windows computer.
    • -
    • It has a lifetime license that allows you to use it forever without any recurring fees.
    • -
    -

    How to Use Email Extractor 14

    -

    To use Email Extractor 14, you need to follow these simple steps:

    -
      -
    1. Download and install the software from the official website.
    2. -
    3. Launch the software and enter your serial key to activate it.
    4. -
    5. Select the source from which you want to extract email addresses. You can choose from websites, search engines, files, folders, or email accounts.
    6. -
    7. Enter the parameters or criteria for your extraction. You can enter keywords, domains, filters, or other options.
    8. -
    9. Click on the "Start" button and wait for the extraction process to finish.
    10. -
    11. View the results and save them in your preferred format or export them to other software programs.
    12. -
    -

    What is a Serial Key and Why Do You Need One?

    -

    A serial key is a unique code that is used to activate a software program. It is also known as a license key, activation key, product key, or registration key. A serial key is usually composed of alphanumeric characters that are divided into groups by dashes or hyphens. For example: XXXX-XXXX-XXXX-XXXX.

    -

    You need a serial key to activate Email Extractor 14 because it is a paid software program that requires a valid license to use. Without a serial key, you will not be able to access all the features and functions of the software. You will also not be able to receive updates and support from the developer. Therefore, having a serial key is essential if you want to enjoy the full benefits of Email Extractor 14.

    -

    How a Serial Key Works

    -

    A serial key works by verifying the authenticity and legitimacy of the software program. When you enter your serial key into the software program, it will check if the key matches with its database. If the key is valid and genuine, it will grant you access to the software program. If the key is invalid or fake, it will deny you access to the software program. A serial key also helps prevent piracy and illegal distribution of the software program by ensuring that only authorized users can use it.
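To make the mechanism above concrete, here is a minimal, purely illustrative sketch of a vendor-side key check. This is not Email Extractor 14's actual activation scheme (which is not public); the XXXX-XXXX-XXXX-XXXX format, the HMAC-based derivation, and all names below are assumptions invented for the example.

```python
# Hypothetical illustration only -- not Email Extractor 14's real algorithm.
# The vendor derives a key from an order/customer id with a private secret;
# the activation check simply re-derives the key and compares.
import hmac
import hashlib

VENDOR_SECRET = b"vendor-private-secret"  # assumed: known only to the vendor


def make_key(customer_id: str) -> str:
    """Derive a key and group it as XXXX-XXXX-XXXX-XXXX."""
    digest = hmac.new(VENDOR_SECRET, customer_id.encode(), hashlib.sha256)
    raw = digest.hexdigest().upper()[:16]
    return "-".join(raw[i:i + 4] for i in range(0, 16, 4))


def is_valid(customer_id: str, key: str) -> bool:
    """Re-derive the expected key and compare in constant time."""
    return hmac.compare_digest(make_key(customer_id), key.strip().upper())


if __name__ == "__main__":
    key = make_key("order-12345")
    print(key)                                             # e.g. 1A2B-3C4D-5E6F-7A8B
    print(is_valid("order-12345", key))                    # True
    print(is_valid("order-12345", "AAAA-BBBB-CCCC-DDDD"))  # False
```

In a design along these lines, only the party holding the secret can mint keys that pass the check, which is the property described in the paragraph above.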

    -

    Advantages of Having a Serial Key

    -

    Some of the advantages of having a serial key are:

    -
      -
    • You can use all the features and functions of Email Extractor 14 without any limitations or restrictions.
    • -
    • You can receive updates and support from the developer whenever there are new versions or issues with the software program.
    • -
    • You can protect your investment and avoid wasting money on buying fake or cracked versions of the software program.
    • -
    • You can avoid legal troubles and penalties that may arise from using pirated or unlicensed versions of the software program.
    • -
    -

    Risks of Using a Cracked or Fake Serial Key

    -

    Some of the risks of using a cracked or fake serial key are:

    -
      -
    • You may not be able to use all the features and functions of Email Extractor 14 properly or at all.
    • -
    • You may expose your computer to viruses, malware, spyware, or other harmful programs that may damage your system or steal your data.
    • -
    • You may compromise your security and privacy by allowing hackers or cybercriminals to access your information or accounts.
    • -
    • You may violate the terms and conditions of the software program and face legal actions or lawsuits from the developer or other parties.
    • -
    -

    How to Get a Genuine Email Extractor 14 Serial Key

    -

    If you want to get a genuine Email Extractor 14 serial key, you have three options:

    -

    email extractor 14 activation code
    -email extractor 14 crack download
    -email extractor 14 license key free
    -email extractor 14 full version
    -email extractor 14 registration key
    -email extractor 14 keygen generator
    -email extractor 14 patch file
    -email extractor 14 torrent link
    -email extractor 14 product key online
    -email extractor 14 serial number finder
    -email extractor 14 cracked software
    -email extractor 14 activation key generator
    -email extractor 14 license code online
    -email extractor 14 serial key free download
    -email extractor 14 unlock code
    -email extractor 14 crack file download
    -email extractor 14 registration code free
    -email extractor 14 keygen download
    -email extractor 14 patch download
    -email extractor 14 torrent download
    -email extractor 14 product key generator
    -email extractor 14 serial number generator
    -email extractor 14 cracked version download
    -email extractor 14 activation key free
    -email extractor 14 license key generator
    -email extractor 14 serial key online
    -email extractor 14 unlock key
    -email extractor 14 crack software download
    -email extractor 14 registration key generator
    -email extractor 14 keygen online
    -email extractor 14 patch online
    -email extractor 14 torrent file download
    -email extractor 14 product key online free
    -email extractor 14 serial number online
    -email extractor 14 cracked version online
    -email extractor 14 activation code generator
    -email extractor 14 license code generator
    -email extractor 14 serial key generator online
    -email extractor 14 unlock code generator
    -email extractor 14 crack software online
    -email extractor 14 registration code generator online
    -email extractor 14 keygen free download
    -email extractor 14 patch free download
    -email extractor 14 torrent file online
    -email extractor 14 product key free download
    -email extractor 14 serial number free download
    -email extractor 14 cracked version free download
    -email extractor 14 activation code free download
    -email extractor 14 license code free download
    -email extractor 14 serial key free online

    -

    Buy from the Official Website

    -

    The best and safest way to get a genuine Email Extractor 14 serial key is to buy it from the official website. The official website is https://www.emailxtractor.com/. Here you can find all the information about Email Extractor 14, such as its features, benefits, price, and testimonials. You can also download a free trial version of the software program to test it before buying it. To buy a genuine Email Extractor 14 serial key from the official website, you need to follow these steps:

    -
      -
    1. Select the license type that suits your needs. You can choose between Single User License ($69), Multi User License ($99), or Corporate License ($199).
    2. -
    3. Click on the "Buy Now" button and proceed to checkout. You will be redirected to a secure payment page where you can enter your billing details and payment method.
    4. -
    5. Complete your payment and wait for your confirmation email. You will receive an email with your receipt and your serial key within minutes after your payment is processed.
    6. -
    7. Enter your serial key into Email Extractor 14 and enjoy using it!
    8. -
    -

    Contact the Customer Support

    -

    If you have any questions, issues, or problems with your purchase, activation, or usage of Email Extractor 14, you can contact the customer support team for assistance. The customer support team is available 24/7 via email, phone, or live chat. You can find their contact details on their website https://www.emailxtractor.com/contact-us/. They will respond to your queries as soon as possible and help you resolve any issues you may have. They will also provide you with tips, tricks, and best practices on how to use Email Extractor 14 effectively and efficiently.

    -

    Use a Coupon Code or a Discount Offer

    -

    If you want to save some money on buying a genuine Email Extractor 14 serial key, you can use a coupon code or a discount offer that may be available from time to time. website https://www.emailxtractor.com/, their social media pages, their newsletters, or their affiliates. You can also search for them online using keywords such as "Email Extractor 14 coupon code" or "Email Extractor 14 discount offer". To use a coupon code or a discount offer, you need to follow these steps:

    -
      -
    1. Find a valid and working coupon code or discount offer that applies to Email Extractor 14.
    2. -
    3. Copy the coupon code or click on the discount offer link to activate it.
    4. -
    5. Go to the official website https://www.emailxtractor.com/ and select the license type that suits your needs.
    6. -
    7. Paste the coupon code in the designated box or apply the discount offer automatically at checkout.
    8. -
    9. Complete your payment and wait for your confirmation email. You will receive an email with your receipt and your serial key within minutes after your payment is processed.
    10. -
    11. Enter your serial key into Email Extractor 14 and enjoy using it!
    12. -
    -

    Conclusion

    -

    Email Extractor 14 is a powerful and easy-to-use tool that can help you extract email addresses from various sources in a fast and efficient way. You can use it to build your own email list for marketing, sales, or communication purposes. However, to use Email Extractor 14, you need a valid serial key to activate it. A serial key is a unique code that verifies the authenticity and legitimacy of the software program. Having a serial key allows you to access all the features and functions of Email Extractor 14 without any limitations or restrictions. It also allows you to receive updates and support from the developer whenever there are new versions or issues with the software program. Moreover, having a serial key protects your investment and avoids legal troubles that may arise from using pirated or unlicensed versions of the software program.

    -

    If you want to get a genuine Email Extractor 14 serial key, you have three options: buy from the official website, contact the customer support, or use a coupon code or a discount offer. Buying from the official website is the best and safest way to get a genuine Email Extractor 14 serial key. You can find all the information about Email Extractor 14 on their website https://www.emailxtractor.com/. You can also download a free trial version of the software program to test it before buying it. Contacting the customer support is another option if you have any questions, issues, or problems with your purchase, activation, or usage of Email Extractor 14. The customer support team is available 24/7 via email, phone, or live chat. They will respond to your queries as soon as possible and help you resolve any issues you may have. They will also provide you with tips, tricks, and best practices on how to use Email Extractor 14 effectively and efficiently. Using a coupon code or a discount offer is another option if you want to save some money on buying a genuine Email Extractor 14 serial key. You can find these coupon codes or discount offers on their website https://www.emailxtractor.com/, their social media pages, their newsletters, or their affiliates. You can also search for them online using keywords such as "Email Extractor 14 coupon code" or "Email Extractor 14 discount offer".

    -

    We hope this article has helped you understand what Email Extractor 14 is, what a serial key is, why you need one, and how to get one. If you are interested in using Email Extractor 14 for your email extraction needs, we recommend you to get a genuine Email Extractor 14 serial key from one of the options mentioned above. This way, you can enjoy the full benefits of Email Extractor 14 without any hassle or risk. Thank you for reading!

    -

    FAQs

    -

    Here are some frequently asked questions about Email Extractor 14 and its serial key:

    -

    Q: Is Email Extractor 14 safe to use?

    -

    A: Yes, Email Extractor 14 is safe to use as long as you download it from the official website https://www.emailxtractor.com/ and use a genuine serial key to activate it. The software program does not contain any viruses, malware, spyware, or other harmful programs that may damage your system or steal your data. The software program also respects your privacy and does not collect or share any of your personal information without your consent.

    -

    Q: How long does it take to extract email addresses using Email Extractor 14?

    -

    A: The time it takes to extract email addresses using Email Extractor 14 depends on several factors, such as the source, the parameters, the filters, and the speed of your internet connection. However, in general, Email Extractor 14 can process thousands of email addresses per minute. Therefore, it can extract email addresses from various sources in a fast and efficient way.

    -

    Q: How many email addresses can I extract using Email Extractor 14?

    -

    A: There is no limit on how many email addresses you can extract using Email Extractor 14. You can extract as many email addresses as you want from various sources using Email Extractor 14. However, you should be aware of the ethical and legal implications of extracting and using email addresses for your purposes. You should always respect the privacy and consent of the email owners and follow the rules and regulations of email marketing and communication.

    -

    Q: Can I use Email Extractor 14 on multiple computers?

    -

    A: Yes, you can use Email Extractor 14 on multiple computers as long as you have a valid serial key for each computer. You can buy multiple licenses for Email Extractor 14 from the official website https://www.emailxtractor.com/. You can choose between Single User License ($69), Multi User License ($99), or Corporate License ($199). Each license type allows you to use Email Extractor 14 on a different number of computers.

    -

    Q: What if I lose my serial key?

    -

    A: If you lose your serial key for Email Extractor 14, you can contact the customer support team for assistance. They will help you recover your serial key as soon as possible. You can find their contact details on their website https://www.emailxtractor.com/contact-us/. You can also check your confirmation email that contains your receipt and your serial key.

    -

    0a6ba089eb
    -
    -
    \ No newline at end of file diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Alsat m live seriali me fal Nj serial q trajton tema si dashuria tradhtia hakmarrja dhe falja.md b/spaces/1gistliPinn/ChatGPT4/Examples/Alsat m live seriali me fal Nj serial q trajton tema si dashuria tradhtia hakmarrja dhe falja.md deleted file mode 100644 index 9ab28efb941d43788f1f09cc8166fe4b95896d4a..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Alsat m live seriali me fal Nj serial q trajton tema si dashuria tradhtia hakmarrja dhe falja.md +++ /dev/null @@ -1,6 +0,0 @@ -

    alsat m live seriali me fal


    Download File ☆☆☆☆☆ https://imgfil.com/2uy1DW



    -
    - aaccfb2cb3
    -
    -
    -

    diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Ap Calculus Textbook Finney Pdf Download Master Calculus with Thomas and Finneys Classic Textbook.md b/spaces/1gistliPinn/ChatGPT4/Examples/Ap Calculus Textbook Finney Pdf Download Master Calculus with Thomas and Finneys Classic Textbook.md deleted file mode 100644 index ac05563fee2cbc9ec0cbf1fe8bfa22aabcac1c63..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Ap Calculus Textbook Finney Pdf Download Master Calculus with Thomas and Finneys Classic Textbook.md +++ /dev/null @@ -1,5 +0,0 @@ -
    -

APEX Calculus is a calculus textbook written for traditional college/university calculus courses. It has the look and feel of the calculus book you likely use right now (Stewart, Thomas & Finney, etc.). The explanations of new concepts are clear, written for someone who does not yet know calculus. Each section ends with an exercise set with ample problems to practice & test skills (odd answers are in the back).

    -

    Ap Calculus Textbook Finney Pdf Download


    DOWNLOAD → https://imgfil.com/2uxYrI



    aaccfb2cb3
    -
    -
    \ No newline at end of file diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Candydoll.tv Laura-B Set Updated !FREE!.md b/spaces/1gistliPinn/ChatGPT4/Examples/Candydoll.tv Laura-B Set Updated !FREE!.md deleted file mode 100644 index 26e831745dd9022c645286bd2e8a4110a3e60f05..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Candydoll.tv Laura-B Set Updated !FREE!.md +++ /dev/null @@ -1,6 +0,0 @@ -

    candydoll.tv Laura-B set updated


    Download File » https://imgfil.com/2uxZKK



    -
    - d5da3c52bf
    -
    -
    -

    diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Dpwh Blue Book Volume 2 Pdf Free Download HOT!.md b/spaces/1gistliPinn/ChatGPT4/Examples/Dpwh Blue Book Volume 2 Pdf Free Download HOT!.md deleted file mode 100644 index 6c1e0e4c52240f4df903f23b530f94c0f5672d9d..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Dpwh Blue Book Volume 2 Pdf Free Download HOT!.md +++ /dev/null @@ -1,6 +0,0 @@ -

    dpwh blue book volume 2 pdf free download


    DOWNLOAD ★ https://imgfil.com/2uy1Ws



    - - 3cee63e6c2
    -
    -
    -

    diff --git a/spaces/1toTree/lora_test/ppdiffusers/__init__.py b/spaces/1toTree/lora_test/ppdiffusers/__init__.py deleted file mode 100644 index b4656561df15b4db4d90e2ce012eb1c3bb56071d..0000000000000000000000000000000000000000 --- a/spaces/1toTree/lora_test/ppdiffusers/__init__.py +++ /dev/null @@ -1,162 +0,0 @@ -# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. -# Copyright 2022 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# flake8: noqa - -from .configuration_utils import ConfigMixin -from .fastdeploy_utils import FastDeployRuntimeModel -from .ppnlp_patch_utils import * -from .utils import ( - OptionalDependencyNotAvailable, - is_fastdeploy_available, - is_inflect_available, - is_k_diffusion_available, - is_librosa_available, - is_onnx_available, - is_paddle_available, - is_paddlenlp_available, - is_scipy_available, - is_unidecode_available, - logging, -) -from .version import VERSION as __version__ - -try: - if not is_paddle_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - from .utils.dummy_paddle_objects import * # noqa F403 -else: - from .initializer import * - from .modeling_utils import ModelMixin - from .models import ( - AutoencoderKL, - PriorTransformer, - Transformer2DModel, - UNet1DModel, - UNet2DConditionModel, - UNet2DModel, - VQModel, - ) - from .optimization import ( - get_constant_schedule, - get_constant_schedule_with_warmup, - get_cosine_schedule_with_warmup, - get_cosine_with_hard_restarts_schedule_with_warmup, - get_linear_schedule_with_warmup, - get_polynomial_decay_schedule_with_warmup, - get_scheduler, - ) - from .pipeline_utils import DiffusionPipeline - from .pipelines import ( - DanceDiffusionPipeline, - DDIMPipeline, - DDPMPipeline, - KarrasVePipeline, - LDMPipeline, - LDMSuperResolutionPipeline, - PNDMPipeline, - RePaintPipeline, - ScoreSdeVePipeline, - ) - from .schedulers import ( - DDIMScheduler, - DDPMScheduler, - DPMSolverMultistepScheduler, - DPMSolverSinglestepScheduler, - EulerAncestralDiscreteScheduler, - EulerDiscreteScheduler, - HeunDiscreteScheduler, - IPNDMScheduler, - KarrasVeScheduler, - KDPM2AncestralDiscreteScheduler, - KDPM2DiscreteScheduler, - PNDMScheduler, - RePaintScheduler, - SchedulerMixin, - ScoreSdeVeScheduler, - UnCLIPScheduler, - VQDiffusionScheduler, - ) - from .schedulers.preconfig import PreconfigEulerAncestralDiscreteScheduler - from .training_utils import EMAModel - -try: - if not (is_paddle_available() and is_scipy_available()): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - from .utils.dummy_paddle_and_scipy_objects import * # noqa F403 -else: - from .schedulers import LMSDiscreteScheduler - from .schedulers.preconfig import PreconfigLMSDiscreteScheduler - -try: - if not (is_paddle_available() and is_paddlenlp_available()): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - from .utils.dummy_paddle_and_paddlenlp_objects import * # noqa F403 -else: - from 
.pipelines import ( - AltDiffusionImg2ImgPipeline, - AltDiffusionPipeline, - CycleDiffusionPipeline, - LDMBertModel, - LDMTextToImagePipeline, - PaintByExamplePipeline, - StableDiffusionDepth2ImgPipeline, - StableDiffusionImageVariationPipeline, - StableDiffusionImg2ImgPipeline, - StableDiffusionInpaintPipeline, - StableDiffusionInpaintPipelineLegacy, - StableDiffusionMegaPipeline, - StableDiffusionPipeline, - StableDiffusionPipelineAllinOne, - StableDiffusionPipelineSafe, - StableDiffusionUpscalePipeline, - UnCLIPPipeline, - VersatileDiffusionDualGuidedPipeline, - VersatileDiffusionImageVariationPipeline, - VersatileDiffusionPipeline, - VersatileDiffusionTextToImagePipeline, - VQDiffusionPipeline, - ) - -try: - if not (is_paddle_available() and is_paddlenlp_available() and is_k_diffusion_available()): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - from .utils.dummy_paddle_and_paddlenlp_and_k_diffusion_objects import * # noqa F403 -else: - from .pipelines import StableDiffusionKDiffusionPipeline - -try: - if not (is_paddle_available() and is_paddlenlp_available() and is_fastdeploy_available()): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - from .utils.dummy_paddle_and_paddlenlp_and_fastdeploy_objects import * # noqa F403 -else: - from .pipelines import ( - FastDeployStableDiffusionImg2ImgPipeline, - FastDeployStableDiffusionInpaintPipeline, - FastDeployStableDiffusionInpaintPipelineLegacy, - FastDeployStableDiffusionMegaPipeline, - FastDeployStableDiffusionPipeline, - ) -try: - if not (is_paddle_available() and is_librosa_available()): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - from .utils.dummy_paddle_and_librosa_objects import * # noqa F403 -else: - from .pipelines import AudioDiffusionPipeline, Mel diff --git a/spaces/2023Liu2023/bingo/src/lib/isomorphic/index.ts b/spaces/2023Liu2023/bingo/src/lib/isomorphic/index.ts deleted file mode 100644 index 738dc92f74079ab762d584fb7422a8c8c3b61547..0000000000000000000000000000000000000000 --- a/spaces/2023Liu2023/bingo/src/lib/isomorphic/index.ts +++ /dev/null @@ -1,17 +0,0 @@ -'use client' - -import Default from './browser' - -let exportsModel: any = {} - -if (process.browser) { - Object.assign(exportsModel, require('./browser').default) -} else { - Object.assign(exportsModel, require('./node').default) -} - -export default exportsModel! 
as typeof Default - -export const fetch: typeof Default.fetch = exportsModel!.fetch -export const WebSocket: typeof Default.WebSocket = exportsModel!.WebSocket -export const debug: typeof Default.debug = exportsModel!.debug diff --git a/spaces/7hao/bingo/src/lib/bots/bing/tts.ts b/spaces/7hao/bingo/src/lib/bots/bing/tts.ts deleted file mode 100644 index cd10b7d1d7581bf9cf46ff6755fcca550c558c9b..0000000000000000000000000000000000000000 --- a/spaces/7hao/bingo/src/lib/bots/bing/tts.ts +++ /dev/null @@ -1,82 +0,0 @@ -import { sleep } from './utils' - -const synth = window.speechSynthesis - -export class TTS { - currentText = '' - speakText = '' - private controller = new AbortController() - speaking = false - get isSpeaking() { - return this.speaking - } - finished = false - constructor() {} - abort = () => { - this.controller.abort() - } - - reset = () => { - this.speaking = false - this.finished = true - this.currentText = '' - this.speakText = '' - this.abort() - } - - speak = (text: string) => { - if (!synth || text?.trim()?.length < 2) { - return - } - this.currentText = text.replace(/[^\u4e00-\u9fa5_a-zA-Z0-9,。?,:;\.,:]+/g, '') - this.finished = false - this.loop() - } - - private async doSpeek() { - return new Promise((resolve) => { - const endIndex = this.finished ? this.currentText.length : - Math.max( - this.currentText.lastIndexOf('。'), - this.currentText.lastIndexOf(';'), - this.currentText.lastIndexOf('、'), - this.currentText.lastIndexOf('?'), - this.currentText.lastIndexOf('\n') - ) - const startIndex = this.speakText.length ? Math.max(0, this.currentText.lastIndexOf(this.speakText) + this.speakText.length) : 0 - - if (startIndex >= endIndex) { - return resolve(true) - } - const text = this.currentText.slice(startIndex, endIndex) - this.speakText = text - const utterThis = new SpeechSynthesisUtterance(text) - this.controller.signal.onabort = () => { - synth.cancel() - this.finished = true - resolve(false) - } - - utterThis.onend = function (event) { - resolve(true) - } - - utterThis.onerror = function (event) { - resolve(false) - } - - const voice = synth.getVoices().find(v => v.name.includes('Microsoft Yunxi Online')) ?? null - utterThis.voice = voice - synth.speak(utterThis) - }) - } - - private async loop() { - if (this.speaking) return - this.speaking = true - while(!this.finished) { - await Promise.all([sleep(1000), this.doSpeek()]) - } - this.speaking = false - } -} diff --git a/spaces/801artistry/RVC801/demucs/parser.py b/spaces/801artistry/RVC801/demucs/parser.py deleted file mode 100644 index 4e8a19cf976e3c6dfe411da64b8dce3e9a4548e0..0000000000000000000000000000000000000000 --- a/spaces/801artistry/RVC801/demucs/parser.py +++ /dev/null @@ -1,244 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
- -import argparse -import os -from pathlib import Path - - -def get_parser(): - parser = argparse.ArgumentParser("demucs", description="Train and evaluate Demucs.") - default_raw = None - default_musdb = None - if 'DEMUCS_RAW' in os.environ: - default_raw = Path(os.environ['DEMUCS_RAW']) - if 'DEMUCS_MUSDB' in os.environ: - default_musdb = Path(os.environ['DEMUCS_MUSDB']) - parser.add_argument( - "--raw", - type=Path, - default=default_raw, - help="Path to raw audio, can be faster, see python3 -m demucs.raw to extract.") - parser.add_argument("--no_raw", action="store_const", const=None, dest="raw") - parser.add_argument("-m", - "--musdb", - type=Path, - default=default_musdb, - help="Path to musdb root") - parser.add_argument("--is_wav", action="store_true", - help="Indicate that the MusDB dataset is in wav format (i.e. MusDB-HQ).") - parser.add_argument("--metadata", type=Path, default=Path("metadata/"), - help="Folder where metadata information is stored.") - parser.add_argument("--wav", type=Path, - help="Path to a wav dataset. This should contain a 'train' and a 'valid' " - "subfolder.") - parser.add_argument("--samplerate", type=int, default=44100) - parser.add_argument("--audio_channels", type=int, default=2) - parser.add_argument("--samples", - default=44100 * 10, - type=int, - help="number of samples to feed in") - parser.add_argument("--data_stride", - default=44100, - type=int, - help="Stride for chunks, shorter = longer epochs") - parser.add_argument("-w", "--workers", default=10, type=int, help="Loader workers") - parser.add_argument("--eval_workers", default=2, type=int, help="Final evaluation workers") - parser.add_argument("-d", - "--device", - help="Device to train on, default is cuda if available else cpu") - parser.add_argument("--eval_cpu", action="store_true", help="Eval on test will be run on cpu.") - parser.add_argument("--dummy", help="Dummy parameter, useful to create a new checkpoint file") - parser.add_argument("--test", help="Just run the test pipeline + one validation. " - "This should be a filename relative to the models/ folder.") - parser.add_argument("--test_pretrained", help="Just run the test pipeline + one validation, " - "on a pretrained model. 
") - - parser.add_argument("--rank", default=0, type=int) - parser.add_argument("--world_size", default=1, type=int) - parser.add_argument("--master") - - parser.add_argument("--checkpoints", - type=Path, - default=Path("checkpoints"), - help="Folder where to store checkpoints etc") - parser.add_argument("--evals", - type=Path, - default=Path("evals"), - help="Folder where to store evals and waveforms") - parser.add_argument("--save", - action="store_true", - help="Save estimated for the test set waveforms") - parser.add_argument("--logs", - type=Path, - default=Path("logs"), - help="Folder where to store logs") - parser.add_argument("--models", - type=Path, - default=Path("models"), - help="Folder where to store trained models") - parser.add_argument("-R", - "--restart", - action='store_true', - help='Restart training, ignoring previous run') - - parser.add_argument("--seed", type=int, default=42) - parser.add_argument("-e", "--epochs", type=int, default=180, help="Number of epochs") - parser.add_argument("-r", - "--repeat", - type=int, - default=2, - help="Repeat the train set, longer epochs") - parser.add_argument("-b", "--batch_size", type=int, default=64) - parser.add_argument("--lr", type=float, default=3e-4) - parser.add_argument("--mse", action="store_true", help="Use MSE instead of L1") - parser.add_argument("--init", help="Initialize from a pre-trained model.") - - # Augmentation options - parser.add_argument("--no_augment", - action="store_false", - dest="augment", - default=True, - help="No basic data augmentation.") - parser.add_argument("--repitch", type=float, default=0.2, - help="Probability to do tempo/pitch change") - parser.add_argument("--max_tempo", type=float, default=12, - help="Maximum relative tempo change in %% when using repitch.") - - parser.add_argument("--remix_group_size", - type=int, - default=4, - help="Shuffle sources using group of this size. 
Useful to somewhat " - "replicate multi-gpu training " - "on less GPUs.") - parser.add_argument("--shifts", - type=int, - default=10, - help="Number of random shifts used for the shift trick.") - parser.add_argument("--overlap", - type=float, - default=0.25, - help="Overlap when --split_valid is passed.") - - # See model.py for doc - parser.add_argument("--growth", - type=float, - default=2., - help="Number of channels between two layers will increase by this factor") - parser.add_argument("--depth", - type=int, - default=6, - help="Number of layers for the encoder and decoder") - parser.add_argument("--lstm_layers", type=int, default=2, help="Number of layers for the LSTM") - parser.add_argument("--channels", - type=int, - default=64, - help="Number of channels for the first encoder layer") - parser.add_argument("--kernel_size", - type=int, - default=8, - help="Kernel size for the (transposed) convolutions") - parser.add_argument("--conv_stride", - type=int, - default=4, - help="Stride for the (transposed) convolutions") - parser.add_argument("--context", - type=int, - default=3, - help="Context size for the decoder convolutions " - "before the transposed convolutions") - parser.add_argument("--rescale", - type=float, - default=0.1, - help="Initial weight rescale reference") - parser.add_argument("--no_resample", action="store_false", - default=True, dest="resample", - help="No Resampling of the input/output x2") - parser.add_argument("--no_glu", - action="store_false", - default=True, - dest="glu", - help="Replace all GLUs by ReLUs") - parser.add_argument("--no_rewrite", - action="store_false", - default=True, - dest="rewrite", - help="No 1x1 rewrite convolutions") - parser.add_argument("--normalize", action="store_true") - parser.add_argument("--no_norm_wav", action="store_false", dest='norm_wav', default=True) - - # Tasnet options - parser.add_argument("--tasnet", action="store_true") - parser.add_argument("--split_valid", - action="store_true", - help="Predict chunks by chunks for valid and test. Required for tasnet") - parser.add_argument("--X", type=int, default=8) - - # Other options - parser.add_argument("--show", - action="store_true", - help="Show model architecture, size and exit") - parser.add_argument("--save_model", action="store_true", - help="Skip traning, just save final model " - "for the current checkpoint value.") - parser.add_argument("--save_state", - help="Skip training, just save state " - "for the current checkpoint value. You should " - "provide a model name as argument.") - - # Quantization options - parser.add_argument("--q-min-size", type=float, default=1, - help="Only quantize layers over this size (in MB)") - parser.add_argument( - "--qat", type=int, help="If provided, use QAT training with that many bits.") - - parser.add_argument("--diffq", type=float, default=0) - parser.add_argument( - "--ms-target", type=float, default=162, - help="Model size target in MB, when using DiffQ. Best model will be kept " - "only if it is smaller than this target.") - - return parser - - -def get_name(parser, args): - """ - Return the name of an experiment given the args. Some parameters are ignored, - for instance --workers, as they do not impact the final result. 
- """ - ignore_args = set([ - "checkpoints", - "deterministic", - "eval", - "evals", - "eval_cpu", - "eval_workers", - "logs", - "master", - "rank", - "restart", - "save", - "save_model", - "save_state", - "show", - "workers", - "world_size", - ]) - parts = [] - name_args = dict(args.__dict__) - for name, value in name_args.items(): - if name in ignore_args: - continue - if value != parser.get_default(name): - if isinstance(value, Path): - parts.append(f"{name}={value.name}") - else: - parts.append(f"{name}={value}") - if parts: - name = " ".join(parts) - else: - name = "default" - return name diff --git a/spaces/AIGC-Audio/AudioGPT/audio_to_text/captioning/models/utils.py b/spaces/AIGC-Audio/AudioGPT/audio_to_text/captioning/models/utils.py deleted file mode 100644 index 3623cf43619a7a4ff5fa31f2b056378697b04d61..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/AudioGPT/audio_to_text/captioning/models/utils.py +++ /dev/null @@ -1,132 +0,0 @@ -import math - -import numpy as np -import torch -import torch.nn as nn - -from torch.nn.utils.rnn import PackedSequence, pack_padded_sequence, pad_packed_sequence - - -def sort_pack_padded_sequence(input, lengths): - sorted_lengths, indices = torch.sort(lengths, descending=True) - tmp = pack_padded_sequence(input[indices], sorted_lengths.cpu(), batch_first=True) - inv_ix = indices.clone() - inv_ix[indices] = torch.arange(0,len(indices)).type_as(inv_ix) - return tmp, inv_ix - -def pad_unsort_packed_sequence(input, inv_ix): - tmp, _ = pad_packed_sequence(input, batch_first=True) - tmp = tmp[inv_ix] - return tmp - -def pack_wrapper(module, attn_feats, attn_feat_lens): - packed, inv_ix = sort_pack_padded_sequence(attn_feats, attn_feat_lens) - if isinstance(module, torch.nn.RNNBase): - return pad_unsort_packed_sequence(module(packed)[0], inv_ix) - else: - return pad_unsort_packed_sequence(PackedSequence(module(packed[0]), packed[1]), inv_ix) - -def generate_length_mask(lens, max_length=None): - lens = torch.as_tensor(lens) - N = lens.size(0) - if max_length is None: - max_length = max(lens) - idxs = torch.arange(max_length).repeat(N).view(N, max_length) - idxs = idxs.to(lens.device) - mask = (idxs < lens.view(-1, 1)) - return mask - -def mean_with_lens(features, lens): - """ - features: [N, T, ...] (assume the second dimension represents length) - lens: [N,] - """ - lens = torch.as_tensor(lens) - if max(lens) != features.size(1): - max_length = features.size(1) - mask = generate_length_mask(lens, max_length) - else: - mask = generate_length_mask(lens) - mask = mask.to(features.device) # [N, T] - - while mask.ndim < features.ndim: - mask = mask.unsqueeze(-1) - feature_mean = features * mask - feature_mean = feature_mean.sum(1) - while lens.ndim < feature_mean.ndim: - lens = lens.unsqueeze(1) - feature_mean = feature_mean / lens.to(features.device) - # feature_mean = features * mask.unsqueeze(-1) - # feature_mean = feature_mean.sum(1) / lens.unsqueeze(1).to(features.device) - return feature_mean - -def max_with_lens(features, lens): - """ - features: [N, T, ...] 
(assume the second dimension represents length) - lens: [N,] - """ - lens = torch.as_tensor(lens) - mask = generate_length_mask(lens).to(features.device) # [N, T] - - feature_max = features.clone() - feature_max[~mask] = float("-inf") - feature_max, _ = feature_max.max(1) - return feature_max - -def repeat_tensor(x, n): - return x.unsqueeze(0).repeat(n, *([1] * len(x.shape))) - -def init(m, method="kaiming"): - if isinstance(m, (nn.Conv2d, nn.Conv1d)): - if method == "kaiming": - nn.init.kaiming_uniform_(m.weight) - elif method == "xavier": - nn.init.xavier_uniform_(m.weight) - else: - raise Exception(f"initialization method {method} not supported") - if m.bias is not None: - nn.init.constant_(m.bias, 0) - elif isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d)): - nn.init.constant_(m.weight, 1) - if m.bias is not None: - nn.init.constant_(m.bias, 0) - elif isinstance(m, nn.Linear): - if method == "kaiming": - nn.init.kaiming_uniform_(m.weight) - elif method == "xavier": - nn.init.xavier_uniform_(m.weight) - else: - raise Exception(f"initialization method {method} not supported") - if m.bias is not None: - nn.init.constant_(m.bias, 0) - elif isinstance(m, nn.Embedding): - if method == "kaiming": - nn.init.kaiming_uniform_(m.weight) - elif method == "xavier": - nn.init.xavier_uniform_(m.weight) - else: - raise Exception(f"initialization method {method} not supported") - - - - -class PositionalEncoding(nn.Module): - - def __init__(self, d_model, dropout=0.1, max_len=100): - super(PositionalEncoding, self).__init__() - self.dropout = nn.Dropout(p=dropout) - - pe = torch.zeros(max_len, d_model) - position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1) - div_term = torch.exp(torch.arange(0, d_model, 2).float() * \ - (-math.log(10000.0) / d_model)) - pe[:, 0::2] = torch.sin(position * div_term) - pe[:, 1::2] = torch.cos(position * div_term) - pe = pe.unsqueeze(0).transpose(0, 1) - # self.register_buffer("pe", pe) - self.register_parameter("pe", nn.Parameter(pe, requires_grad=False)) - - def forward(self, x): - # x: [T, N, E] - x = x + self.pe[:x.size(0), :] - return self.dropout(x) diff --git a/spaces/Afrihub/README/README.md b/spaces/Afrihub/README/README.md deleted file mode 100644 index 1a5b671f48d15ef90da1d86d15ee1171ed733594..0000000000000000000000000000000000000000 --- a/spaces/Afrihub/README/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: README -emoji: 🏢 -colorFrom: blue -colorTo: green -sdk: static -pinned: false ---- - -Edit this `README.md` markdown file to author your organization card. diff --git a/spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/reflection.py b/spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/reflection.py deleted file mode 100644 index c7c316398c2cd3dbf14a4891c12437c5322f2968..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/reflection.py +++ /dev/null @@ -1,128 +0,0 @@ -import asyncio -import logging -from typing import Any, Dict, List - -from datetime import datetime as dt -import datetime - -from pydantic import Field - -from agentverse.agents.simulation_agent.conversation import BaseAgent - -# from agentverse.environments.simulation_env.rules.base import Rule -from agentverse.environments.simulation_env.rules.base import SimulationRule as Rule -from agentverse.message import Message - -from . 
import env_registry as EnvironmentRegistry -from ..base import BaseEnvironment - -from pydantic import validator - - -@EnvironmentRegistry.register("reflection") -class ReflectionEnvironment(BaseEnvironment): - """ - Environment used in Observation-Planning-Reflection agent architecture. - - Args: - agents: List of agents - rule: Rule for the environment - max_turns: Maximum number of turns - cnt_turn: Current turn number - last_messages: Messages from last turn - rule_params: Variables set by the rule - current_time - time_delta: time difference between steps - """ - - agents: List[BaseAgent] - rule: Rule - max_turns: int = 10 - cnt_turn: int = 0 - last_messages: List[Message] = [] - rule_params: Dict = {} - current_time: dt = dt.now() - time_delta: int = 120 - # - - # @validator("time_delta") - # def convert_str_to_timedelta(cls, string): - # - # return datetime.timedelta(seconds=int(string)) - - def __init__(self, rule, **kwargs): - rule_config = rule - order_config = rule_config.get("order", {"type": "sequential"}) - visibility_config = rule_config.get("visibility", {"type": "all"}) - selector_config = rule_config.get("selector", {"type": "basic"}) - updater_config = rule_config.get("updater", {"type": "basic"}) - describer_config = rule_config.get("describer", {"type": "basic"}) - rule = Rule( - order_config, - visibility_config, - selector_config, - updater_config, - describer_config, - ) - - super().__init__(rule=rule, **kwargs) - - async def step(self) -> List[Message]: - """Run one step of the environment""" - - logging.log(logging.INFO, f"Tick tock. Current time: {self.current_time}") - - # Get the next agent index - agent_ids = self.rule.get_next_agent_idx(self) - - # Generate current environment description - env_descriptions = self.rule.get_env_description(self) - - # Generate the next message - messages = await asyncio.gather( - *[ - self.agents[i].astep(self.current_time, env_descriptions[i]) - for i in agent_ids - ] - ) - - # Some rules will select certain messages from all the messages - selected_messages = self.rule.select_message(self, messages) - self.last_messages = selected_messages - self.print_messages(selected_messages) - - # Update the memory of the agents - self.rule.update_memory(self) - - # Update the set of visible agents for each agent - self.rule.update_visible_agents(self) - - self.cnt_turn += 1 - - # update current_time - self.tick_tock() - - return selected_messages - - def print_messages(self, messages: List[Message]) -> None: - for message in messages: - if message is not None: - logging.info(f"{message.sender}: {message.content}") - - def reset(self) -> None: - """Reset the environment""" - self.cnt_turn = 0 - self.rule.reset() - BaseAgent.update_forward_refs() - for agent in self.agents: - agent.reset(environment=self) - - def is_done(self) -> bool: - """Check if the environment is done""" - return self.cnt_turn >= self.max_turns - - def tick_tock(self) -> None: - """Increment the time""" - self.current_time = self.current_time + datetime.timedelta( - seconds=self.time_delta - ) diff --git a/spaces/AgentVerse/agentVerse/agentverse/environments/tasksolving_env/rules/decision_maker/central.py b/spaces/AgentVerse/agentVerse/agentverse/environments/tasksolving_env/rules/decision_maker/central.py deleted file mode 100644 index 5d7bf57029bcf2bbe4894da96e1d070bca2dd7da..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/agentverse/environments/tasksolving_env/rules/decision_maker/central.py +++ /dev/null @@ -1,56 +0,0 @@ -from 
__future__ import annotations -import asyncio -from colorama import Fore - -from typing import TYPE_CHECKING, List - -from . import decision_maker_registry -from .base import BaseDecisionMaker -from agentverse.logging import typewriter_log, logger -from agentverse.message import Message - -if TYPE_CHECKING: - from agentverse.agents import BaseAgent, SolverAgent, CriticAgent - from agentverse.message import SolverMessage - - -@decision_maker_registry.register("central") -class CentralDecisionMaker(BaseDecisionMaker): - """ - Discuss in a central manner. - """ - - name: str = "central" - - async def astep( - self, - agents: List[BaseAgent], - task_description: str, - previous_plan: str = "No solution yet.", - advice: str = "No advice yet.", - *args, - **kwargs, - ) -> List[SolverMessage]: - if advice != "No advice yet.": - agents[1].add_message_to_memory( - [Message(content=advice, sender="Evaluator")] - ) - result = await agents[1].astep( - previous_plan, - advice, - task_description, - roles=", ".join( - [ - agent.role_description[0].lower() + agent.role_description[1:] - for agent in agents - ] - ), - ) - agents[1].add_message_to_memory([result]) - result = agents[0].step( - previous_plan, advice, task_description, chat_record=result.content - ) - return [result] - - def reset(self): - pass diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/anchor-plugin.d.ts b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/anchor-plugin.d.ts deleted file mode 100644 index 5a908ac6d26d22b1904c953d72634cc0a000384d..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/anchor-plugin.d.ts +++ /dev/null @@ -1,9 +0,0 @@ -import Anchor from './anchor' - -export default class AnchorPlugin extends Phaser.Plugins.BasePlugin { - add( - gameObject: Phaser.GameObjects.GameObject, - config?: Anchor.IConfig - ): Anchor; - -} \ No newline at end of file diff --git a/spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/criteria/id_loss.py b/spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/criteria/id_loss.py deleted file mode 100644 index a828023e115243e48918538d31b91d662cd12d0f..0000000000000000000000000000000000000000 --- a/spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/criteria/id_loss.py +++ /dev/null @@ -1,39 +0,0 @@ -import torch -from torch import nn - -from models.facial_recognition.model_irse import Backbone - - -class IDLoss(nn.Module): - def __init__(self, opts): - super(IDLoss, self).__init__() - print('Loading ResNet ArcFace') - self.facenet = Backbone(input_size=112, num_layers=50, drop_ratio=0.6, mode='ir_se') - self.facenet.load_state_dict(torch.load(opts.ir_se50_weights)) - self.pool = torch.nn.AdaptiveAvgPool2d((256, 256)) - self.face_pool = torch.nn.AdaptiveAvgPool2d((112, 112)) - self.facenet.eval() - self.opts = opts - - def extract_feats(self, x): - if x.shape[2] != 256: - x = self.pool(x) - x = x[:, :, 35:223, 32:220] # Crop interesting region - x = self.face_pool(x) - x_feats = self.facenet(x) - return x_feats - - def forward(self, y_hat, y): - n_samples = y.shape[0] - y_feats = self.extract_feats(y) # Otherwise use the feature from there - y_hat_feats = self.extract_feats(y_hat) - y_feats = y_feats.detach() - loss = 0 - sim_improvement = 0 - count = 0 - for i in range(n_samples): - diff_target = y_hat_feats[i].dot(y_feats[i]) - loss += 1 - diff_target - count += 1 - - return loss / count, sim_improvement / count diff --git a/spaces/Amrrs/DragGan-Inversion/legacy.py 
b/spaces/Amrrs/DragGan-Inversion/legacy.py deleted file mode 100644 index a874c38c2c943e632badb8e12f5a4297071827df..0000000000000000000000000000000000000000 --- a/spaces/Amrrs/DragGan-Inversion/legacy.py +++ /dev/null @@ -1,369 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -"""Converting legacy network pickle into the new format.""" - -import click -import pickle -import re -import copy -import numpy as np -import torch -import dnnlib -from torch_utils import misc - -# ---------------------------------------------------------------------------- - - -def load_network_pkl(f, force_fp16=False): - data = _LegacyUnpickler(f).load() - - # Legacy TensorFlow pickle => convert. - if isinstance(data, tuple) and len(data) == 3 and all(isinstance(net, _TFNetworkStub) for net in data): - tf_G, tf_D, tf_Gs = data - G = convert_tf_generator(tf_G) - D = convert_tf_discriminator(tf_D) - G_ema = convert_tf_generator(tf_Gs) - data = dict(G=G, D=D, G_ema=G_ema) - - # Add missing fields. - if 'training_set_kwargs' not in data: - data['training_set_kwargs'] = None - if 'augment_pipe' not in data: - data['augment_pipe'] = None - - # Validate contents. - assert isinstance(data['G'], torch.nn.Module) - assert isinstance(data['D'], torch.nn.Module) - assert isinstance(data['G_ema'], torch.nn.Module) - assert isinstance(data['training_set_kwargs'], (dict, type(None))) - assert isinstance(data['augment_pipe'], (torch.nn.Module, type(None))) - - # Force FP16. 
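For orientation, the sketch below shows how this loader is typically driven; it mirrors the `convert_network_pickle` CLI defined at the bottom of this file, and the checkpoint URL is the one quoted in that command's docstring. It assumes the repository's `dnnlib` and `torch_utils` packages are importable.

```py
# Minimal usage sketch: open a (legacy or native) pickle and pull out the EMA generator.
import dnnlib
import legacy

url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2/networks/stylegan2-cat-config-f.pkl'
with dnnlib.util.open_url(url) as f:
    data = legacy.load_network_pkl(f)            # dict with 'G', 'D', 'G_ema', ...
G_ema = data['G_ema'].eval().requires_grad_(False)
print(type(G_ema).__name__, sum(p.numel() for p in G_ema.parameters()))
```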
- if force_fp16: - for key in ['G', 'D', 'G_ema']: - old = data[key] - kwargs = copy.deepcopy(old.init_kwargs) - fp16_kwargs = kwargs.get('synthesis_kwargs', kwargs) - fp16_kwargs.num_fp16_res = 4 - fp16_kwargs.conv_clamp = 256 - if kwargs != old.init_kwargs: - new = type(old)(**kwargs).eval().requires_grad_(False) - misc.copy_params_and_buffers(old, new, require_all=True) - data[key] = new - return data - -# ---------------------------------------------------------------------------- - - -class _TFNetworkStub(dnnlib.EasyDict): - pass - - -class _LegacyUnpickler(pickle.Unpickler): - def find_class(self, module, name): - if module == 'dnnlib.tflib.network' and name == 'Network': - return _TFNetworkStub - return super().find_class(module, name) - -# ---------------------------------------------------------------------------- - - -def _collect_tf_params(tf_net): - # pylint: disable=protected-access - tf_params = dict() - - def recurse(prefix, tf_net): - for name, value in tf_net.variables: - tf_params[prefix + name] = value - for name, comp in tf_net.components.items(): - recurse(prefix + name + '/', comp) - recurse('', tf_net) - return tf_params - -# ---------------------------------------------------------------------------- - - -def _populate_module_params(module, *patterns): - for name, tensor in misc.named_params_and_buffers(module): - found = False - value = None - for pattern, value_fn in zip(patterns[0::2], patterns[1::2]): - match = re.fullmatch(pattern, name) - if match: - found = True - if value_fn is not None: - value = value_fn(*match.groups()) - break - try: - assert found - if value is not None: - tensor.copy_(torch.from_numpy(np.array(value))) - except: - print(name, list(tensor.shape)) - raise - -# ---------------------------------------------------------------------------- - - -def convert_tf_generator(tf_G): - if tf_G.version < 4: - raise ValueError('TensorFlow pickle version too low') - - # Collect kwargs. - tf_kwargs = tf_G.static_kwargs - known_kwargs = set() - - def kwarg(tf_name, default=None, none=None): - known_kwargs.add(tf_name) - val = tf_kwargs.get(tf_name, default) - return val if val is not None else none - - # Convert kwargs. - from training import networks_stylegan2 - network_class = networks_stylegan2.Generator - kwargs = dnnlib.EasyDict( - z_dim=kwarg('latent_size', 512), - c_dim=kwarg('label_size', 0), - w_dim=kwarg('dlatent_size', 512), - img_resolution=kwarg('resolution', 1024), - img_channels=kwarg('num_channels', 3), - channel_base=kwarg('fmap_base', 16384) * 2, - channel_max=kwarg('fmap_max', 512), - num_fp16_res=kwarg('num_fp16_res', 0), - conv_clamp=kwarg('conv_clamp', None), - architecture=kwarg('architecture', 'skip'), - resample_filter=kwarg('resample_kernel', [1, 3, 3, 1]), - use_noise=kwarg('use_noise', True), - activation=kwarg('nonlinearity', 'lrelu'), - mapping_kwargs=dnnlib.EasyDict( - num_layers=kwarg('mapping_layers', 8), - embed_features=kwarg('label_fmaps', None), - layer_features=kwarg('mapping_fmaps', None), - activation=kwarg('mapping_nonlinearity', 'lrelu'), - lr_multiplier=kwarg('mapping_lrmul', 0.01), - w_avg_beta=kwarg('w_avg_beta', 0.995, none=1), - ), - ) - - # Check for unknown kwargs. - kwarg('truncation_psi') - kwarg('truncation_cutoff') - kwarg('style_mixing_prob') - kwarg('structure') - kwarg('conditioning') - kwarg('fused_modconv') - unknown_kwargs = list(set(tf_kwargs.keys()) - known_kwargs) - if len(unknown_kwargs) > 0: - raise ValueError('Unknown TensorFlow kwarg', unknown_kwargs[0]) - - # Collect params. 
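The conversion below leans on `_populate_module_params`, defined above: patterns and value functions are passed as alternating arguments, each regex is matched with `re.fullmatch` against a parameter or buffer name, and the paired callable (receiving the regex capture groups, if any) returns the numpy value to copy in. A toy sketch of that calling convention, using made-up tensors rather than real StyleGAN weights and assuming the repo's `torch_utils` is importable:

```py
# Toy illustration of the alternating (pattern, value_fn) convention.
import numpy as np
import torch

net = torch.nn.Linear(4, 4)
fake_tf = {'Dense0/weight': np.eye(4, dtype=np.float32),
           'Dense0/bias': np.zeros(4, dtype=np.float32)}
_populate_module_params(net,
    r'weight', lambda: fake_tf['Dense0/weight'].transpose(),  # TF stores [in, out]
    r'bias',   lambda: fake_tf['Dense0/bias'],
)
```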
- tf_params = _collect_tf_params(tf_G) - for name, value in list(tf_params.items()): - match = re.fullmatch(r'ToRGB_lod(\d+)/(.*)', name) - if match: - r = kwargs.img_resolution // (2 ** int(match.group(1))) - tf_params[f'{r}x{r}/ToRGB/{match.group(2)}'] = value - kwargs.synthesis.kwargs.architecture = 'orig' - # for name, value in tf_params.items(): print(f'{name:<50s}{list(value.shape)}') - - # Convert params. - G = network_class(**kwargs).eval().requires_grad_(False) - # pylint: disable=unnecessary-lambda - # pylint: disable=f-string-without-interpolation - _populate_module_params(G, - r'mapping\.w_avg', lambda: tf_params[f'dlatent_avg'], - r'mapping\.embed\.weight', lambda: tf_params[f'mapping/LabelEmbed/weight'].transpose( - ), - r'mapping\.embed\.bias', lambda: tf_params[f'mapping/LabelEmbed/bias'], - r'mapping\.fc(\d+)\.weight', lambda i: tf_params[f'mapping/Dense{i}/weight'].transpose( - ), - r'mapping\.fc(\d+)\.bias', lambda i: tf_params[f'mapping/Dense{i}/bias'], - r'synthesis\.b4\.const', lambda: tf_params[f'synthesis/4x4/Const/const'][0], - r'synthesis\.b4\.conv1\.weight', lambda: tf_params[f'synthesis/4x4/Conv/weight'].transpose( - 3, 2, 0, 1), - r'synthesis\.b4\.conv1\.bias', lambda: tf_params[ - f'synthesis/4x4/Conv/bias'], - r'synthesis\.b4\.conv1\.noise_const', lambda: tf_params[ - f'synthesis/noise0'][0, 0], - r'synthesis\.b4\.conv1\.noise_strength', lambda: tf_params[ - f'synthesis/4x4/Conv/noise_strength'], - r'synthesis\.b4\.conv1\.affine\.weight', lambda: tf_params[ - f'synthesis/4x4/Conv/mod_weight'].transpose(), - r'synthesis\.b4\.conv1\.affine\.bias', lambda: tf_params[ - f'synthesis/4x4/Conv/mod_bias'] + 1, - r'synthesis\.b(\d+)\.conv0\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Conv0_up/weight'][::-1, ::-1].transpose( - 3, 2, 0, 1), - r'synthesis\.b(\d+)\.conv0\.bias', lambda r: tf_params[ - f'synthesis/{r}x{r}/Conv0_up/bias'], - r'synthesis\.b(\d+)\.conv0\.noise_const', lambda r: tf_params[ - f'synthesis/noise{int(np.log2(int(r)))*2-5}'][0, 0], - r'synthesis\.b(\d+)\.conv0\.noise_strength', lambda r: tf_params[ - f'synthesis/{r}x{r}/Conv0_up/noise_strength'], - r'synthesis\.b(\d+)\.conv0\.affine\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Conv0_up/mod_weight'].transpose( - ), - r'synthesis\.b(\d+)\.conv0\.affine\.bias', lambda r: tf_params[ - f'synthesis/{r}x{r}/Conv0_up/mod_bias'] + 1, - r'synthesis\.b(\d+)\.conv1\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Conv1/weight'].transpose( - 3, 2, 0, 1), - r'synthesis\.b(\d+)\.conv1\.bias', lambda r: tf_params[ - f'synthesis/{r}x{r}/Conv1/bias'], - r'synthesis\.b(\d+)\.conv1\.noise_const', lambda r: tf_params[ - f'synthesis/noise{int(np.log2(int(r)))*2-4}'][0, 0], - r'synthesis\.b(\d+)\.conv1\.noise_strength', lambda r: tf_params[ - f'synthesis/{r}x{r}/Conv1/noise_strength'], - r'synthesis\.b(\d+)\.conv1\.affine\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Conv1/mod_weight'].transpose( - ), - r'synthesis\.b(\d+)\.conv1\.affine\.bias', lambda r: tf_params[ - f'synthesis/{r}x{r}/Conv1/mod_bias'] + 1, - r'synthesis\.b(\d+)\.torgb\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/ToRGB/weight'].transpose( - 3, 2, 0, 1), - r'synthesis\.b(\d+)\.torgb\.bias', lambda r: tf_params[ - f'synthesis/{r}x{r}/ToRGB/bias'], - r'synthesis\.b(\d+)\.torgb\.affine\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/ToRGB/mod_weight'].transpose( - ), - r'synthesis\.b(\d+)\.torgb\.affine\.bias', lambda r: tf_params[ - f'synthesis/{r}x{r}/ToRGB/mod_bias'] + 1, - r'synthesis\.b(\d+)\.skip\.weight', lambda r: 
tf_params[f'synthesis/{r}x{r}/Skip/weight'][::-1, ::-1].transpose( - 3, 2, 0, 1), - r'.*\.resample_filter', None, - r'.*\.act_filter', None, - ) - return G - -# ---------------------------------------------------------------------------- - - -def convert_tf_discriminator(tf_D): - if tf_D.version < 4: - raise ValueError('TensorFlow pickle version too low') - - # Collect kwargs. - tf_kwargs = tf_D.static_kwargs - known_kwargs = set() - - def kwarg(tf_name, default=None): - known_kwargs.add(tf_name) - return tf_kwargs.get(tf_name, default) - - # Convert kwargs. - kwargs = dnnlib.EasyDict( - c_dim=kwarg('label_size', 0), - img_resolution=kwarg('resolution', 1024), - img_channels=kwarg('num_channels', 3), - architecture=kwarg('architecture', 'resnet'), - channel_base=kwarg('fmap_base', 16384) * 2, - channel_max=kwarg('fmap_max', 512), - num_fp16_res=kwarg('num_fp16_res', 0), - conv_clamp=kwarg('conv_clamp', None), - cmap_dim=kwarg('mapping_fmaps', None), - block_kwargs=dnnlib.EasyDict( - activation=kwarg('nonlinearity', 'lrelu'), - resample_filter=kwarg('resample_kernel', [1, 3, 3, 1]), - freeze_layers=kwarg('freeze_layers', 0), - ), - mapping_kwargs=dnnlib.EasyDict( - num_layers=kwarg('mapping_layers', 0), - embed_features=kwarg('mapping_fmaps', None), - layer_features=kwarg('mapping_fmaps', None), - activation=kwarg('nonlinearity', 'lrelu'), - lr_multiplier=kwarg('mapping_lrmul', 0.1), - ), - epilogue_kwargs=dnnlib.EasyDict( - mbstd_group_size=kwarg('mbstd_group_size', None), - mbstd_num_channels=kwarg('mbstd_num_features', 1), - activation=kwarg('nonlinearity', 'lrelu'), - ), - ) - - # Check for unknown kwargs. - kwarg('structure') - kwarg('conditioning') - unknown_kwargs = list(set(tf_kwargs.keys()) - known_kwargs) - if len(unknown_kwargs) > 0: - raise ValueError('Unknown TensorFlow kwarg', unknown_kwargs[0]) - - # Collect params. - tf_params = _collect_tf_params(tf_D) - for name, value in list(tf_params.items()): - match = re.fullmatch(r'FromRGB_lod(\d+)/(.*)', name) - if match: - r = kwargs.img_resolution // (2 ** int(match.group(1))) - tf_params[f'{r}x{r}/FromRGB/{match.group(2)}'] = value - kwargs.architecture = 'orig' - # for name, value in tf_params.items(): print(f'{name:<50s}{list(value.shape)}') - - # Convert params. 
- from training import networks_stylegan2 - D = networks_stylegan2.Discriminator(**kwargs).eval().requires_grad_(False) - # pylint: disable=unnecessary-lambda - # pylint: disable=f-string-without-interpolation - _populate_module_params(D, - r'b(\d+)\.fromrgb\.weight', lambda r: tf_params[f'{r}x{r}/FromRGB/weight'].transpose( - 3, 2, 0, 1), - r'b(\d+)\.fromrgb\.bias', lambda r: tf_params[f'{r}x{r}/FromRGB/bias'], - r'b(\d+)\.conv(\d+)\.weight', lambda r, i: tf_params[f'{r}x{r}/Conv{i}{["","_down"][int(i)]}/weight'].transpose( - 3, 2, 0, 1), - r'b(\d+)\.conv(\d+)\.bias', lambda r, i: tf_params[ - f'{r}x{r}/Conv{i}{["","_down"][int(i)]}/bias'], - r'b(\d+)\.skip\.weight', lambda r: tf_params[f'{r}x{r}/Skip/weight'].transpose( - 3, 2, 0, 1), - r'mapping\.embed\.weight', lambda: tf_params[f'LabelEmbed/weight'].transpose( - ), - r'mapping\.embed\.bias', lambda: tf_params[f'LabelEmbed/bias'], - r'mapping\.fc(\d+)\.weight', lambda i: tf_params[f'Mapping{i}/weight'].transpose( - ), - r'mapping\.fc(\d+)\.bias', lambda i: tf_params[f'Mapping{i}/bias'], - r'b4\.conv\.weight', lambda: tf_params[f'4x4/Conv/weight'].transpose( - 3, 2, 0, 1), - r'b4\.conv\.bias', lambda: tf_params[f'4x4/Conv/bias'], - r'b4\.fc\.weight', lambda: tf_params[f'4x4/Dense0/weight'].transpose( - ), - r'b4\.fc\.bias', lambda: tf_params[f'4x4/Dense0/bias'], - r'b4\.out\.weight', lambda: tf_params[f'Output/weight'].transpose( - ), - r'b4\.out\.bias', lambda: tf_params[f'Output/bias'], - r'.*\.resample_filter', None, - ) - return D - -# ---------------------------------------------------------------------------- - - -@click.command() -@click.option('--source', help='Input pickle', required=True, metavar='PATH') -@click.option('--dest', help='Output pickle', required=True, metavar='PATH') -@click.option('--force-fp16', help='Force the networks to use FP16', type=bool, default=False, metavar='BOOL', show_default=True) -def convert_network_pickle(source, dest, force_fp16): - """Convert legacy network pickle into the native PyTorch format. - - The tool is able to load the main network configurations exported using the TensorFlow version of StyleGAN2 or StyleGAN2-ADA. - It does not support e.g. StyleGAN2-ADA comparison methods, StyleGAN2 configs A-D, or StyleGAN1 networks. 
- - Example: - - \b - python legacy.py \\ - --source=https://nvlabs-fi-cdn.nvidia.com/stylegan2/networks/stylegan2-cat-config-f.pkl \\ - --dest=stylegan2-cat-config-f.pkl - """ - print(f'Loading "{source}"...') - with dnnlib.util.open_url(source) as f: - data = load_network_pkl(f, force_fp16=force_fp16) - print(f'Saving "{dest}"...') - with open(dest, 'wb') as f: - pickle.dump(data, f) - print('Done.') - -# ---------------------------------------------------------------------------- - - -if __name__ == "__main__": - convert_network_pickle() # pylint: disable=no-value-for-parameter - -# ---------------------------------------------------------------------------- diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/textual_inversion/textual_inversion_flax.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/textual_inversion/textual_inversion_flax.py deleted file mode 100644 index 66127ad60be94193959bbb7510337193a7797a16..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/textual_inversion/textual_inversion_flax.py +++ /dev/null @@ -1,681 +0,0 @@ -import argparse -import logging -import math -import os -import random -from pathlib import Path - -import jax -import jax.numpy as jnp -import numpy as np -import optax -import PIL -import torch -import torch.utils.checkpoint -import transformers -from flax import jax_utils -from flax.training import train_state -from flax.training.common_utils import shard -from huggingface_hub import create_repo, upload_folder - -# TODO: remove and import from diffusers.utils when the new version of diffusers is released -from packaging import version -from PIL import Image -from torch.utils.data import Dataset -from torchvision import transforms -from tqdm.auto import tqdm -from transformers import CLIPImageProcessor, CLIPTokenizer, FlaxCLIPTextModel, set_seed - -from diffusers import ( - FlaxAutoencoderKL, - FlaxDDPMScheduler, - FlaxPNDMScheduler, - FlaxStableDiffusionPipeline, - FlaxUNet2DConditionModel, -) -from diffusers.pipelines.stable_diffusion import FlaxStableDiffusionSafetyChecker -from diffusers.utils import check_min_version - - -if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"): - PIL_INTERPOLATION = { - "linear": PIL.Image.Resampling.BILINEAR, - "bilinear": PIL.Image.Resampling.BILINEAR, - "bicubic": PIL.Image.Resampling.BICUBIC, - "lanczos": PIL.Image.Resampling.LANCZOS, - "nearest": PIL.Image.Resampling.NEAREST, - } -else: - PIL_INTERPOLATION = { - "linear": PIL.Image.LINEAR, - "bilinear": PIL.Image.BILINEAR, - "bicubic": PIL.Image.BICUBIC, - "lanczos": PIL.Image.LANCZOS, - "nearest": PIL.Image.NEAREST, - } -# ------------------------------------------------------------------------------ - -# Will error if the minimal version of diffusers is not installed. Remove at your own risks. 
-check_min_version("0.19.0") - -logger = logging.getLogger(__name__) - - -def parse_args(): - parser = argparse.ArgumentParser(description="Simple example of a training script.") - parser.add_argument( - "--pretrained_model_name_or_path", - type=str, - default=None, - required=True, - help="Path to pretrained model or model identifier from huggingface.co/models.", - ) - parser.add_argument( - "--tokenizer_name", - type=str, - default=None, - help="Pretrained tokenizer name or path if not the same as model_name", - ) - parser.add_argument( - "--train_data_dir", type=str, default=None, required=True, help="A folder containing the training data." - ) - parser.add_argument( - "--placeholder_token", - type=str, - default=None, - required=True, - help="A token to use as a placeholder for the concept.", - ) - parser.add_argument( - "--initializer_token", type=str, default=None, required=True, help="A token to use as initializer word." - ) - parser.add_argument("--learnable_property", type=str, default="object", help="Choose between 'object' and 'style'") - parser.add_argument("--repeats", type=int, default=100, help="How many times to repeat the training data.") - parser.add_argument( - "--output_dir", - type=str, - default="text-inversion-model", - help="The output directory where the model predictions and checkpoints will be written.", - ) - parser.add_argument("--seed", type=int, default=42, help="A seed for reproducible training.") - parser.add_argument( - "--resolution", - type=int, - default=512, - help=( - "The resolution for input images, all the images in the train/validation dataset will be resized to this" - " resolution" - ), - ) - parser.add_argument( - "--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution." - ) - parser.add_argument( - "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader." - ) - parser.add_argument("--num_train_epochs", type=int, default=100) - parser.add_argument( - "--max_train_steps", - type=int, - default=5000, - help="Total number of training steps to perform. If provided, overrides num_train_epochs.", - ) - parser.add_argument( - "--save_steps", - type=int, - default=500, - help="Save learned_embeds.bin every X updates steps.", - ) - parser.add_argument( - "--learning_rate", - type=float, - default=1e-4, - help="Initial learning rate (after the potential warmup period) to use.", - ) - parser.add_argument( - "--scale_lr", - action="store_true", - default=True, - help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", - ) - parser.add_argument( - "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." - ) - parser.add_argument( - "--revision", - type=str, - default=None, - required=False, - help="Revision of pretrained model identifier from huggingface.co/models.", - ) - parser.add_argument( - "--lr_scheduler", - type=str, - default="constant", - help=( - 'The scheduler type to use. 
Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' - ' "constant", "constant_with_warmup"]' - ), - ) - parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") - parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") - parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") - parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") - parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") - parser.add_argument( - "--use_auth_token", - action="store_true", - help=( - "Will use the token generated when running `huggingface-cli login` (necessary to use this script with" - " private models)." - ), - ) - parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") - parser.add_argument( - "--hub_model_id", - type=str, - default=None, - help="The name of the repository to keep in sync with the local `output_dir`.", - ) - parser.add_argument( - "--logging_dir", - type=str, - default="logs", - help=( - "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" - " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." - ), - ) - parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") - - args = parser.parse_args() - env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) - if env_local_rank != -1 and env_local_rank != args.local_rank: - args.local_rank = env_local_rank - - if args.train_data_dir is None: - raise ValueError("You must specify a train data directory.") - - return args - - -imagenet_templates_small = [ - "a photo of a {}", - "a rendering of a {}", - "a cropped photo of the {}", - "the photo of a {}", - "a photo of a clean {}", - "a photo of a dirty {}", - "a dark photo of the {}", - "a photo of my {}", - "a photo of the cool {}", - "a close-up photo of a {}", - "a bright photo of the {}", - "a cropped photo of a {}", - "a photo of the {}", - "a good photo of the {}", - "a photo of one {}", - "a close-up photo of the {}", - "a rendition of the {}", - "a photo of the clean {}", - "a rendition of a {}", - "a photo of a nice {}", - "a good photo of a {}", - "a photo of the nice {}", - "a photo of the small {}", - "a photo of the weird {}", - "a photo of the large {}", - "a photo of a cool {}", - "a photo of a small {}", -] - -imagenet_style_templates_small = [ - "a painting in the style of {}", - "a rendering in the style of {}", - "a cropped painting in the style of {}", - "the painting in the style of {}", - "a clean painting in the style of {}", - "a dirty painting in the style of {}", - "a dark painting in the style of {}", - "a picture in the style of {}", - "a cool painting in the style of {}", - "a close-up painting in the style of {}", - "a bright painting in the style of {}", - "a cropped painting in the style of {}", - "a good painting in the style of {}", - "a close-up painting in the style of {}", - "a rendition in the style of {}", - "a nice painting in the style of {}", - "a small painting in the style of {}", - "a weird painting in the style of {}", - "a large painting in the style of {}", -] - - -class TextualInversionDataset(Dataset): - def __init__( - self, - data_root, - tokenizer, - learnable_property="object", # [object, style] - size=512, - repeats=100, - 
interpolation="bicubic", - flip_p=0.5, - set="train", - placeholder_token="*", - center_crop=False, - ): - self.data_root = data_root - self.tokenizer = tokenizer - self.learnable_property = learnable_property - self.size = size - self.placeholder_token = placeholder_token - self.center_crop = center_crop - self.flip_p = flip_p - - self.image_paths = [os.path.join(self.data_root, file_path) for file_path in os.listdir(self.data_root)] - - self.num_images = len(self.image_paths) - self._length = self.num_images - - if set == "train": - self._length = self.num_images * repeats - - self.interpolation = { - "linear": PIL_INTERPOLATION["linear"], - "bilinear": PIL_INTERPOLATION["bilinear"], - "bicubic": PIL_INTERPOLATION["bicubic"], - "lanczos": PIL_INTERPOLATION["lanczos"], - }[interpolation] - - self.templates = imagenet_style_templates_small if learnable_property == "style" else imagenet_templates_small - self.flip_transform = transforms.RandomHorizontalFlip(p=self.flip_p) - - def __len__(self): - return self._length - - def __getitem__(self, i): - example = {} - image = Image.open(self.image_paths[i % self.num_images]) - - if not image.mode == "RGB": - image = image.convert("RGB") - - placeholder_string = self.placeholder_token - text = random.choice(self.templates).format(placeholder_string) - - example["input_ids"] = self.tokenizer( - text, - padding="max_length", - truncation=True, - max_length=self.tokenizer.model_max_length, - return_tensors="pt", - ).input_ids[0] - - # default to score-sde preprocessing - img = np.array(image).astype(np.uint8) - - if self.center_crop: - crop = min(img.shape[0], img.shape[1]) - ( - h, - w, - ) = ( - img.shape[0], - img.shape[1], - ) - img = img[(h - crop) // 2 : (h + crop) // 2, (w - crop) // 2 : (w + crop) // 2] - - image = Image.fromarray(img) - image = image.resize((self.size, self.size), resample=self.interpolation) - - image = self.flip_transform(image) - image = np.array(image).astype(np.uint8) - image = (image / 127.5 - 1.0).astype(np.float32) - - example["pixel_values"] = torch.from_numpy(image).permute(2, 0, 1) - return example - - -def resize_token_embeddings(model, new_num_tokens, initializer_token_id, placeholder_token_id, rng): - if model.config.vocab_size == new_num_tokens or new_num_tokens is None: - return - model.config.vocab_size = new_num_tokens - - params = model.params - old_embeddings = params["text_model"]["embeddings"]["token_embedding"]["embedding"] - old_num_tokens, emb_dim = old_embeddings.shape - - initializer = jax.nn.initializers.normal() - - new_embeddings = initializer(rng, (new_num_tokens, emb_dim)) - new_embeddings = new_embeddings.at[:old_num_tokens].set(old_embeddings) - new_embeddings = new_embeddings.at[placeholder_token_id].set(new_embeddings[initializer_token_id]) - params["text_model"]["embeddings"]["token_embedding"]["embedding"] = new_embeddings - - model.params = params - return model - - -def get_params_to_save(params): - return jax.device_get(jax.tree_util.tree_map(lambda x: x[0], params)) - - -def main(): - args = parse_args() - - if args.seed is not None: - set_seed(args.seed) - - if jax.process_index() == 0: - if args.output_dir is not None: - os.makedirs(args.output_dir, exist_ok=True) - - if args.push_to_hub: - repo_id = create_repo( - repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token - ).repo_id - - # Make one log on every process with the configuration for debugging. 
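A quick note on `resize_token_embeddings` above: the enlarged table is drawn from a normal initializer, the old vocabulary rows are copied back in, and the placeholder row is then overwritten with a copy of the initializer token's embedding via JAX's functional `.at[...].set(...)` update. A toy illustration of that last step, with made-up sizes rather than the real CLIP table:

```py
# Toy illustration of the .at[...].set(...) update used to seed the placeholder token.
import jax.numpy as jnp

emb = jnp.arange(12.0).reshape(4, 3)      # pretend vocab of 4 tokens, embedding dim 3
initializer_token_id, placeholder_token_id = 1, 3
emb = emb.at[placeholder_token_id].set(emb[initializer_token_id])
assert (emb[placeholder_token_id] == emb[initializer_token_id]).all()
```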
- logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - level=logging.INFO, - ) - # Setup logging, we only want one process per machine to log things on the screen. - logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR) - if jax.process_index() == 0: - transformers.utils.logging.set_verbosity_info() - else: - transformers.utils.logging.set_verbosity_error() - - # Load the tokenizer and add the placeholder token as a additional special token - if args.tokenizer_name: - tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name) - elif args.pretrained_model_name_or_path: - tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer") - - # Add the placeholder token in tokenizer - num_added_tokens = tokenizer.add_tokens(args.placeholder_token) - if num_added_tokens == 0: - raise ValueError( - f"The tokenizer already contains the token {args.placeholder_token}. Please pass a different" - " `placeholder_token` that is not already in the tokenizer." - ) - - # Convert the initializer_token, placeholder_token to ids - token_ids = tokenizer.encode(args.initializer_token, add_special_tokens=False) - # Check if initializer_token is a single token or a sequence of tokens - if len(token_ids) > 1: - raise ValueError("The initializer token must be a single token.") - - initializer_token_id = token_ids[0] - placeholder_token_id = tokenizer.convert_tokens_to_ids(args.placeholder_token) - - # Load models and create wrapper for stable diffusion - text_encoder = FlaxCLIPTextModel.from_pretrained( - args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision - ) - vae, vae_params = FlaxAutoencoderKL.from_pretrained( - args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision - ) - unet, unet_params = FlaxUNet2DConditionModel.from_pretrained( - args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision - ) - - # Create sampling rng - rng = jax.random.PRNGKey(args.seed) - rng, _ = jax.random.split(rng) - # Resize the token embeddings as we are adding new special tokens to the tokenizer - text_encoder = resize_token_embeddings( - text_encoder, len(tokenizer), initializer_token_id, placeholder_token_id, rng - ) - original_token_embeds = text_encoder.params["text_model"]["embeddings"]["token_embedding"]["embedding"] - - train_dataset = TextualInversionDataset( - data_root=args.train_data_dir, - tokenizer=tokenizer, - size=args.resolution, - placeholder_token=args.placeholder_token, - repeats=args.repeats, - learnable_property=args.learnable_property, - center_crop=args.center_crop, - set="train", - ) - - def collate_fn(examples): - pixel_values = torch.stack([example["pixel_values"] for example in examples]) - input_ids = torch.stack([example["input_ids"] for example in examples]) - - batch = {"pixel_values": pixel_values, "input_ids": input_ids} - batch = {k: v.numpy() for k, v in batch.items()} - - return batch - - total_train_batch_size = args.train_batch_size * jax.local_device_count() - train_dataloader = torch.utils.data.DataLoader( - train_dataset, batch_size=total_train_batch_size, shuffle=True, drop_last=True, collate_fn=collate_fn - ) - - # Optimization - if args.scale_lr: - args.learning_rate = args.learning_rate * total_train_batch_size - - constant_scheduler = optax.constant_schedule(args.learning_rate) - - optimizer = optax.adamw( - learning_rate=constant_scheduler, - b1=args.adam_beta1, - b2=args.adam_beta2, - 
eps=args.adam_epsilon, - weight_decay=args.adam_weight_decay, - ) - - def create_mask(params, label_fn): - def _map(params, mask, label_fn): - for k in params: - if label_fn(k): - mask[k] = "token_embedding" - else: - if isinstance(params[k], dict): - mask[k] = {} - _map(params[k], mask[k], label_fn) - else: - mask[k] = "zero" - - mask = {} - _map(params, mask, label_fn) - return mask - - def zero_grads(): - # from https://github.com/deepmind/optax/issues/159#issuecomment-896459491 - def init_fn(_): - return () - - def update_fn(updates, state, params=None): - return jax.tree_util.tree_map(jnp.zeros_like, updates), () - - return optax.GradientTransformation(init_fn, update_fn) - - # Zero out gradients of layers other than the token embedding layer - tx = optax.multi_transform( - {"token_embedding": optimizer, "zero": zero_grads()}, - create_mask(text_encoder.params, lambda s: s == "token_embedding"), - ) - - state = train_state.TrainState.create(apply_fn=text_encoder.__call__, params=text_encoder.params, tx=tx) - - noise_scheduler = FlaxDDPMScheduler( - beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000 - ) - noise_scheduler_state = noise_scheduler.create_state() - - # Initialize our training - train_rngs = jax.random.split(rng, jax.local_device_count()) - - # Define gradient train step fn - def train_step(state, vae_params, unet_params, batch, train_rng): - dropout_rng, sample_rng, new_train_rng = jax.random.split(train_rng, 3) - - def compute_loss(params): - vae_outputs = vae.apply( - {"params": vae_params}, batch["pixel_values"], deterministic=True, method=vae.encode - ) - latents = vae_outputs.latent_dist.sample(sample_rng) - # (NHWC) -> (NCHW) - latents = jnp.transpose(latents, (0, 3, 1, 2)) - latents = latents * vae.config.scaling_factor - - noise_rng, timestep_rng = jax.random.split(sample_rng) - noise = jax.random.normal(noise_rng, latents.shape) - bsz = latents.shape[0] - timesteps = jax.random.randint( - timestep_rng, - (bsz,), - 0, - noise_scheduler.config.num_train_timesteps, - ) - noisy_latents = noise_scheduler.add_noise(noise_scheduler_state, latents, noise, timesteps) - encoder_hidden_states = state.apply_fn( - batch["input_ids"], params=params, dropout_rng=dropout_rng, train=True - )[0] - # Predict the noise residual and compute loss - model_pred = unet.apply( - {"params": unet_params}, noisy_latents, timesteps, encoder_hidden_states, train=False - ).sample - - # Get the target for loss depending on the prediction type - if noise_scheduler.config.prediction_type == "epsilon": - target = noise - elif noise_scheduler.config.prediction_type == "v_prediction": - target = noise_scheduler.get_velocity(noise_scheduler_state, latents, noise, timesteps) - else: - raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") - - loss = (target - model_pred) ** 2 - loss = loss.mean() - - return loss - - grad_fn = jax.value_and_grad(compute_loss) - loss, grad = grad_fn(state.params) - grad = jax.lax.pmean(grad, "batch") - new_state = state.apply_gradients(grads=grad) - - # Keep the token embeddings fixed except the newly added embeddings for the concept, - # as we only want to optimize the concept embeddings - token_embeds = original_token_embeds.at[placeholder_token_id].set( - new_state.params["text_model"]["embeddings"]["token_embedding"]["embedding"][placeholder_token_id] - ) - new_state.params["text_model"]["embeddings"]["token_embedding"]["embedding"] = token_embeds - - metrics = {"loss": loss} - metrics = 
jax.lax.pmean(metrics, axis_name="batch") - return new_state, metrics, new_train_rng - - # Create parallel version of the train and eval step - p_train_step = jax.pmap(train_step, "batch", donate_argnums=(0,)) - - # Replicate the train state on each device - state = jax_utils.replicate(state) - vae_params = jax_utils.replicate(vae_params) - unet_params = jax_utils.replicate(unet_params) - - # Train! - num_update_steps_per_epoch = math.ceil(len(train_dataloader)) - - # Scheduler and math around the number of training steps. - if args.max_train_steps is None: - args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch - - args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) - - logger.info("***** Running training *****") - logger.info(f" Num examples = {len(train_dataset)}") - logger.info(f" Num Epochs = {args.num_train_epochs}") - logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") - logger.info(f" Total train batch size (w. parallel & distributed) = {total_train_batch_size}") - logger.info(f" Total optimization steps = {args.max_train_steps}") - - global_step = 0 - - epochs = tqdm(range(args.num_train_epochs), desc=f"Epoch ... (1/{args.num_train_epochs})", position=0) - for epoch in epochs: - # ======================== Training ================================ - - train_metrics = [] - - steps_per_epoch = len(train_dataset) // total_train_batch_size - train_step_progress_bar = tqdm(total=steps_per_epoch, desc="Training...", position=1, leave=False) - # train - for batch in train_dataloader: - batch = shard(batch) - state, train_metric, train_rngs = p_train_step(state, vae_params, unet_params, batch, train_rngs) - train_metrics.append(train_metric) - - train_step_progress_bar.update(1) - global_step += 1 - - if global_step >= args.max_train_steps: - break - if global_step % args.save_steps == 0: - learned_embeds = get_params_to_save(state.params)["text_model"]["embeddings"]["token_embedding"][ - "embedding" - ][placeholder_token_id] - learned_embeds_dict = {args.placeholder_token: learned_embeds} - jnp.save( - os.path.join(args.output_dir, "learned_embeds-" + str(global_step) + ".npy"), learned_embeds_dict - ) - - train_metric = jax_utils.unreplicate(train_metric) - - train_step_progress_bar.close() - epochs.write(f"Epoch... ({epoch + 1}/{args.num_train_epochs} | Loss: {train_metric['loss']})") - - # Create the pipeline using using the trained modules and save it. 
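The intermediate checkpoints written above with `jnp.save` are plain `.npy` files holding a `{placeholder_token: embedding}` dict. A minimal sketch of reading one back; the path assumes the default `--output_dir` and `--save_steps` values:

```py
# Reload an intermediate embedding checkpoint saved above.
# The dict was pickled by jnp.save, so allow_pickle is required on load.
import numpy as np

ckpt = np.load("text-inversion-model/learned_embeds-500.npy", allow_pickle=True).item()
placeholder_token, embedding = next(iter(ckpt.items()))
print(placeholder_token, embedding.shape)
```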
- if jax.process_index() == 0: - scheduler = FlaxPNDMScheduler( - beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True - ) - safety_checker = FlaxStableDiffusionSafetyChecker.from_pretrained( - "CompVis/stable-diffusion-safety-checker", from_pt=True - ) - pipeline = FlaxStableDiffusionPipeline( - text_encoder=text_encoder, - vae=vae, - unet=unet, - tokenizer=tokenizer, - scheduler=scheduler, - safety_checker=safety_checker, - feature_extractor=CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32"), - ) - - pipeline.save_pretrained( - args.output_dir, - params={ - "text_encoder": get_params_to_save(state.params), - "vae": get_params_to_save(vae_params), - "unet": get_params_to_save(unet_params), - "safety_checker": safety_checker.params, - }, - ) - - # Also save the newly trained embeddings - learned_embeds = get_params_to_save(state.params)["text_model"]["embeddings"]["token_embedding"]["embedding"][ - placeholder_token_id - ] - learned_embeds_dict = {args.placeholder_token: learned_embeds} - jnp.save(os.path.join(args.output_dir, "learned_embeds.npy"), learned_embeds_dict) - - if args.push_to_hub: - upload_folder( - repo_id=repo_id, - folder_path=args.output_dir, - commit_message="End of training", - ignore_patterns=["step_*", "epoch_*"], - ) - - -if __name__ == "__main__": - main() diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/controlnet/pipeline_controlnet.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/controlnet/pipeline_controlnet.py deleted file mode 100644 index 42d2d14c44c5f3f2a266b71a6bc001d7d8b6a706..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/controlnet/pipeline_controlnet.py +++ /dev/null @@ -1,1009 +0,0 @@ -# Copyright 2023 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
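Beyond the single-ControlNet example embedded in this file's `EXAMPLE_DOC_STRING` further down, the pipeline also accepts several ControlNets at once: a list or tuple is wrapped into a `MultiControlNetModel` in `__init__`, and the residuals from each ControlNet are summed. A hedged sketch of that multi-ControlNet call, where the checkpoint ids and the pre-made conditioning images are assumptions:

```py
# Sketch of multi-ControlNet conditioning (checkpoints and image paths are placeholders).
import torch
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
from diffusers.utils import load_image

canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
pose = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose", torch_dtype=torch.float16)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=[canny, pose], torch_dtype=torch.float16
).to("cuda")

canny_image = load_image("canny.png")    # prepared edge map
pose_image = load_image("pose.png")      # prepared pose map
image = pipe(
    "futuristic-looking woman",
    image=[canny_image, pose_image],
    controlnet_conditioning_scale=[1.0, 0.8],
    num_inference_steps=20,
).images[0]
```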
- - -import inspect -import warnings -from typing import Any, Callable, Dict, List, Optional, Tuple, Union - -import numpy as np -import PIL.Image -import torch -import torch.nn.functional as F -from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer - -from ...image_processor import VaeImageProcessor -from ...loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin -from ...models import AutoencoderKL, ControlNetModel, UNet2DConditionModel -from ...schedulers import KarrasDiffusionSchedulers -from ...utils import ( - is_accelerate_available, - is_accelerate_version, - is_compiled_module, - logging, - randn_tensor, - replace_example_docstring, -) -from ..pipeline_utils import DiffusionPipeline -from ..stable_diffusion import StableDiffusionPipelineOutput -from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker -from .multicontrolnet import MultiControlNetModel - - -logger = logging.get_logger(__name__) # pylint: disable=invalid-name - - -EXAMPLE_DOC_STRING = """ - Examples: - ```py - >>> # !pip install opencv-python transformers accelerate - >>> from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler - >>> from diffusers.utils import load_image - >>> import numpy as np - >>> import torch - - >>> import cv2 - >>> from PIL import Image - - >>> # download an image - >>> image = load_image( - ... "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png" - ... ) - >>> image = np.array(image) - - >>> # get canny image - >>> image = cv2.Canny(image, 100, 200) - >>> image = image[:, :, None] - >>> image = np.concatenate([image, image, image], axis=2) - >>> canny_image = Image.fromarray(image) - - >>> # load control net and stable diffusion v1-5 - >>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16) - >>> pipe = StableDiffusionControlNetPipeline.from_pretrained( - ... "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16 - ... ) - - >>> # speed up diffusion process with faster scheduler and memory optimization - >>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config) - >>> # remove following line if xformers is not installed - >>> pipe.enable_xformers_memory_efficient_attention() - - >>> pipe.enable_model_cpu_offload() - - >>> # generate image - >>> generator = torch.manual_seed(0) - >>> image = pipe( - ... "futuristic-looking woman", num_inference_steps=20, generator=generator, image=canny_image - ... ).images[0] - ``` -""" - - -class StableDiffusionControlNetPipeline( - DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin -): - r""" - Pipeline for text-to-image generation using Stable Diffusion with ControlNet guidance. - - This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the - library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) - - In addition the pipeline inherits the following loading methods: - - *Textual-Inversion*: [`loaders.TextualInversionLoaderMixin.load_textual_inversion`] - - Args: - vae ([`AutoencoderKL`]): - Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. - text_encoder ([`CLIPTextModel`]): - Frozen text-encoder. 
Stable Diffusion uses the text portion of - [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically - the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. - tokenizer (`CLIPTokenizer`): - Tokenizer of class - [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). - unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. - controlnet ([`ControlNetModel`] or `List[ControlNetModel]`): - Provides additional conditioning to the unet during the denoising process. If you set multiple ControlNets - as a list, the outputs from each ControlNet are added together to create one combined additional - conditioning. - scheduler ([`SchedulerMixin`]): - A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of - [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. - safety_checker ([`StableDiffusionSafetyChecker`]): - Classification module that estimates whether generated images could be considered offensive or harmful. - Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. - feature_extractor ([`CLIPImageProcessor`]): - Model that extracts features from generated images to be used as inputs for the `safety_checker`. - """ - _optional_components = ["safety_checker", "feature_extractor"] - - def __init__( - self, - vae: AutoencoderKL, - text_encoder: CLIPTextModel, - tokenizer: CLIPTokenizer, - unet: UNet2DConditionModel, - controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel], - scheduler: KarrasDiffusionSchedulers, - safety_checker: StableDiffusionSafetyChecker, - feature_extractor: CLIPImageProcessor, - requires_safety_checker: bool = True, - ): - super().__init__() - - if safety_checker is None and requires_safety_checker: - logger.warning( - f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" - " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" - " results in services or applications open to the public. Both the diffusers team and Hugging Face" - " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" - " it only for use-cases that involve analyzing network behavior or auditing its results. For more" - " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." - ) - - if safety_checker is not None and feature_extractor is None: - raise ValueError( - "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" - " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." 
- ) - - if isinstance(controlnet, (list, tuple)): - controlnet = MultiControlNetModel(controlnet) - - self.register_modules( - vae=vae, - text_encoder=text_encoder, - tokenizer=tokenizer, - unet=unet, - controlnet=controlnet, - scheduler=scheduler, - safety_checker=safety_checker, - feature_extractor=feature_extractor, - ) - self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) - self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True) - self.control_image_processor = VaeImageProcessor( - vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False - ) - self.register_to_config(requires_safety_checker=requires_safety_checker) - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing - def enable_vae_slicing(self): - r""" - Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to - compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. - """ - self.vae.enable_slicing() - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing - def disable_vae_slicing(self): - r""" - Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to - computing decoding in one step. - """ - self.vae.disable_slicing() - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling - def enable_vae_tiling(self): - r""" - Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to - compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow - processing larger images. - """ - self.vae.enable_tiling() - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling - def disable_vae_tiling(self): - r""" - Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to - computing decoding in one step. - """ - self.vae.disable_tiling() - - def enable_model_cpu_offload(self, gpu_id=0): - r""" - Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared - to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward` - method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with - `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`. - """ - if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"): - from accelerate import cpu_offload_with_hook - else: - raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.") - - device = torch.device(f"cuda:{gpu_id}") - - hook = None - for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]: - _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook) - - if self.safety_checker is not None: - # the safety checker can offload the vae again - _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook) - - # control net hook has be manually offloaded as it alternates with unet - cpu_offload_with_hook(self.controlnet, device) - - # We'll offload the last model manually. 
- self.final_offload_hook = hook - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt - def _encode_prompt( - self, - prompt, - device, - num_images_per_prompt, - do_classifier_free_guidance, - negative_prompt=None, - prompt_embeds: Optional[torch.FloatTensor] = None, - negative_prompt_embeds: Optional[torch.FloatTensor] = None, - lora_scale: Optional[float] = None, - ): - r""" - Encodes the prompt into text encoder hidden states. - - Args: - prompt (`str` or `List[str]`, *optional*): - prompt to be encoded - device: (`torch.device`): - torch device - num_images_per_prompt (`int`): - number of images that should be generated per prompt - do_classifier_free_guidance (`bool`): - whether to use classifier free guidance or not - negative_prompt (`str` or `List[str]`, *optional*): - The prompt or prompts not to guide the image generation. If not defined, one has to pass - `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is - less than `1`). - prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not - provided, text embeddings will be generated from `prompt` input argument. - negative_prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt - weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input - argument. - lora_scale (`float`, *optional*): - A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. - """ - # set lora scale so that monkey patched LoRA - # function of text encoder can correctly access it - if lora_scale is not None and isinstance(self, LoraLoaderMixin): - self._lora_scale = lora_scale - - if prompt is not None and isinstance(prompt, str): - batch_size = 1 - elif prompt is not None and isinstance(prompt, list): - batch_size = len(prompt) - else: - batch_size = prompt_embeds.shape[0] - - if prompt_embeds is None: - # textual inversion: procecss multi-vector tokens if necessary - if isinstance(self, TextualInversionLoaderMixin): - prompt = self.maybe_convert_prompt(prompt, self.tokenizer) - - text_inputs = self.tokenizer( - prompt, - padding="max_length", - max_length=self.tokenizer.model_max_length, - truncation=True, - return_tensors="pt", - ) - text_input_ids = text_inputs.input_ids - untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids - - if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( - text_input_ids, untruncated_ids - ): - removed_text = self.tokenizer.batch_decode( - untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] - ) - logger.warning( - "The following part of your input was truncated because CLIP can only handle sequences up to" - f" {self.tokenizer.model_max_length} tokens: {removed_text}" - ) - - if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: - attention_mask = text_inputs.attention_mask.to(device) - else: - attention_mask = None - - prompt_embeds = self.text_encoder( - text_input_ids.to(device), - attention_mask=attention_mask, - ) - prompt_embeds = prompt_embeds[0] - - prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) - - bs_embed, seq_len, _ = prompt_embeds.shape - # duplicate text embeddings for each generation 
per prompt, using mps friendly method - prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) - prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) - - # get unconditional embeddings for classifier free guidance - if do_classifier_free_guidance and negative_prompt_embeds is None: - uncond_tokens: List[str] - if negative_prompt is None: - uncond_tokens = [""] * batch_size - elif prompt is not None and type(prompt) is not type(negative_prompt): - raise TypeError( - f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" - f" {type(prompt)}." - ) - elif isinstance(negative_prompt, str): - uncond_tokens = [negative_prompt] - elif batch_size != len(negative_prompt): - raise ValueError( - f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" - f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" - " the batch size of `prompt`." - ) - else: - uncond_tokens = negative_prompt - - # textual inversion: procecss multi-vector tokens if necessary - if isinstance(self, TextualInversionLoaderMixin): - uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) - - max_length = prompt_embeds.shape[1] - uncond_input = self.tokenizer( - uncond_tokens, - padding="max_length", - max_length=max_length, - truncation=True, - return_tensors="pt", - ) - - if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: - attention_mask = uncond_input.attention_mask.to(device) - else: - attention_mask = None - - negative_prompt_embeds = self.text_encoder( - uncond_input.input_ids.to(device), - attention_mask=attention_mask, - ) - negative_prompt_embeds = negative_prompt_embeds[0] - - if do_classifier_free_guidance: - # duplicate unconditional embeddings for each generation per prompt, using mps friendly method - seq_len = negative_prompt_embeds.shape[1] - - negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) - - negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) - negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) - - # For classifier free guidance, we need to do two forward passes. - # Here we concatenate the unconditional and text embeddings into a single batch - # to avoid doing two forward passes - prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) - - return prompt_embeds - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker - def run_safety_checker(self, image, device, dtype): - if self.safety_checker is None: - has_nsfw_concept = None - else: - if torch.is_tensor(image): - feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") - else: - feature_extractor_input = self.image_processor.numpy_to_pil(image) - safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) - image, has_nsfw_concept = self.safety_checker( - images=image, clip_input=safety_checker_input.pixel_values.to(dtype) - ) - return image, has_nsfw_concept - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents - def decode_latents(self, latents): - warnings.warn( - "The decode_latents method is deprecated and will be removed in a future version. 
Please" - " use VaeImageProcessor instead", - FutureWarning, - ) - latents = 1 / self.vae.config.scaling_factor * latents - image = self.vae.decode(latents, return_dict=False)[0] - image = (image / 2 + 0.5).clamp(0, 1) - # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 - image = image.cpu().permute(0, 2, 3, 1).float().numpy() - return image - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs - def prepare_extra_step_kwargs(self, generator, eta): - # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature - # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. - # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 - # and should be between [0, 1] - - accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) - extra_step_kwargs = {} - if accepts_eta: - extra_step_kwargs["eta"] = eta - - # check if the scheduler accepts generator - accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) - if accepts_generator: - extra_step_kwargs["generator"] = generator - return extra_step_kwargs - - def check_inputs( - self, - prompt, - image, - callback_steps, - negative_prompt=None, - prompt_embeds=None, - negative_prompt_embeds=None, - controlnet_conditioning_scale=1.0, - control_guidance_start=0.0, - control_guidance_end=1.0, - ): - if (callback_steps is None) or ( - callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) - ): - raise ValueError( - f"`callback_steps` has to be a positive integer but is {callback_steps} of type" - f" {type(callback_steps)}." - ) - - if prompt is not None and prompt_embeds is not None: - raise ValueError( - f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" - " only forward one of the two." - ) - elif prompt is None and prompt_embeds is None: - raise ValueError( - "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." - ) - elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): - raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") - - if negative_prompt is not None and negative_prompt_embeds is not None: - raise ValueError( - f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" - f" {negative_prompt_embeds}. Please make sure to only forward one of the two." - ) - - if prompt_embeds is not None and negative_prompt_embeds is not None: - if prompt_embeds.shape != negative_prompt_embeds.shape: - raise ValueError( - "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" - f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" - f" {negative_prompt_embeds.shape}." - ) - - # `prompt` needs more sophisticated handling when there are multiple - # conditionings. - if isinstance(self.controlnet, MultiControlNetModel): - if isinstance(prompt, list): - logger.warning( - f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}" - " prompts. The conditionings will be fixed across the prompts." 
- ) - - # Check `image` - is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance( - self.controlnet, torch._dynamo.eval_frame.OptimizedModule - ) - if ( - isinstance(self.controlnet, ControlNetModel) - or is_compiled - and isinstance(self.controlnet._orig_mod, ControlNetModel) - ): - self.check_image(image, prompt, prompt_embeds) - elif ( - isinstance(self.controlnet, MultiControlNetModel) - or is_compiled - and isinstance(self.controlnet._orig_mod, MultiControlNetModel) - ): - if not isinstance(image, list): - raise TypeError("For multiple controlnets: `image` must be type `list`") - - # When `image` is a nested list: - # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]]) - elif any(isinstance(i, list) for i in image): - raise ValueError("A single batch of multiple conditionings are supported at the moment.") - elif len(image) != len(self.controlnet.nets): - raise ValueError( - f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets." - ) - - for image_ in image: - self.check_image(image_, prompt, prompt_embeds) - else: - assert False - - # Check `controlnet_conditioning_scale` - if ( - isinstance(self.controlnet, ControlNetModel) - or is_compiled - and isinstance(self.controlnet._orig_mod, ControlNetModel) - ): - if not isinstance(controlnet_conditioning_scale, float): - raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.") - elif ( - isinstance(self.controlnet, MultiControlNetModel) - or is_compiled - and isinstance(self.controlnet._orig_mod, MultiControlNetModel) - ): - if isinstance(controlnet_conditioning_scale, list): - if any(isinstance(i, list) for i in controlnet_conditioning_scale): - raise ValueError("A single batch of multiple conditionings are supported at the moment.") - elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len( - self.controlnet.nets - ): - raise ValueError( - "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have" - " the same length as the number of controlnets" - ) - else: - assert False - - if len(control_guidance_start) != len(control_guidance_end): - raise ValueError( - f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list." - ) - - if isinstance(self.controlnet, MultiControlNetModel): - if len(control_guidance_start) != len(self.controlnet.nets): - raise ValueError( - f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}." - ) - - for start, end in zip(control_guidance_start, control_guidance_end): - if start >= end: - raise ValueError( - f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}." 
- ) - if start < 0.0: - raise ValueError(f"control guidance start: {start} can't be smaller than 0.") - if end > 1.0: - raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") - - def check_image(self, image, prompt, prompt_embeds): - image_is_pil = isinstance(image, PIL.Image.Image) - image_is_tensor = isinstance(image, torch.Tensor) - image_is_np = isinstance(image, np.ndarray) - image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) - image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) - image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) - - if ( - not image_is_pil - and not image_is_tensor - and not image_is_np - and not image_is_pil_list - and not image_is_tensor_list - and not image_is_np_list - ): - raise TypeError( - f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}" - ) - - if image_is_pil: - image_batch_size = 1 - else: - image_batch_size = len(image) - - if prompt is not None and isinstance(prompt, str): - prompt_batch_size = 1 - elif prompt is not None and isinstance(prompt, list): - prompt_batch_size = len(prompt) - elif prompt_embeds is not None: - prompt_batch_size = prompt_embeds.shape[0] - - if image_batch_size != 1 and image_batch_size != prompt_batch_size: - raise ValueError( - f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}" - ) - - def prepare_image( - self, - image, - width, - height, - batch_size, - num_images_per_prompt, - device, - dtype, - do_classifier_free_guidance=False, - guess_mode=False, - ): - image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) - image_batch_size = image.shape[0] - - if image_batch_size == 1: - repeat_by = batch_size - else: - # image batch size is the same as prompt batch size - repeat_by = num_images_per_prompt - - image = image.repeat_interleave(repeat_by, dim=0) - - image = image.to(device=device, dtype=dtype) - - if do_classifier_free_guidance and not guess_mode: - image = torch.cat([image] * 2) - - return image - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents - def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): - shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) - if isinstance(generator, list) and len(generator) != batch_size: - raise ValueError( - f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" - f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
- ) - - if latents is None: - latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) - else: - latents = latents.to(device) - - # scale the initial noise by the standard deviation required by the scheduler - latents = latents * self.scheduler.init_noise_sigma - return latents - - @torch.no_grad() - @replace_example_docstring(EXAMPLE_DOC_STRING) - def __call__( - self, - prompt: Union[str, List[str]] = None, - image: Union[ - torch.FloatTensor, - PIL.Image.Image, - np.ndarray, - List[torch.FloatTensor], - List[PIL.Image.Image], - List[np.ndarray], - ] = None, - height: Optional[int] = None, - width: Optional[int] = None, - num_inference_steps: int = 50, - guidance_scale: float = 7.5, - negative_prompt: Optional[Union[str, List[str]]] = None, - num_images_per_prompt: Optional[int] = 1, - eta: float = 0.0, - generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, - latents: Optional[torch.FloatTensor] = None, - prompt_embeds: Optional[torch.FloatTensor] = None, - negative_prompt_embeds: Optional[torch.FloatTensor] = None, - output_type: Optional[str] = "pil", - return_dict: bool = True, - callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, - callback_steps: int = 1, - cross_attention_kwargs: Optional[Dict[str, Any]] = None, - controlnet_conditioning_scale: Union[float, List[float]] = 1.0, - guess_mode: bool = False, - control_guidance_start: Union[float, List[float]] = 0.0, - control_guidance_end: Union[float, List[float]] = 1.0, - ): - r""" - Function invoked when calling the pipeline for generation. - - Args: - prompt (`str` or `List[str]`, *optional*): - The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. - instead. - image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,: - `List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): - The ControlNet input condition. ControlNet uses this input condition to generate guidance to Unet. If - the type is specified as `Torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can - also be accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If - height and/or width are passed, `image` is resized according to them. If multiple ControlNets are - specified in init, images must be passed as a list such that each element of the list can be correctly - batched for input to a single controlnet. - height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): - The height in pixels of the generated image. - width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): - The width in pixels of the generated image. - num_inference_steps (`int`, *optional*, defaults to 50): - The number of denoising steps. More denoising steps usually lead to a higher quality image at the - expense of slower inference. - guidance_scale (`float`, *optional*, defaults to 7.5): - Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). - `guidance_scale` is defined as `w` of equation 2. of [Imagen - Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > - 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, - usually at the expense of lower image quality. 
- negative_prompt (`str` or `List[str]`, *optional*): - The prompt or prompts not to guide the image generation. If not defined, one has to pass - `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is - less than `1`). - num_images_per_prompt (`int`, *optional*, defaults to 1): - The number of images to generate per prompt. - eta (`float`, *optional*, defaults to 0.0): - Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to - [`schedulers.DDIMScheduler`], will be ignored for others. - generator (`torch.Generator` or `List[torch.Generator]`, *optional*): - One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) - to make generation deterministic. - latents (`torch.FloatTensor`, *optional*): - Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image - generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. - prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not - provided, text embeddings will be generated from `prompt` input argument. - negative_prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt - weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input - argument. - output_type (`str`, *optional*, defaults to `"pil"`): - The output format of the generate image. Choose between - [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. - return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a - plain tuple. - callback (`Callable`, *optional*): - A function that will be called every `callback_steps` steps during inference. The function will be - called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. - callback_steps (`int`, *optional*, defaults to 1): - The frequency at which the `callback` function will be called. If not specified, the callback will be - called at every step. - cross_attention_kwargs (`dict`, *optional*): - A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under - `self.processor` in - [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py). - controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): - The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added - to the residual in the original unet. If multiple ControlNets are specified in init, you can set the - corresponding scale as a list. - guess_mode (`bool`, *optional*, defaults to `False`): - In this mode, the ControlNet encoder will try best to recognize the content of the input image even if - you remove all prompts. The `guidance_scale` between 3.0 and 5.0 is recommended. - control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0): - The percentage of total steps at which the controlnet starts applying. 
- control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0): - The percentage of total steps at which the controlnet stops applying. - - Examples: - - Returns: - [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: - [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. - When returning a tuple, the first element is a list with the generated images, and the second element is a - list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" - (nsfw) content, according to the `safety_checker`. - """ - controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet - - # align format for control guidance - if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): - control_guidance_start = len(control_guidance_end) * [control_guidance_start] - elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): - control_guidance_end = len(control_guidance_start) * [control_guidance_end] - elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): - mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 - control_guidance_start, control_guidance_end = mult * [control_guidance_start], mult * [ - control_guidance_end - ] - - # 1. Check inputs. Raise error if not correct - self.check_inputs( - prompt, - image, - callback_steps, - negative_prompt, - prompt_embeds, - negative_prompt_embeds, - controlnet_conditioning_scale, - control_guidance_start, - control_guidance_end, - ) - - # 2. Define call parameters - if prompt is not None and isinstance(prompt, str): - batch_size = 1 - elif prompt is not None and isinstance(prompt, list): - batch_size = len(prompt) - else: - batch_size = prompt_embeds.shape[0] - - device = self._execution_device - # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) - # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` - # corresponds to doing no classifier free guidance. - do_classifier_free_guidance = guidance_scale > 1.0 - - if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): - controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) - - global_pool_conditions = ( - controlnet.config.global_pool_conditions - if isinstance(controlnet, ControlNetModel) - else controlnet.nets[0].config.global_pool_conditions - ) - guess_mode = guess_mode or global_pool_conditions - - # 3. Encode input prompt - text_encoder_lora_scale = ( - cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None - ) - prompt_embeds = self._encode_prompt( - prompt, - device, - num_images_per_prompt, - do_classifier_free_guidance, - negative_prompt, - prompt_embeds=prompt_embeds, - negative_prompt_embeds=negative_prompt_embeds, - lora_scale=text_encoder_lora_scale, - ) - - # 4. 
Prepare image - if isinstance(controlnet, ControlNetModel): - image = self.prepare_image( - image=image, - width=width, - height=height, - batch_size=batch_size * num_images_per_prompt, - num_images_per_prompt=num_images_per_prompt, - device=device, - dtype=controlnet.dtype, - do_classifier_free_guidance=do_classifier_free_guidance, - guess_mode=guess_mode, - ) - height, width = image.shape[-2:] - elif isinstance(controlnet, MultiControlNetModel): - images = [] - - for image_ in image: - image_ = self.prepare_image( - image=image_, - width=width, - height=height, - batch_size=batch_size * num_images_per_prompt, - num_images_per_prompt=num_images_per_prompt, - device=device, - dtype=controlnet.dtype, - do_classifier_free_guidance=do_classifier_free_guidance, - guess_mode=guess_mode, - ) - - images.append(image_) - - image = images - height, width = image[0].shape[-2:] - else: - assert False - - # 5. Prepare timesteps - self.scheduler.set_timesteps(num_inference_steps, device=device) - timesteps = self.scheduler.timesteps - - # 6. Prepare latent variables - num_channels_latents = self.unet.config.in_channels - latents = self.prepare_latents( - batch_size * num_images_per_prompt, - num_channels_latents, - height, - width, - prompt_embeds.dtype, - device, - generator, - latents, - ) - - # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline - extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) - - # 7.1 Create tensor stating which controlnets to keep - controlnet_keep = [] - for i in range(len(timesteps)): - keeps = [ - 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) - for s, e in zip(control_guidance_start, control_guidance_end) - ] - controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) - - # 8. Denoising loop - num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order - with self.progress_bar(total=num_inference_steps) as progress_bar: - for i, t in enumerate(timesteps): - # expand the latents if we are doing classifier free guidance - latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents - latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) - - # controlnet(s) inference - if guess_mode and do_classifier_free_guidance: - # Infer ControlNet only for the conditional batch. - control_model_input = latents - control_model_input = self.scheduler.scale_model_input(control_model_input, t) - controlnet_prompt_embeds = prompt_embeds.chunk(2)[1] - else: - control_model_input = latent_model_input - controlnet_prompt_embeds = prompt_embeds - - if isinstance(controlnet_keep[i], list): - cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] - else: - cond_scale = controlnet_conditioning_scale * controlnet_keep[i] - - down_block_res_samples, mid_block_res_sample = self.controlnet( - control_model_input, - t, - encoder_hidden_states=controlnet_prompt_embeds, - controlnet_cond=image, - conditioning_scale=cond_scale, - guess_mode=guess_mode, - return_dict=False, - ) - - if guess_mode and do_classifier_free_guidance: - # Infered ControlNet only for the conditional batch. - # To apply the output of ControlNet to both the unconditional and conditional batches, - # add 0 to the unconditional batch to keep it unchanged. 
- down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples] - mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample]) - - # predict the noise residual - noise_pred = self.unet( - latent_model_input, - t, - encoder_hidden_states=prompt_embeds, - cross_attention_kwargs=cross_attention_kwargs, - down_block_additional_residuals=down_block_res_samples, - mid_block_additional_residual=mid_block_res_sample, - return_dict=False, - )[0] - - # perform guidance - if do_classifier_free_guidance: - noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) - noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) - - # compute the previous noisy sample x_t -> x_t-1 - latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] - - # call the callback, if provided - if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): - progress_bar.update() - if callback is not None and i % callback_steps == 0: - callback(i, t, latents) - - # If we do sequential model offloading, let's offload unet and controlnet - # manually for max memory savings - if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: - self.unet.to("cpu") - self.controlnet.to("cpu") - torch.cuda.empty_cache() - - if not output_type == "latent": - image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] - image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) - else: - image = latents - has_nsfw_concept = None - - if has_nsfw_concept is None: - do_denormalize = [True] * image.shape[0] - else: - do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] - - image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) - - # Offload last model to CPU - if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: - self.final_offload_hook.offload() - - if not return_dict: - return (image, has_nsfw_concept) - - return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/utils/dynamic_modules_utils.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/utils/dynamic_modules_utils.py deleted file mode 100644 index 5b0952f0b514cb52e63fdac8a780ddc9482a5b9d..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/utils/dynamic_modules_utils.py +++ /dev/null @@ -1,456 +0,0 @@ -# coding=utf-8 -# Copyright 2023 The HuggingFace Inc. team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
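A note on the ControlNet pipeline deleted above: inside `__call__`, each ControlNet is switched on or off per denoising step through the `controlnet_keep` list derived from `control_guidance_start` / `control_guidance_end`. The snippet below replays that computation in isolation (the step count and guidance windows are made-up example values, not from the source) to show how the guidance windows become per-step factors that later multiply `controlnet_conditioning_scale`:

```python
# Standalone replay of the controlnet_keep schedule from the deleted pipeline.
# num_steps and the guidance windows are hypothetical example values.
num_steps = 20
control_guidance_start = [0.0, 0.5]  # two hypothetical ControlNets
control_guidance_end = [0.8, 1.0]

controlnet_keep = []
for i in range(num_steps):
    # A ControlNet contributes at step i only while the step fraction lies
    # inside its [start, end] window, mirroring the pipeline's list comprehension.
    keeps = [
        1.0 - float(i / num_steps < s or (i + 1) / num_steps > e)
        for s, e in zip(control_guidance_start, control_guidance_end)
    ]
    controlnet_keep.append(keeps)

print(controlnet_keep[0])   # [1.0, 0.0] -> only the first ControlNet is active at step 0
print(controlnet_keep[17])  # [0.0, 1.0] -> only the second one is active late in sampling
```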
-"""Utilities to dynamically load objects from the Hub.""" - -import importlib -import inspect -import json -import os -import re -import shutil -import sys -from pathlib import Path -from typing import Dict, Optional, Union -from urllib import request - -from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info -from packaging import version - -from .. import __version__ -from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging - - -COMMUNITY_PIPELINES_URL = ( - "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py" -) - - -logger = logging.get_logger(__name__) # pylint: disable=invalid-name - - -def get_diffusers_versions(): - url = "https://pypi.org/pypi/diffusers/json" - releases = json.loads(request.urlopen(url).read())["releases"].keys() - return sorted(releases, key=lambda x: version.Version(x)) - - -def init_hf_modules(): - """ - Creates the cache directory for modules with an init, and adds it to the Python path. - """ - # This function has already been executed if HF_MODULES_CACHE already is in the Python path. - if HF_MODULES_CACHE in sys.path: - return - - sys.path.append(HF_MODULES_CACHE) - os.makedirs(HF_MODULES_CACHE, exist_ok=True) - init_path = Path(HF_MODULES_CACHE) / "__init__.py" - if not init_path.exists(): - init_path.touch() - - -def create_dynamic_module(name: Union[str, os.PathLike]): - """ - Creates a dynamic module in the cache directory for modules. - """ - init_hf_modules() - dynamic_module_path = Path(HF_MODULES_CACHE) / name - # If the parent module does not exist yet, recursively create it. - if not dynamic_module_path.parent.exists(): - create_dynamic_module(dynamic_module_path.parent) - os.makedirs(dynamic_module_path, exist_ok=True) - init_path = dynamic_module_path / "__init__.py" - if not init_path.exists(): - init_path.touch() - - -def get_relative_imports(module_file): - """ - Get the list of modules that are relatively imported in a module file. - - Args: - module_file (`str` or `os.PathLike`): The module file to inspect. - """ - with open(module_file, "r", encoding="utf-8") as f: - content = f.read() - - # Imports of the form `import .xxx` - relative_imports = re.findall("^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE) - # Imports of the form `from .xxx import yyy` - relative_imports += re.findall("^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE) - # Unique-ify - return list(set(relative_imports)) - - -def get_relative_import_files(module_file): - """ - Get the list of all files that are needed for a given module. Note that this function recurses through the relative - imports (if a imports b and b imports c, it will return module files for b and c). - - Args: - module_file (`str` or `os.PathLike`): The module file to inspect. 
- """ - no_change = False - files_to_check = [module_file] - all_relative_imports = [] - - # Let's recurse through all relative imports - while not no_change: - new_imports = [] - for f in files_to_check: - new_imports.extend(get_relative_imports(f)) - - module_path = Path(module_file).parent - new_import_files = [str(module_path / m) for m in new_imports] - new_import_files = [f for f in new_import_files if f not in all_relative_imports] - files_to_check = [f"{f}.py" for f in new_import_files] - - no_change = len(new_import_files) == 0 - all_relative_imports.extend(files_to_check) - - return all_relative_imports - - -def check_imports(filename): - """ - Check if the current Python environment contains all the libraries that are imported in a file. - """ - with open(filename, "r", encoding="utf-8") as f: - content = f.read() - - # Imports of the form `import xxx` - imports = re.findall("^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE) - # Imports of the form `from xxx import yyy` - imports += re.findall("^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE) - # Only keep the top-level module - imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")] - - # Unique-ify and test we got them all - imports = list(set(imports)) - missing_packages = [] - for imp in imports: - try: - importlib.import_module(imp) - except ImportError: - missing_packages.append(imp) - - if len(missing_packages) > 0: - raise ImportError( - "This modeling file requires the following packages that were not found in your environment: " - f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`" - ) - - return get_relative_imports(filename) - - -def get_class_in_module(class_name, module_path): - """ - Import a module on the cache directory for modules and extract a class from it. - """ - module_path = module_path.replace(os.path.sep, ".") - module = importlib.import_module(module_path) - - if class_name is None: - return find_pipeline_class(module) - return getattr(module, class_name) - - -def find_pipeline_class(loaded_module): - """ - Retrieve pipeline class that inherits from `DiffusionPipeline`. Note that there has to be exactly one class - inheriting from `DiffusionPipeline`. - """ - from ..pipelines import DiffusionPipeline - - cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass)) - - pipeline_class = None - for cls_name, cls in cls_members.items(): - if ( - cls_name != DiffusionPipeline.__name__ - and issubclass(cls, DiffusionPipeline) - and cls.__module__.split(".")[0] != "diffusers" - ): - if pipeline_class is not None: - raise ValueError( - f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:" - f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in" - f" {loaded_module}." - ) - pipeline_class = cls - - return pipeline_class - - -def get_cached_module_file( - pretrained_model_name_or_path: Union[str, os.PathLike], - module_file: str, - cache_dir: Optional[Union[str, os.PathLike]] = None, - force_download: bool = False, - resume_download: bool = False, - proxies: Optional[Dict[str, str]] = None, - use_auth_token: Optional[Union[bool, str]] = None, - revision: Optional[str] = None, - local_files_only: bool = False, -): - """ - Prepares Downloads a module from a local folder or a distant repo and returns its path inside the cached - Transformers module. 
- - Args: - pretrained_model_name_or_path (`str` or `os.PathLike`): - This can be either: - - - a string, the *model id* of a pretrained model configuration hosted inside a model repo on - huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced - under a user or organization name, like `dbmdz/bert-base-german-cased`. - - a path to a *directory* containing a configuration file saved using the - [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`. - - module_file (`str`): - The name of the module file containing the class to look for. - cache_dir (`str` or `os.PathLike`, *optional*): - Path to a directory in which a downloaded pretrained model configuration should be cached if the standard - cache should not be used. - force_download (`bool`, *optional*, defaults to `False`): - Whether or not to force to (re-)download the configuration files and override the cached versions if they - exist. - resume_download (`bool`, *optional*, defaults to `False`): - Whether or not to delete incompletely received file. Attempts to resume the download if such a file exists. - proxies (`Dict[str, str]`, *optional*): - A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', - 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. - use_auth_token (`str` or *bool*, *optional*): - The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated - when running `transformers-cli login` (stored in `~/.huggingface`). - revision (`str`, *optional*, defaults to `"main"`): - The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a - git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any - identifier allowed by git. - local_files_only (`bool`, *optional*, defaults to `False`): - If `True`, will only try to load the tokenizer configuration from local files. - - - - You may pass a token in `use_auth_token` if you are not logged in (`huggingface-cli long`) and want to use private - or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models). - - - - Returns: - `str`: The path to the module inside the cache. - """ - # Download and cache module_file from the repo `pretrained_model_name_or_path` of grab it if it's a local file. - pretrained_model_name_or_path = str(pretrained_model_name_or_path) - - module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file) - - if os.path.isfile(module_file_or_url): - resolved_module_file = module_file_or_url - submodule = "local" - elif pretrained_model_name_or_path.count("/") == 0: - available_versions = get_diffusers_versions() - # cut ".dev0" - latest_version = "v" + ".".join(__version__.split(".")[:3]) - - # retrieve github version that matches - if revision is None: - revision = latest_version if latest_version[1:] in available_versions else "main" - logger.info(f"Defaulting to latest_version: {revision}.") - elif revision in available_versions: - revision = f"v{revision}" - elif revision == "main": - revision = revision - else: - raise ValueError( - f"`custom_revision`: {revision} does not exist. Please make sure to choose one of" - f" {', '.join(available_versions + ['main'])}." 
- ) - - # community pipeline on GitHub - github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path) - try: - resolved_module_file = cached_download( - github_url, - cache_dir=cache_dir, - force_download=force_download, - proxies=proxies, - resume_download=resume_download, - local_files_only=local_files_only, - use_auth_token=False, - ) - submodule = "git" - module_file = pretrained_model_name_or_path + ".py" - except EnvironmentError: - logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.") - raise - else: - try: - # Load from URL or cache if already cached - resolved_module_file = hf_hub_download( - pretrained_model_name_or_path, - module_file, - cache_dir=cache_dir, - force_download=force_download, - proxies=proxies, - resume_download=resume_download, - local_files_only=local_files_only, - use_auth_token=use_auth_token, - ) - submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/"))) - except EnvironmentError: - logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.") - raise - - # Check we have all the requirements in our environment - modules_needed = check_imports(resolved_module_file) - - # Now we move the module inside our cached dynamic modules. - full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule - create_dynamic_module(full_submodule) - submodule_path = Path(HF_MODULES_CACHE) / full_submodule - if submodule == "local" or submodule == "git": - # We always copy local files (we could hash the file to see if there was a change, and give them the name of - # that hash, to only copy when there is a modification but it seems overkill for now). - # The only reason we do the copy is to avoid putting too many folders in sys.path. - shutil.copy(resolved_module_file, submodule_path / module_file) - for module_needed in modules_needed: - module_needed = f"{module_needed}.py" - shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed) - else: - # Get the commit hash - # TODO: we will get this info in the etag soon, so retrieve it from there and not here. - if isinstance(use_auth_token, str): - token = use_auth_token - elif use_auth_token is True: - token = HfFolder.get_token() - else: - token = None - - commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha - - # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the - # benefit of versioning. 
- submodule_path = submodule_path / commit_hash - full_submodule = full_submodule + os.path.sep + commit_hash - create_dynamic_module(full_submodule) - - if not (submodule_path / module_file).exists(): - shutil.copy(resolved_module_file, submodule_path / module_file) - # Make sure we also have every file with relative - for module_needed in modules_needed: - if not (submodule_path / module_needed).exists(): - get_cached_module_file( - pretrained_model_name_or_path, - f"{module_needed}.py", - cache_dir=cache_dir, - force_download=force_download, - resume_download=resume_download, - proxies=proxies, - use_auth_token=use_auth_token, - revision=revision, - local_files_only=local_files_only, - ) - return os.path.join(full_submodule, module_file) - - -def get_class_from_dynamic_module( - pretrained_model_name_or_path: Union[str, os.PathLike], - module_file: str, - class_name: Optional[str] = None, - cache_dir: Optional[Union[str, os.PathLike]] = None, - force_download: bool = False, - resume_download: bool = False, - proxies: Optional[Dict[str, str]] = None, - use_auth_token: Optional[Union[bool, str]] = None, - revision: Optional[str] = None, - local_files_only: bool = False, - **kwargs, -): - """ - Extracts a class from a module file, present in the local folder or repository of a model. - - - - Calling this function will execute the code in the module file found locally or downloaded from the Hub. It should - therefore only be called on trusted repos. - - - - Args: - pretrained_model_name_or_path (`str` or `os.PathLike`): - This can be either: - - - a string, the *model id* of a pretrained model configuration hosted inside a model repo on - huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced - under a user or organization name, like `dbmdz/bert-base-german-cased`. - - a path to a *directory* containing a configuration file saved using the - [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`. - - module_file (`str`): - The name of the module file containing the class to look for. - class_name (`str`): - The name of the class to import in the module. - cache_dir (`str` or `os.PathLike`, *optional*): - Path to a directory in which a downloaded pretrained model configuration should be cached if the standard - cache should not be used. - force_download (`bool`, *optional*, defaults to `False`): - Whether or not to force to (re-)download the configuration files and override the cached versions if they - exist. - resume_download (`bool`, *optional*, defaults to `False`): - Whether or not to delete incompletely received file. Attempts to resume the download if such a file exists. - proxies (`Dict[str, str]`, *optional*): - A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', - 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. - use_auth_token (`str` or `bool`, *optional*): - The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated - when running `transformers-cli login` (stored in `~/.huggingface`). - revision (`str`, *optional*, defaults to `"main"`): - The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a - git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any - identifier allowed by git. 
- local_files_only (`bool`, *optional*, defaults to `False`): - If `True`, will only try to load the tokenizer configuration from local files. - - - - You may pass a token in `use_auth_token` if you are not logged in (`huggingface-cli long`) and want to use private - or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models). - - - - Returns: - `type`: The class, dynamically imported from the module. - - Examples: - - ```python - # Download module `modeling.py` from huggingface.co and cache then extract the class `MyBertModel` from this - # module. - cls = get_class_from_dynamic_module("sgugger/my-bert-model", "modeling.py", "MyBertModel") - ```""" - # And lastly we get the class inside our newly created module - final_module = get_cached_module_file( - pretrained_model_name_or_path, - module_file, - cache_dir=cache_dir, - force_download=force_download, - resume_download=resume_download, - proxies=proxies, - use_auth_token=use_auth_token, - revision=revision, - local_files_only=local_files_only, - ) - return get_class_in_module(class_name, final_module.replace(".py", "")) diff --git a/spaces/Andy1621/uniformer_image_detection/configs/_base_/datasets/lvis_v1_instance.py b/spaces/Andy1621/uniformer_image_detection/configs/_base_/datasets/lvis_v1_instance.py deleted file mode 100644 index e8c5d1b14594a6ea38b215635686c04995338ed7..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/_base_/datasets/lvis_v1_instance.py +++ /dev/null @@ -1,23 +0,0 @@ -_base_ = 'coco_instance.py' -dataset_type = 'LVISV1Dataset' -data_root = 'data/lvis_v1/' -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - _delete_=True, - type='ClassBalancedDataset', - oversample_thr=1e-3, - dataset=dict( - type=dataset_type, - ann_file=data_root + 'annotations/lvis_v1_train.json', - img_prefix=data_root)), - val=dict( - type=dataset_type, - ann_file=data_root + 'annotations/lvis_v1_val.json', - img_prefix=data_root), - test=dict( - type=dataset_type, - ann_file=data_root + 'annotations/lvis_v1_val.json', - img_prefix=data_root)) -evaluation = dict(metric=['bbox', 'segm']) diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/utils/env.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/utils/env.py deleted file mode 100644 index e3f0d92529e193e6d8339419bcd9bed7901a7769..0000000000000000000000000000000000000000 --- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/utils/env.py +++ /dev/null @@ -1,95 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -"""This file holding some environment constant for sharing by other files.""" - -import os.path as osp -import subprocess -import sys -from collections import defaultdict - -import cv2 -import torch - -import annotator.uniformer.mmcv as mmcv -from .parrots_wrapper import get_build_config - - -def collect_env(): - """Collect the information of the running environments. - - Returns: - dict: The environment information. The following fields are contained. - - - sys.platform: The variable of ``sys.platform``. - - Python: Python version. - - CUDA available: Bool, indicating if CUDA is available. - - GPU devices: Device type of each GPU. - - CUDA_HOME (optional): The env var ``CUDA_HOME``. - - NVCC (optional): NVCC version. - - GCC: GCC version, "n/a" if GCC is not installed. - - PyTorch: PyTorch version. - - PyTorch compiling details: The output of \ - ``torch.__config__.show()``. - - TorchVision (optional): TorchVision version. 
- - OpenCV: OpenCV version. - - MMCV: MMCV version. - - MMCV Compiler: The GCC version for compiling MMCV ops. - - MMCV CUDA Compiler: The CUDA version for compiling MMCV ops. - """ - env_info = {} - env_info['sys.platform'] = sys.platform - env_info['Python'] = sys.version.replace('\n', '') - - cuda_available = torch.cuda.is_available() - env_info['CUDA available'] = cuda_available - - if cuda_available: - devices = defaultdict(list) - for k in range(torch.cuda.device_count()): - devices[torch.cuda.get_device_name(k)].append(str(k)) - for name, device_ids in devices.items(): - env_info['GPU ' + ','.join(device_ids)] = name - - from annotator.uniformer.mmcv.utils.parrots_wrapper import _get_cuda_home - CUDA_HOME = _get_cuda_home() - env_info['CUDA_HOME'] = CUDA_HOME - - if CUDA_HOME is not None and osp.isdir(CUDA_HOME): - try: - nvcc = osp.join(CUDA_HOME, 'bin/nvcc') - nvcc = subprocess.check_output( - f'"{nvcc}" -V | tail -n1', shell=True) - nvcc = nvcc.decode('utf-8').strip() - except subprocess.SubprocessError: - nvcc = 'Not Available' - env_info['NVCC'] = nvcc - - try: - gcc = subprocess.check_output('gcc --version | head -n1', shell=True) - gcc = gcc.decode('utf-8').strip() - env_info['GCC'] = gcc - except subprocess.CalledProcessError: # gcc is unavailable - env_info['GCC'] = 'n/a' - - env_info['PyTorch'] = torch.__version__ - env_info['PyTorch compiling details'] = get_build_config() - - try: - import torchvision - env_info['TorchVision'] = torchvision.__version__ - except ModuleNotFoundError: - pass - - env_info['OpenCV'] = cv2.__version__ - - env_info['MMCV'] = mmcv.__version__ - - try: - from annotator.uniformer.mmcv.ops import get_compiler_version, get_compiling_cuda_version - except ModuleNotFoundError: - env_info['MMCV Compiler'] = 'n/a' - env_info['MMCV CUDA Compiler'] = 'n/a' - else: - env_info['MMCV Compiler'] = get_compiler_version() - env_info['MMCV CUDA Compiler'] = get_compiling_cuda_version() - - return env_info diff --git a/spaces/Araloak/fz/chat_completion.py b/spaces/Araloak/fz/chat_completion.py deleted file mode 100644 index bb065338a9d4aa7c8ea3cc70116b850820ce822f..0000000000000000000000000000000000000000 --- a/spaces/Araloak/fz/chat_completion.py +++ /dev/null @@ -1,62 +0,0 @@ -import linecache -import re -from typing import Dict, List, Optional - -import openai - - -class ChatCompletion: - def __init__(self, model: str = 'gpt-3.5-turbo', - api_key: Optional[str] = None, api_key_path: str = './openai_api_key'): - if api_key is None: - openai.api_key = api_key - api_key = linecache.getline(api_key_path, 2).strip('\n') - if len(api_key) == 0: - raise EnvironmentError - openai.api_key = api_key - - self.model = model - self.system_messages = [] - self.user_messages = [] - - def chat(self, msg: str, setting: Optional[str] = None, model: Optional[str] = None) -> str: - if self._context_length() > 2048: - self.reset() - if setting is not None: - if setting not in self.system_messages: - self.system_messages.append(setting) - if not self.user_messages or msg != self.user_messages[-1]: - self.user_messages.append(msg) - - return self._run(model) - - def retry(self, model: Optional[str] = None) -> str: - return self._run(model) - - def reset(self): - self.system_messages.clear() - self.user_messages.clear() - - def _make_message(self) -> List[Dict]: - sys_messages = [{'role': 'system', 'content': msg} for msg in self.system_messages] - user_messages = [{'role': 'user', 'content': msg} for msg in self.user_messages] - return sys_messages + user_messages - - def 
_context_length(self) -> int: - return len(''.join(self.system_messages)) + len(''.join(self.user_messages)) - - def _run(self, model: Optional[str] = None) -> str: - if model is None: - model = self.model - try: - response = openai.ChatCompletion.create(model=model, messages=self._make_message()) - ans = response['choices'][0]['message']['content'] - ans = re.sub(r'^\n+', '', ans) - except openai.error.OpenAIError as e: - ans = e - except Exception as e: - print(e) - return ans - - def __call__(self, msg: str, setting: Optional[str] = None, model: Optional[str] = None) -> str: - return self.chat(msg, setting, model) diff --git a/spaces/Artrajz/vits-simple-api/templates/index.html b/spaces/Artrajz/vits-simple-api/templates/index.html deleted file mode 100644 index 441bfc95abd4290e969e37ed46971375933fd551..0000000000000000000000000000000000000000 --- a/spaces/Artrajz/vits-simple-api/templates/index.html +++ /dev/null @@ -1,535 +0,0 @@ - - - - - - vits-simple-api - - - - -
-    [index.html markup (535 deleted lines) was lost in extraction; only the template's text content survives. Recoverable content:]
-    Page title: vits-simple-api
-    Audio fallback text (repeated for each player): "Your browser does not support the audio element."
-    {% if speakers_count == 0 %} "No models loaded" (未加载任何模型) {% endif %}
-    API examples shown on the page (a usage sketch follows below this list):
-        https://artrajz-vits-simple-api.hf.space/voice/speakers
-        https://artrajz-vits-simple-api.hf.space/voice/vits?text=你好,こんにちは&id=164
-    Notices: "All models were collected from the web; thanks to the original model authors!" and "Please strictly follow each model author's license terms; most models forbid commercial use."
-    Model credits:
-        Nene_Nanami_Rong_Tang: CjangCjengh/TTSModels
-        louise: CjangCjengh/TTSModels
-        Cantonese: CjangCjengh/TTSModels
-        shanghainese: CjangCjengh/TTSModels
-        w2v2-vits: CjangCjengh/TTSModels
-        vctk: jaywalnut310/vits
-        Bishojo Mangekyo: Francis-Komizu/VITS
-        genshin: zomehwh/vits-uma-genshin-honkai
-        paimon: zixiiu/Digital_Life_Server
-        vits_chinese: PlayVoice/vits_chinese
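The deleted template only lists the two endpoint URLs above; it does not show the response formats. The sketch below is a minimal, hypothetical client for those URLs, assuming `/voice/speakers` returns JSON and `/voice/vits` returns raw audio bytes (e.g. WAV), and using the third-party `requests` library; the output filename is arbitrary.

```python
# Hypothetical client for the two endpoints listed in the deleted template.
# Assumptions (not confirmed by the template): /voice/speakers returns JSON,
# /voice/vits returns raw audio bytes.
import requests

BASE_URL = "https://artrajz-vits-simple-api.hf.space"

# List available speakers (assumed JSON response).
resp = requests.get(f"{BASE_URL}/voice/speakers", timeout=30)
resp.raise_for_status()
print(resp.json())

# Synthesize speech for speaker id 164, mirroring the example URL above.
resp = requests.get(
    f"{BASE_URL}/voice/vits",
    params={"text": "你好,こんにちは", "id": 164},
    timeout=60,
)
resp.raise_for_status()
with open("output.wav", "wb") as f:  # output filename is arbitrary
    f.write(resp.content)
```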
    - - - - - - - \ No newline at end of file diff --git a/spaces/AsakuraMizu/moe-tts/app.py b/spaces/AsakuraMizu/moe-tts/app.py deleted file mode 100644 index fab105f30b61effbc0c083f7293ee5b699e1aafa..0000000000000000000000000000000000000000 --- a/spaces/AsakuraMizu/moe-tts/app.py +++ /dev/null @@ -1,320 +0,0 @@ -import argparse -import json -import os -import re -import tempfile -from pathlib import Path - -import librosa -import numpy as np -import torch -from torch import no_grad, LongTensor -import commons -import utils -import gradio as gr -import gradio.utils as gr_utils -import gradio.processing_utils as gr_processing_utils -from models import SynthesizerTrn -from text import text_to_sequence, _clean_text -from mel_processing import spectrogram_torch - -limitation = os.getenv("SYSTEM") == "spaces" # limit text and audio length in huggingface spaces - -audio_postprocess_ori = gr.Audio.postprocess - - -def audio_postprocess(self, y): - data = audio_postprocess_ori(self, y) - if data is None: - return None - return gr_processing_utils.encode_url_or_file_to_base64(data["name"]) - - -gr.Audio.postprocess = audio_postprocess - - -def get_text(text, hps, is_symbol): - text_norm = text_to_sequence(text, hps.symbols, [] if is_symbol else hps.data.text_cleaners) - if hps.data.add_blank: - text_norm = commons.intersperse(text_norm, 0) - text_norm = LongTensor(text_norm) - return text_norm - - -def create_tts_fn(model, hps, speaker_ids): - def tts_fn(text, speaker, speed, is_symbol): - if limitation: - text_len = len(re.sub("\[([A-Z]{2})\]", "", text)) - max_len = 150 - if is_symbol: - max_len *= 3 - if text_len > max_len: - return "Error: Text is too long", None - - speaker_id = speaker_ids[speaker] - stn_tst = get_text(text, hps, is_symbol) - with no_grad(): - x_tst = stn_tst.unsqueeze(0).to(device) - x_tst_lengths = LongTensor([stn_tst.size(0)]).to(device) - sid = LongTensor([speaker_id]).to(device) - audio = model.infer(x_tst, x_tst_lengths, sid=sid, noise_scale=.667, noise_scale_w=0.8, - length_scale=1.0 / speed)[0][0, 0].data.cpu().float().numpy() - del stn_tst, x_tst, x_tst_lengths, sid - return "Success", (hps.data.sampling_rate, audio) - - return tts_fn - - -def create_vc_fn(model, hps, speaker_ids): - def vc_fn(original_speaker, target_speaker, input_audio): - if input_audio is None: - return "You need to upload an audio", None - sampling_rate, audio = input_audio - duration = audio.shape[0] / sampling_rate - if limitation and duration > 30: - return "Error: Audio is too long", None - original_speaker_id = speaker_ids[original_speaker] - target_speaker_id = speaker_ids[target_speaker] - - audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32) - if len(audio.shape) > 1: - audio = librosa.to_mono(audio.transpose(1, 0)) - if sampling_rate != hps.data.sampling_rate: - audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=hps.data.sampling_rate) - with no_grad(): - y = torch.FloatTensor(audio) - y = y.unsqueeze(0) - spec = spectrogram_torch(y, hps.data.filter_length, - hps.data.sampling_rate, hps.data.hop_length, hps.data.win_length, - center=False).to(device) - spec_lengths = LongTensor([spec.size(-1)]).to(device) - sid_src = LongTensor([original_speaker_id]).to(device) - sid_tgt = LongTensor([target_speaker_id]).to(device) - audio = model.voice_conversion(spec, spec_lengths, sid_src=sid_src, sid_tgt=sid_tgt)[0][ - 0, 0].data.cpu().float().numpy() - del y, spec, spec_lengths, sid_src, sid_tgt - return "Success", (hps.data.sampling_rate, audio) - - return vc_fn - - -def 
create_soft_vc_fn(model, hps, speaker_ids): - def soft_vc_fn(target_speaker, input_audio1, input_audio2): - input_audio = input_audio1 - if input_audio is None: - input_audio = input_audio2 - if input_audio is None: - return "You need to upload an audio", None - sampling_rate, audio = input_audio - duration = audio.shape[0] / sampling_rate - if limitation and duration > 30: - return "Error: Audio is too long", None - target_speaker_id = speaker_ids[target_speaker] - - audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32) - if len(audio.shape) > 1: - audio = librosa.to_mono(audio.transpose(1, 0)) - if sampling_rate != 16000: - audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000) - with torch.inference_mode(): - units = hubert.units(torch.FloatTensor(audio).unsqueeze(0).unsqueeze(0).to(device)) - with no_grad(): - unit_lengths = LongTensor([units.size(1)]).to(device) - sid = LongTensor([target_speaker_id]).to(device) - audio = model.infer(units, unit_lengths, sid=sid, noise_scale=.667, - noise_scale_w=0.8)[0][0, 0].data.cpu().float().numpy() - del units, unit_lengths, sid - return "Success", (hps.data.sampling_rate, audio) - - return soft_vc_fn - - -def create_to_symbol_fn(hps): - def to_symbol_fn(is_symbol_input, input_text, temp_text): - return (_clean_text(input_text, hps.data.text_cleaners), input_text) if is_symbol_input \ - else (temp_text, temp_text) - - return to_symbol_fn - - -download_audio_js = """ -() =>{{ - let root = document.querySelector("body > gradio-app"); - if (root.shadowRoot != null) - root = root.shadowRoot; - let audio = root.querySelector("#{audio_id}").querySelector("audio"); - if (audio == undefined) - return; - audio = audio.src; - let oA = document.createElement("a"); - oA.download = Math.floor(Math.random()*100000000)+'.wav'; - oA.href = audio; - document.body.appendChild(oA); - oA.click(); - oA.remove(); -}} -""" - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--device', type=str, default='cpu') - parser.add_argument("--share", action="store_true", default=False, help="share gradio app") - args = parser.parse_args() - - device = torch.device(args.device) - models_tts = [] - models_vc = [] - models_soft_vc = [] - with open("saved_model/info.json", "r", encoding="utf-8") as f: - models_info = json.load(f) - for i, info in models_info.items(): - name = info["title"] - author = info["author"] - lang = info["lang"] - example = info["example"] - config_path = f"saved_model/{i}/config.json" - model_path = f"saved_model/{i}/model.pth" - cover = info["cover"] - cover_path = f"saved_model/{i}/{cover}" if cover else None - hps = utils.get_hparams_from_file(config_path) - model = SynthesizerTrn( - len(hps.symbols), - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - n_speakers=hps.data.n_speakers, - **hps.model) - utils.load_checkpoint(model_path, model, None) - model.eval().to(device) - speaker_ids = [sid for sid, name in enumerate(hps.speakers) if name != "None"] - speakers = [name for sid, name in enumerate(hps.speakers) if name != "None"] - - t = info["type"] - if t == "vits": - models_tts.append((name, author, cover_path, speakers, lang, example, - hps.symbols, create_tts_fn(model, hps, speaker_ids), - create_to_symbol_fn(hps))) - models_vc.append((name, author, cover_path, speakers, create_vc_fn(model, hps, speaker_ids))) - elif t == "soft-vits-vc": - models_soft_vc.append((name, author, cover_path, speakers, create_soft_vc_fn(model, hps, speaker_ids))) - - hubert 
= torch.hub.load("bshall/hubert:main", "hubert_soft", trust_repo=True).to(device) - - app = gr.Blocks() - - with app: - gr.Markdown("# Moe TTS And Voice Conversion Using VITS Model\n\n" - "![visitor badge](https://visitor-badge.glitch.me/badge?page_id=skytnt.moegoe)\n\n" - "[Open In Colab]" - "(https://colab.research.google.com/drive/14Pb8lpmwZL-JI5Ub6jpG4sz2-8KS0kbS?usp=sharing)" - " without queue and length limitation.\n\n" - "Feel free to [open discussion](https://huggingface.co/spaces/skytnt/moe-tts/discussions/new) " - "if you want to add your model to this app.") - with gr.Tabs(): - with gr.TabItem("TTS"): - with gr.Tabs(): - for i, (name, author, cover_path, speakers, lang, example, symbols, tts_fn, - to_symbol_fn) in enumerate(models_tts): - with gr.TabItem(f"model{i}"): - with gr.Column(): - cover_markdown = f"![cover](file/{cover_path})\n\n" if cover_path else "" - gr.Markdown(f"## {name}\n\n" - f"{cover_markdown}" - f"model author: {author}\n\n" - f"language: {lang}") - tts_input1 = gr.TextArea(label="Text (150 words limitation)", value=example, - elem_id=f"tts-input{i}") - tts_input2 = gr.Dropdown(label="Speaker", choices=speakers, - type="index", value=speakers[0]) - tts_input3 = gr.Slider(label="Speed", value=1, minimum=0.5, maximum=2, step=0.1) - with gr.Accordion(label="Advanced Options", open=False): - temp_text_var = gr.Variable() - symbol_input = gr.Checkbox(value=False, label="Symbol input") - symbol_list = gr.Dataset(label="Symbol list", components=[tts_input1], - samples=[[x] for x in symbols], - elem_id=f"symbol-list{i}") - symbol_list_json = gr.Json(value=symbols, visible=False) - tts_submit = gr.Button("Generate", variant="primary") - tts_output1 = gr.Textbox(label="Output Message") - tts_output2 = gr.Audio(label="Output Audio", elem_id=f"tts-audio{i}") - download = gr.Button("Download Audio") - download.click(None, [], [], _js=download_audio_js.format(audio_id=f"tts-audio{i}")) - - tts_submit.click(tts_fn, [tts_input1, tts_input2, tts_input3, symbol_input], - [tts_output1, tts_output2], api_name=f"tts-model{i}") - symbol_input.change(to_symbol_fn, - [symbol_input, tts_input1, temp_text_var], - [tts_input1, temp_text_var]) - symbol_list.click(None, [symbol_list, symbol_list_json], [], - _js=f""" - (i,symbols) => {{ - let root = document.querySelector("body > gradio-app"); - if (root.shadowRoot != null) - root = root.shadowRoot; - let text_input = root.querySelector("#tts-input{i}").querySelector("textarea"); - let startPos = text_input.selectionStart; - let endPos = text_input.selectionEnd; - let oldTxt = text_input.value; - let result = oldTxt.substring(0, startPos) + symbols[i] + oldTxt.substring(endPos); - text_input.value = result; - let x = window.scrollX, y = window.scrollY; - text_input.focus(); - text_input.selectionStart = startPos + symbols[i].length; - text_input.selectionEnd = startPos + symbols[i].length; - text_input.blur(); - window.scrollTo(x, y); - return []; - }}""") - - with gr.TabItem("Voice Conversion"): - with gr.Tabs(): - for i, (name, author, cover_path, speakers, vc_fn) in enumerate(models_vc): - with gr.TabItem(f"model{i}"): - cover_markdown = f"![cover](file/{cover_path})\n\n" if cover_path else "" - gr.Markdown(f"## {name}\n\n" - f"{cover_markdown}" - f"model author: {author}") - vc_input1 = gr.Dropdown(label="Original Speaker", choices=speakers, type="index", - value=speakers[0]) - vc_input2 = gr.Dropdown(label="Target Speaker", choices=speakers, type="index", - value=speakers[min(len(speakers) - 1, 1)]) - vc_input3 = 
gr.Audio(label="Input Audio (30s limitation)") - vc_submit = gr.Button("Convert", variant="primary") - vc_output1 = gr.Textbox(label="Output Message") - vc_output2 = gr.Audio(label="Output Audio", elem_id=f"vc-audio{i}") - download = gr.Button("Download Audio") - download.click(None, [], [], _js=download_audio_js.format(audio_id=f"vc-audio{i}")) - vc_submit.click(vc_fn, [vc_input1, vc_input2, vc_input3], [vc_output1, vc_output2], api_name=f"vc-model{i}") - with gr.TabItem("Soft Voice Conversion"): - with gr.Tabs(): - for i, (name, author, cover_path, speakers, soft_vc_fn) in enumerate(models_soft_vc): - with gr.TabItem(f"model{i}"): - cover_markdown = f"![cover](file/{cover_path})\n\n" if cover_path else "" - gr.Markdown(f"## {name}\n\n" - f"{cover_markdown}" - f"model author: {author}") - vc_input1 = gr.Dropdown(label="Target Speaker", choices=speakers, type="index", - value=speakers[0]) - source_tabs = gr.Tabs() - with source_tabs: - with gr.TabItem("microphone"): - vc_input2 = gr.Audio(label="Input Audio (30s limitation)", source="microphone") - with gr.TabItem("upload"): - vc_input3 = gr.Audio(label="Input Audio (30s limitation)", source="upload") - vc_submit = gr.Button("Convert", variant="primary") - vc_output1 = gr.Textbox(label="Output Message") - vc_output2 = gr.Audio(label="Output Audio", elem_id=f"svc-audio{i}") - download = gr.Button("Download Audio") - download.click(None, [], [], _js=download_audio_js.format(audio_id=f"svc-audio{i}")) - # clear inputs - source_tabs.set_event_trigger("change", None, [], [vc_input2, vc_input3], - js="()=>[null,null]") - vc_submit.click(soft_vc_fn, [vc_input1, vc_input2, vc_input3], - [vc_output1, vc_output2], api_name=f"svc-model{i}") - gr.Markdown( - "unofficial demo for \n\n" - "- [https://github.com/CjangCjengh/MoeGoe](https://github.com/CjangCjengh/MoeGoe)\n" - "- [https://github.com/Francis-Komizu/VITS](https://github.com/Francis-Komizu/VITS)\n" - "- [https://github.com/luoyily/MoeTTS](https://github.com/luoyily/MoeTTS)\n" - "- [https://github.com/Francis-Komizu/Sovits](https://github.com/Francis-Komizu/Sovits)" - ) - app.queue(concurrency_count=3).launch(share=args.share) diff --git a/spaces/Ash123/stable-diffusion-nano/README.md b/spaces/Ash123/stable-diffusion-nano/README.md deleted file mode 100644 index 2680444393ecc4b61cecda71183a16c3d2d0bba7..0000000000000000000000000000000000000000 --- a/spaces/Ash123/stable-diffusion-nano/README.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: Stable Diffusion Nano -emoji: 📊 -colorFrom: indigo -colorTo: pink -sdk: gradio -sdk_version: 3.28.3 -app_file: app.py -pinned: false -license: creativeml-openrail-m -tags: -- jax-diffusers-event -- stable-diffusion ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/idna/uts46data.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/idna/uts46data.py deleted file mode 100644 index 186796c17b25c1e766112ef4d9f16bb2dea4b306..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/idna/uts46data.py +++ /dev/null @@ -1,8600 +0,0 @@ -# This file is automatically generated by tools/idna-data -# vim: set fileencoding=utf-8 : - -from typing import List, Tuple, Union - - -"""IDNA Mapping Table from UTS46.""" - - -__version__ = '15.0.0' -def _seg_0() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - 
return [ - (0x0, '3'), - (0x1, '3'), - (0x2, '3'), - (0x3, '3'), - (0x4, '3'), - (0x5, '3'), - (0x6, '3'), - (0x7, '3'), - (0x8, '3'), - (0x9, '3'), - (0xA, '3'), - (0xB, '3'), - (0xC, '3'), - (0xD, '3'), - (0xE, '3'), - (0xF, '3'), - (0x10, '3'), - (0x11, '3'), - (0x12, '3'), - (0x13, '3'), - (0x14, '3'), - (0x15, '3'), - (0x16, '3'), - (0x17, '3'), - (0x18, '3'), - (0x19, '3'), - (0x1A, '3'), - (0x1B, '3'), - (0x1C, '3'), - (0x1D, '3'), - (0x1E, '3'), - (0x1F, '3'), - (0x20, '3'), - (0x21, '3'), - (0x22, '3'), - (0x23, '3'), - (0x24, '3'), - (0x25, '3'), - (0x26, '3'), - (0x27, '3'), - (0x28, '3'), - (0x29, '3'), - (0x2A, '3'), - (0x2B, '3'), - (0x2C, '3'), - (0x2D, 'V'), - (0x2E, 'V'), - (0x2F, '3'), - (0x30, 'V'), - (0x31, 'V'), - (0x32, 'V'), - (0x33, 'V'), - (0x34, 'V'), - (0x35, 'V'), - (0x36, 'V'), - (0x37, 'V'), - (0x38, 'V'), - (0x39, 'V'), - (0x3A, '3'), - (0x3B, '3'), - (0x3C, '3'), - (0x3D, '3'), - (0x3E, '3'), - (0x3F, '3'), - (0x40, '3'), - (0x41, 'M', 'a'), - (0x42, 'M', 'b'), - (0x43, 'M', 'c'), - (0x44, 'M', 'd'), - (0x45, 'M', 'e'), - (0x46, 'M', 'f'), - (0x47, 'M', 'g'), - (0x48, 'M', 'h'), - (0x49, 'M', 'i'), - (0x4A, 'M', 'j'), - (0x4B, 'M', 'k'), - (0x4C, 'M', 'l'), - (0x4D, 'M', 'm'), - (0x4E, 'M', 'n'), - (0x4F, 'M', 'o'), - (0x50, 'M', 'p'), - (0x51, 'M', 'q'), - (0x52, 'M', 'r'), - (0x53, 'M', 's'), - (0x54, 'M', 't'), - (0x55, 'M', 'u'), - (0x56, 'M', 'v'), - (0x57, 'M', 'w'), - (0x58, 'M', 'x'), - (0x59, 'M', 'y'), - (0x5A, 'M', 'z'), - (0x5B, '3'), - (0x5C, '3'), - (0x5D, '3'), - (0x5E, '3'), - (0x5F, '3'), - (0x60, '3'), - (0x61, 'V'), - (0x62, 'V'), - (0x63, 'V'), - ] - -def _seg_1() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x64, 'V'), - (0x65, 'V'), - (0x66, 'V'), - (0x67, 'V'), - (0x68, 'V'), - (0x69, 'V'), - (0x6A, 'V'), - (0x6B, 'V'), - (0x6C, 'V'), - (0x6D, 'V'), - (0x6E, 'V'), - (0x6F, 'V'), - (0x70, 'V'), - (0x71, 'V'), - (0x72, 'V'), - (0x73, 'V'), - (0x74, 'V'), - (0x75, 'V'), - (0x76, 'V'), - (0x77, 'V'), - (0x78, 'V'), - (0x79, 'V'), - (0x7A, 'V'), - (0x7B, '3'), - (0x7C, '3'), - (0x7D, '3'), - (0x7E, '3'), - (0x7F, '3'), - (0x80, 'X'), - (0x81, 'X'), - (0x82, 'X'), - (0x83, 'X'), - (0x84, 'X'), - (0x85, 'X'), - (0x86, 'X'), - (0x87, 'X'), - (0x88, 'X'), - (0x89, 'X'), - (0x8A, 'X'), - (0x8B, 'X'), - (0x8C, 'X'), - (0x8D, 'X'), - (0x8E, 'X'), - (0x8F, 'X'), - (0x90, 'X'), - (0x91, 'X'), - (0x92, 'X'), - (0x93, 'X'), - (0x94, 'X'), - (0x95, 'X'), - (0x96, 'X'), - (0x97, 'X'), - (0x98, 'X'), - (0x99, 'X'), - (0x9A, 'X'), - (0x9B, 'X'), - (0x9C, 'X'), - (0x9D, 'X'), - (0x9E, 'X'), - (0x9F, 'X'), - (0xA0, '3', ' '), - (0xA1, 'V'), - (0xA2, 'V'), - (0xA3, 'V'), - (0xA4, 'V'), - (0xA5, 'V'), - (0xA6, 'V'), - (0xA7, 'V'), - (0xA8, '3', ' ̈'), - (0xA9, 'V'), - (0xAA, 'M', 'a'), - (0xAB, 'V'), - (0xAC, 'V'), - (0xAD, 'I'), - (0xAE, 'V'), - (0xAF, '3', ' ̄'), - (0xB0, 'V'), - (0xB1, 'V'), - (0xB2, 'M', '2'), - (0xB3, 'M', '3'), - (0xB4, '3', ' ́'), - (0xB5, 'M', 'μ'), - (0xB6, 'V'), - (0xB7, 'V'), - (0xB8, '3', ' ̧'), - (0xB9, 'M', '1'), - (0xBA, 'M', 'o'), - (0xBB, 'V'), - (0xBC, 'M', '1⁄4'), - (0xBD, 'M', '1⁄2'), - (0xBE, 'M', '3⁄4'), - (0xBF, 'V'), - (0xC0, 'M', 'à'), - (0xC1, 'M', 'á'), - (0xC2, 'M', 'â'), - (0xC3, 'M', 'ã'), - (0xC4, 'M', 'ä'), - (0xC5, 'M', 'å'), - (0xC6, 'M', 'æ'), - (0xC7, 'M', 'ç'), - ] - -def _seg_2() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xC8, 'M', 'è'), - (0xC9, 'M', 'é'), - (0xCA, 'M', 'ê'), - (0xCB, 'M', 'ë'), - (0xCC, 'M', 'ì'), - (0xCD, 'M', 'í'), - (0xCE, 'M', 'î'), 
- (0xCF, 'M', 'ï'), - (0xD0, 'M', 'ð'), - (0xD1, 'M', 'ñ'), - (0xD2, 'M', 'ò'), - (0xD3, 'M', 'ó'), - (0xD4, 'M', 'ô'), - (0xD5, 'M', 'õ'), - (0xD6, 'M', 'ö'), - (0xD7, 'V'), - (0xD8, 'M', 'ø'), - (0xD9, 'M', 'ù'), - (0xDA, 'M', 'ú'), - (0xDB, 'M', 'û'), - (0xDC, 'M', 'ü'), - (0xDD, 'M', 'ý'), - (0xDE, 'M', 'þ'), - (0xDF, 'D', 'ss'), - (0xE0, 'V'), - (0xE1, 'V'), - (0xE2, 'V'), - (0xE3, 'V'), - (0xE4, 'V'), - (0xE5, 'V'), - (0xE6, 'V'), - (0xE7, 'V'), - (0xE8, 'V'), - (0xE9, 'V'), - (0xEA, 'V'), - (0xEB, 'V'), - (0xEC, 'V'), - (0xED, 'V'), - (0xEE, 'V'), - (0xEF, 'V'), - (0xF0, 'V'), - (0xF1, 'V'), - (0xF2, 'V'), - (0xF3, 'V'), - (0xF4, 'V'), - (0xF5, 'V'), - (0xF6, 'V'), - (0xF7, 'V'), - (0xF8, 'V'), - (0xF9, 'V'), - (0xFA, 'V'), - (0xFB, 'V'), - (0xFC, 'V'), - (0xFD, 'V'), - (0xFE, 'V'), - (0xFF, 'V'), - (0x100, 'M', 'ā'), - (0x101, 'V'), - (0x102, 'M', 'ă'), - (0x103, 'V'), - (0x104, 'M', 'ą'), - (0x105, 'V'), - (0x106, 'M', 'ć'), - (0x107, 'V'), - (0x108, 'M', 'ĉ'), - (0x109, 'V'), - (0x10A, 'M', 'ċ'), - (0x10B, 'V'), - (0x10C, 'M', 'č'), - (0x10D, 'V'), - (0x10E, 'M', 'ď'), - (0x10F, 'V'), - (0x110, 'M', 'đ'), - (0x111, 'V'), - (0x112, 'M', 'ē'), - (0x113, 'V'), - (0x114, 'M', 'ĕ'), - (0x115, 'V'), - (0x116, 'M', 'ė'), - (0x117, 'V'), - (0x118, 'M', 'ę'), - (0x119, 'V'), - (0x11A, 'M', 'ě'), - (0x11B, 'V'), - (0x11C, 'M', 'ĝ'), - (0x11D, 'V'), - (0x11E, 'M', 'ğ'), - (0x11F, 'V'), - (0x120, 'M', 'ġ'), - (0x121, 'V'), - (0x122, 'M', 'ģ'), - (0x123, 'V'), - (0x124, 'M', 'ĥ'), - (0x125, 'V'), - (0x126, 'M', 'ħ'), - (0x127, 'V'), - (0x128, 'M', 'ĩ'), - (0x129, 'V'), - (0x12A, 'M', 'ī'), - (0x12B, 'V'), - ] - -def _seg_3() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x12C, 'M', 'ĭ'), - (0x12D, 'V'), - (0x12E, 'M', 'į'), - (0x12F, 'V'), - (0x130, 'M', 'i̇'), - (0x131, 'V'), - (0x132, 'M', 'ij'), - (0x134, 'M', 'ĵ'), - (0x135, 'V'), - (0x136, 'M', 'ķ'), - (0x137, 'V'), - (0x139, 'M', 'ĺ'), - (0x13A, 'V'), - (0x13B, 'M', 'ļ'), - (0x13C, 'V'), - (0x13D, 'M', 'ľ'), - (0x13E, 'V'), - (0x13F, 'M', 'l·'), - (0x141, 'M', 'ł'), - (0x142, 'V'), - (0x143, 'M', 'ń'), - (0x144, 'V'), - (0x145, 'M', 'ņ'), - (0x146, 'V'), - (0x147, 'M', 'ň'), - (0x148, 'V'), - (0x149, 'M', 'ʼn'), - (0x14A, 'M', 'ŋ'), - (0x14B, 'V'), - (0x14C, 'M', 'ō'), - (0x14D, 'V'), - (0x14E, 'M', 'ŏ'), - (0x14F, 'V'), - (0x150, 'M', 'ő'), - (0x151, 'V'), - (0x152, 'M', 'œ'), - (0x153, 'V'), - (0x154, 'M', 'ŕ'), - (0x155, 'V'), - (0x156, 'M', 'ŗ'), - (0x157, 'V'), - (0x158, 'M', 'ř'), - (0x159, 'V'), - (0x15A, 'M', 'ś'), - (0x15B, 'V'), - (0x15C, 'M', 'ŝ'), - (0x15D, 'V'), - (0x15E, 'M', 'ş'), - (0x15F, 'V'), - (0x160, 'M', 'š'), - (0x161, 'V'), - (0x162, 'M', 'ţ'), - (0x163, 'V'), - (0x164, 'M', 'ť'), - (0x165, 'V'), - (0x166, 'M', 'ŧ'), - (0x167, 'V'), - (0x168, 'M', 'ũ'), - (0x169, 'V'), - (0x16A, 'M', 'ū'), - (0x16B, 'V'), - (0x16C, 'M', 'ŭ'), - (0x16D, 'V'), - (0x16E, 'M', 'ů'), - (0x16F, 'V'), - (0x170, 'M', 'ű'), - (0x171, 'V'), - (0x172, 'M', 'ų'), - (0x173, 'V'), - (0x174, 'M', 'ŵ'), - (0x175, 'V'), - (0x176, 'M', 'ŷ'), - (0x177, 'V'), - (0x178, 'M', 'ÿ'), - (0x179, 'M', 'ź'), - (0x17A, 'V'), - (0x17B, 'M', 'ż'), - (0x17C, 'V'), - (0x17D, 'M', 'ž'), - (0x17E, 'V'), - (0x17F, 'M', 's'), - (0x180, 'V'), - (0x181, 'M', 'ɓ'), - (0x182, 'M', 'ƃ'), - (0x183, 'V'), - (0x184, 'M', 'ƅ'), - (0x185, 'V'), - (0x186, 'M', 'ɔ'), - (0x187, 'M', 'ƈ'), - (0x188, 'V'), - (0x189, 'M', 'ɖ'), - (0x18A, 'M', 'ɗ'), - (0x18B, 'M', 'ƌ'), - (0x18C, 'V'), - (0x18E, 'M', 'ǝ'), - (0x18F, 'M', 'ə'), - (0x190, 'M', 'ɛ'), - (0x191, 
'M', 'ƒ'), - (0x192, 'V'), - (0x193, 'M', 'ɠ'), - ] - -def _seg_4() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x194, 'M', 'ɣ'), - (0x195, 'V'), - (0x196, 'M', 'ɩ'), - (0x197, 'M', 'ɨ'), - (0x198, 'M', 'ƙ'), - (0x199, 'V'), - (0x19C, 'M', 'ɯ'), - (0x19D, 'M', 'ɲ'), - (0x19E, 'V'), - (0x19F, 'M', 'ɵ'), - (0x1A0, 'M', 'ơ'), - (0x1A1, 'V'), - (0x1A2, 'M', 'ƣ'), - (0x1A3, 'V'), - (0x1A4, 'M', 'ƥ'), - (0x1A5, 'V'), - (0x1A6, 'M', 'ʀ'), - (0x1A7, 'M', 'ƨ'), - (0x1A8, 'V'), - (0x1A9, 'M', 'ʃ'), - (0x1AA, 'V'), - (0x1AC, 'M', 'ƭ'), - (0x1AD, 'V'), - (0x1AE, 'M', 'ʈ'), - (0x1AF, 'M', 'ư'), - (0x1B0, 'V'), - (0x1B1, 'M', 'ʊ'), - (0x1B2, 'M', 'ʋ'), - (0x1B3, 'M', 'ƴ'), - (0x1B4, 'V'), - (0x1B5, 'M', 'ƶ'), - (0x1B6, 'V'), - (0x1B7, 'M', 'ʒ'), - (0x1B8, 'M', 'ƹ'), - (0x1B9, 'V'), - (0x1BC, 'M', 'ƽ'), - (0x1BD, 'V'), - (0x1C4, 'M', 'dž'), - (0x1C7, 'M', 'lj'), - (0x1CA, 'M', 'nj'), - (0x1CD, 'M', 'ǎ'), - (0x1CE, 'V'), - (0x1CF, 'M', 'ǐ'), - (0x1D0, 'V'), - (0x1D1, 'M', 'ǒ'), - (0x1D2, 'V'), - (0x1D3, 'M', 'ǔ'), - (0x1D4, 'V'), - (0x1D5, 'M', 'ǖ'), - (0x1D6, 'V'), - (0x1D7, 'M', 'ǘ'), - (0x1D8, 'V'), - (0x1D9, 'M', 'ǚ'), - (0x1DA, 'V'), - (0x1DB, 'M', 'ǜ'), - (0x1DC, 'V'), - (0x1DE, 'M', 'ǟ'), - (0x1DF, 'V'), - (0x1E0, 'M', 'ǡ'), - (0x1E1, 'V'), - (0x1E2, 'M', 'ǣ'), - (0x1E3, 'V'), - (0x1E4, 'M', 'ǥ'), - (0x1E5, 'V'), - (0x1E6, 'M', 'ǧ'), - (0x1E7, 'V'), - (0x1E8, 'M', 'ǩ'), - (0x1E9, 'V'), - (0x1EA, 'M', 'ǫ'), - (0x1EB, 'V'), - (0x1EC, 'M', 'ǭ'), - (0x1ED, 'V'), - (0x1EE, 'M', 'ǯ'), - (0x1EF, 'V'), - (0x1F1, 'M', 'dz'), - (0x1F4, 'M', 'ǵ'), - (0x1F5, 'V'), - (0x1F6, 'M', 'ƕ'), - (0x1F7, 'M', 'ƿ'), - (0x1F8, 'M', 'ǹ'), - (0x1F9, 'V'), - (0x1FA, 'M', 'ǻ'), - (0x1FB, 'V'), - (0x1FC, 'M', 'ǽ'), - (0x1FD, 'V'), - (0x1FE, 'M', 'ǿ'), - (0x1FF, 'V'), - (0x200, 'M', 'ȁ'), - (0x201, 'V'), - (0x202, 'M', 'ȃ'), - (0x203, 'V'), - (0x204, 'M', 'ȅ'), - (0x205, 'V'), - (0x206, 'M', 'ȇ'), - (0x207, 'V'), - (0x208, 'M', 'ȉ'), - (0x209, 'V'), - (0x20A, 'M', 'ȋ'), - (0x20B, 'V'), - (0x20C, 'M', 'ȍ'), - ] - -def _seg_5() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x20D, 'V'), - (0x20E, 'M', 'ȏ'), - (0x20F, 'V'), - (0x210, 'M', 'ȑ'), - (0x211, 'V'), - (0x212, 'M', 'ȓ'), - (0x213, 'V'), - (0x214, 'M', 'ȕ'), - (0x215, 'V'), - (0x216, 'M', 'ȗ'), - (0x217, 'V'), - (0x218, 'M', 'ș'), - (0x219, 'V'), - (0x21A, 'M', 'ț'), - (0x21B, 'V'), - (0x21C, 'M', 'ȝ'), - (0x21D, 'V'), - (0x21E, 'M', 'ȟ'), - (0x21F, 'V'), - (0x220, 'M', 'ƞ'), - (0x221, 'V'), - (0x222, 'M', 'ȣ'), - (0x223, 'V'), - (0x224, 'M', 'ȥ'), - (0x225, 'V'), - (0x226, 'M', 'ȧ'), - (0x227, 'V'), - (0x228, 'M', 'ȩ'), - (0x229, 'V'), - (0x22A, 'M', 'ȫ'), - (0x22B, 'V'), - (0x22C, 'M', 'ȭ'), - (0x22D, 'V'), - (0x22E, 'M', 'ȯ'), - (0x22F, 'V'), - (0x230, 'M', 'ȱ'), - (0x231, 'V'), - (0x232, 'M', 'ȳ'), - (0x233, 'V'), - (0x23A, 'M', 'ⱥ'), - (0x23B, 'M', 'ȼ'), - (0x23C, 'V'), - (0x23D, 'M', 'ƚ'), - (0x23E, 'M', 'ⱦ'), - (0x23F, 'V'), - (0x241, 'M', 'ɂ'), - (0x242, 'V'), - (0x243, 'M', 'ƀ'), - (0x244, 'M', 'ʉ'), - (0x245, 'M', 'ʌ'), - (0x246, 'M', 'ɇ'), - (0x247, 'V'), - (0x248, 'M', 'ɉ'), - (0x249, 'V'), - (0x24A, 'M', 'ɋ'), - (0x24B, 'V'), - (0x24C, 'M', 'ɍ'), - (0x24D, 'V'), - (0x24E, 'M', 'ɏ'), - (0x24F, 'V'), - (0x2B0, 'M', 'h'), - (0x2B1, 'M', 'ɦ'), - (0x2B2, 'M', 'j'), - (0x2B3, 'M', 'r'), - (0x2B4, 'M', 'ɹ'), - (0x2B5, 'M', 'ɻ'), - (0x2B6, 'M', 'ʁ'), - (0x2B7, 'M', 'w'), - (0x2B8, 'M', 'y'), - (0x2B9, 'V'), - (0x2D8, '3', ' ̆'), - (0x2D9, '3', ' ̇'), - (0x2DA, '3', ' ̊'), - (0x2DB, '3', ' ̨'), - (0x2DC, '3', ' ̃'), - 
(0x2DD, '3', ' ̋'), - (0x2DE, 'V'), - (0x2E0, 'M', 'ɣ'), - (0x2E1, 'M', 'l'), - (0x2E2, 'M', 's'), - (0x2E3, 'M', 'x'), - (0x2E4, 'M', 'ʕ'), - (0x2E5, 'V'), - (0x340, 'M', '̀'), - (0x341, 'M', '́'), - (0x342, 'V'), - (0x343, 'M', '̓'), - (0x344, 'M', '̈́'), - (0x345, 'M', 'ι'), - (0x346, 'V'), - (0x34F, 'I'), - (0x350, 'V'), - (0x370, 'M', 'ͱ'), - (0x371, 'V'), - (0x372, 'M', 'ͳ'), - (0x373, 'V'), - (0x374, 'M', 'ʹ'), - (0x375, 'V'), - (0x376, 'M', 'ͷ'), - (0x377, 'V'), - ] - -def _seg_6() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x378, 'X'), - (0x37A, '3', ' ι'), - (0x37B, 'V'), - (0x37E, '3', ';'), - (0x37F, 'M', 'ϳ'), - (0x380, 'X'), - (0x384, '3', ' ́'), - (0x385, '3', ' ̈́'), - (0x386, 'M', 'ά'), - (0x387, 'M', '·'), - (0x388, 'M', 'έ'), - (0x389, 'M', 'ή'), - (0x38A, 'M', 'ί'), - (0x38B, 'X'), - (0x38C, 'M', 'ό'), - (0x38D, 'X'), - (0x38E, 'M', 'ύ'), - (0x38F, 'M', 'ώ'), - (0x390, 'V'), - (0x391, 'M', 'α'), - (0x392, 'M', 'β'), - (0x393, 'M', 'γ'), - (0x394, 'M', 'δ'), - (0x395, 'M', 'ε'), - (0x396, 'M', 'ζ'), - (0x397, 'M', 'η'), - (0x398, 'M', 'θ'), - (0x399, 'M', 'ι'), - (0x39A, 'M', 'κ'), - (0x39B, 'M', 'λ'), - (0x39C, 'M', 'μ'), - (0x39D, 'M', 'ν'), - (0x39E, 'M', 'ξ'), - (0x39F, 'M', 'ο'), - (0x3A0, 'M', 'π'), - (0x3A1, 'M', 'ρ'), - (0x3A2, 'X'), - (0x3A3, 'M', 'σ'), - (0x3A4, 'M', 'τ'), - (0x3A5, 'M', 'υ'), - (0x3A6, 'M', 'φ'), - (0x3A7, 'M', 'χ'), - (0x3A8, 'M', 'ψ'), - (0x3A9, 'M', 'ω'), - (0x3AA, 'M', 'ϊ'), - (0x3AB, 'M', 'ϋ'), - (0x3AC, 'V'), - (0x3C2, 'D', 'σ'), - (0x3C3, 'V'), - (0x3CF, 'M', 'ϗ'), - (0x3D0, 'M', 'β'), - (0x3D1, 'M', 'θ'), - (0x3D2, 'M', 'υ'), - (0x3D3, 'M', 'ύ'), - (0x3D4, 'M', 'ϋ'), - (0x3D5, 'M', 'φ'), - (0x3D6, 'M', 'π'), - (0x3D7, 'V'), - (0x3D8, 'M', 'ϙ'), - (0x3D9, 'V'), - (0x3DA, 'M', 'ϛ'), - (0x3DB, 'V'), - (0x3DC, 'M', 'ϝ'), - (0x3DD, 'V'), - (0x3DE, 'M', 'ϟ'), - (0x3DF, 'V'), - (0x3E0, 'M', 'ϡ'), - (0x3E1, 'V'), - (0x3E2, 'M', 'ϣ'), - (0x3E3, 'V'), - (0x3E4, 'M', 'ϥ'), - (0x3E5, 'V'), - (0x3E6, 'M', 'ϧ'), - (0x3E7, 'V'), - (0x3E8, 'M', 'ϩ'), - (0x3E9, 'V'), - (0x3EA, 'M', 'ϫ'), - (0x3EB, 'V'), - (0x3EC, 'M', 'ϭ'), - (0x3ED, 'V'), - (0x3EE, 'M', 'ϯ'), - (0x3EF, 'V'), - (0x3F0, 'M', 'κ'), - (0x3F1, 'M', 'ρ'), - (0x3F2, 'M', 'σ'), - (0x3F3, 'V'), - (0x3F4, 'M', 'θ'), - (0x3F5, 'M', 'ε'), - (0x3F6, 'V'), - (0x3F7, 'M', 'ϸ'), - (0x3F8, 'V'), - (0x3F9, 'M', 'σ'), - (0x3FA, 'M', 'ϻ'), - (0x3FB, 'V'), - (0x3FD, 'M', 'ͻ'), - (0x3FE, 'M', 'ͼ'), - (0x3FF, 'M', 'ͽ'), - (0x400, 'M', 'ѐ'), - (0x401, 'M', 'ё'), - (0x402, 'M', 'ђ'), - ] - -def _seg_7() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x403, 'M', 'ѓ'), - (0x404, 'M', 'є'), - (0x405, 'M', 'ѕ'), - (0x406, 'M', 'і'), - (0x407, 'M', 'ї'), - (0x408, 'M', 'ј'), - (0x409, 'M', 'љ'), - (0x40A, 'M', 'њ'), - (0x40B, 'M', 'ћ'), - (0x40C, 'M', 'ќ'), - (0x40D, 'M', 'ѝ'), - (0x40E, 'M', 'ў'), - (0x40F, 'M', 'џ'), - (0x410, 'M', 'а'), - (0x411, 'M', 'б'), - (0x412, 'M', 'в'), - (0x413, 'M', 'г'), - (0x414, 'M', 'д'), - (0x415, 'M', 'е'), - (0x416, 'M', 'ж'), - (0x417, 'M', 'з'), - (0x418, 'M', 'и'), - (0x419, 'M', 'й'), - (0x41A, 'M', 'к'), - (0x41B, 'M', 'л'), - (0x41C, 'M', 'м'), - (0x41D, 'M', 'н'), - (0x41E, 'M', 'о'), - (0x41F, 'M', 'п'), - (0x420, 'M', 'р'), - (0x421, 'M', 'с'), - (0x422, 'M', 'т'), - (0x423, 'M', 'у'), - (0x424, 'M', 'ф'), - (0x425, 'M', 'х'), - (0x426, 'M', 'ц'), - (0x427, 'M', 'ч'), - (0x428, 'M', 'ш'), - (0x429, 'M', 'щ'), - (0x42A, 'M', 'ъ'), - (0x42B, 'M', 'ы'), - (0x42C, 'M', 'ь'), - (0x42D, 'M', 'э'), - (0x42E, 'M', 'ю'), - (0x42F, 
'M', 'я'), - (0x430, 'V'), - (0x460, 'M', 'ѡ'), - (0x461, 'V'), - (0x462, 'M', 'ѣ'), - (0x463, 'V'), - (0x464, 'M', 'ѥ'), - (0x465, 'V'), - (0x466, 'M', 'ѧ'), - (0x467, 'V'), - (0x468, 'M', 'ѩ'), - (0x469, 'V'), - (0x46A, 'M', 'ѫ'), - (0x46B, 'V'), - (0x46C, 'M', 'ѭ'), - (0x46D, 'V'), - (0x46E, 'M', 'ѯ'), - (0x46F, 'V'), - (0x470, 'M', 'ѱ'), - (0x471, 'V'), - (0x472, 'M', 'ѳ'), - (0x473, 'V'), - (0x474, 'M', 'ѵ'), - (0x475, 'V'), - (0x476, 'M', 'ѷ'), - (0x477, 'V'), - (0x478, 'M', 'ѹ'), - (0x479, 'V'), - (0x47A, 'M', 'ѻ'), - (0x47B, 'V'), - (0x47C, 'M', 'ѽ'), - (0x47D, 'V'), - (0x47E, 'M', 'ѿ'), - (0x47F, 'V'), - (0x480, 'M', 'ҁ'), - (0x481, 'V'), - (0x48A, 'M', 'ҋ'), - (0x48B, 'V'), - (0x48C, 'M', 'ҍ'), - (0x48D, 'V'), - (0x48E, 'M', 'ҏ'), - (0x48F, 'V'), - (0x490, 'M', 'ґ'), - (0x491, 'V'), - (0x492, 'M', 'ғ'), - (0x493, 'V'), - (0x494, 'M', 'ҕ'), - (0x495, 'V'), - (0x496, 'M', 'җ'), - (0x497, 'V'), - (0x498, 'M', 'ҙ'), - (0x499, 'V'), - (0x49A, 'M', 'қ'), - (0x49B, 'V'), - (0x49C, 'M', 'ҝ'), - (0x49D, 'V'), - ] - -def _seg_8() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x49E, 'M', 'ҟ'), - (0x49F, 'V'), - (0x4A0, 'M', 'ҡ'), - (0x4A1, 'V'), - (0x4A2, 'M', 'ң'), - (0x4A3, 'V'), - (0x4A4, 'M', 'ҥ'), - (0x4A5, 'V'), - (0x4A6, 'M', 'ҧ'), - (0x4A7, 'V'), - (0x4A8, 'M', 'ҩ'), - (0x4A9, 'V'), - (0x4AA, 'M', 'ҫ'), - (0x4AB, 'V'), - (0x4AC, 'M', 'ҭ'), - (0x4AD, 'V'), - (0x4AE, 'M', 'ү'), - (0x4AF, 'V'), - (0x4B0, 'M', 'ұ'), - (0x4B1, 'V'), - (0x4B2, 'M', 'ҳ'), - (0x4B3, 'V'), - (0x4B4, 'M', 'ҵ'), - (0x4B5, 'V'), - (0x4B6, 'M', 'ҷ'), - (0x4B7, 'V'), - (0x4B8, 'M', 'ҹ'), - (0x4B9, 'V'), - (0x4BA, 'M', 'һ'), - (0x4BB, 'V'), - (0x4BC, 'M', 'ҽ'), - (0x4BD, 'V'), - (0x4BE, 'M', 'ҿ'), - (0x4BF, 'V'), - (0x4C0, 'X'), - (0x4C1, 'M', 'ӂ'), - (0x4C2, 'V'), - (0x4C3, 'M', 'ӄ'), - (0x4C4, 'V'), - (0x4C5, 'M', 'ӆ'), - (0x4C6, 'V'), - (0x4C7, 'M', 'ӈ'), - (0x4C8, 'V'), - (0x4C9, 'M', 'ӊ'), - (0x4CA, 'V'), - (0x4CB, 'M', 'ӌ'), - (0x4CC, 'V'), - (0x4CD, 'M', 'ӎ'), - (0x4CE, 'V'), - (0x4D0, 'M', 'ӑ'), - (0x4D1, 'V'), - (0x4D2, 'M', 'ӓ'), - (0x4D3, 'V'), - (0x4D4, 'M', 'ӕ'), - (0x4D5, 'V'), - (0x4D6, 'M', 'ӗ'), - (0x4D7, 'V'), - (0x4D8, 'M', 'ә'), - (0x4D9, 'V'), - (0x4DA, 'M', 'ӛ'), - (0x4DB, 'V'), - (0x4DC, 'M', 'ӝ'), - (0x4DD, 'V'), - (0x4DE, 'M', 'ӟ'), - (0x4DF, 'V'), - (0x4E0, 'M', 'ӡ'), - (0x4E1, 'V'), - (0x4E2, 'M', 'ӣ'), - (0x4E3, 'V'), - (0x4E4, 'M', 'ӥ'), - (0x4E5, 'V'), - (0x4E6, 'M', 'ӧ'), - (0x4E7, 'V'), - (0x4E8, 'M', 'ө'), - (0x4E9, 'V'), - (0x4EA, 'M', 'ӫ'), - (0x4EB, 'V'), - (0x4EC, 'M', 'ӭ'), - (0x4ED, 'V'), - (0x4EE, 'M', 'ӯ'), - (0x4EF, 'V'), - (0x4F0, 'M', 'ӱ'), - (0x4F1, 'V'), - (0x4F2, 'M', 'ӳ'), - (0x4F3, 'V'), - (0x4F4, 'M', 'ӵ'), - (0x4F5, 'V'), - (0x4F6, 'M', 'ӷ'), - (0x4F7, 'V'), - (0x4F8, 'M', 'ӹ'), - (0x4F9, 'V'), - (0x4FA, 'M', 'ӻ'), - (0x4FB, 'V'), - (0x4FC, 'M', 'ӽ'), - (0x4FD, 'V'), - (0x4FE, 'M', 'ӿ'), - (0x4FF, 'V'), - (0x500, 'M', 'ԁ'), - (0x501, 'V'), - (0x502, 'M', 'ԃ'), - ] - -def _seg_9() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x503, 'V'), - (0x504, 'M', 'ԅ'), - (0x505, 'V'), - (0x506, 'M', 'ԇ'), - (0x507, 'V'), - (0x508, 'M', 'ԉ'), - (0x509, 'V'), - (0x50A, 'M', 'ԋ'), - (0x50B, 'V'), - (0x50C, 'M', 'ԍ'), - (0x50D, 'V'), - (0x50E, 'M', 'ԏ'), - (0x50F, 'V'), - (0x510, 'M', 'ԑ'), - (0x511, 'V'), - (0x512, 'M', 'ԓ'), - (0x513, 'V'), - (0x514, 'M', 'ԕ'), - (0x515, 'V'), - (0x516, 'M', 'ԗ'), - (0x517, 'V'), - (0x518, 'M', 'ԙ'), - (0x519, 'V'), - (0x51A, 'M', 'ԛ'), - (0x51B, 'V'), - (0x51C, 'M', 'ԝ'), - (0x51D, 'V'), - (0x51E, 
'M', 'ԟ'), - (0x51F, 'V'), - (0x520, 'M', 'ԡ'), - (0x521, 'V'), - (0x522, 'M', 'ԣ'), - (0x523, 'V'), - (0x524, 'M', 'ԥ'), - (0x525, 'V'), - (0x526, 'M', 'ԧ'), - (0x527, 'V'), - (0x528, 'M', 'ԩ'), - (0x529, 'V'), - (0x52A, 'M', 'ԫ'), - (0x52B, 'V'), - (0x52C, 'M', 'ԭ'), - (0x52D, 'V'), - (0x52E, 'M', 'ԯ'), - (0x52F, 'V'), - (0x530, 'X'), - (0x531, 'M', 'ա'), - (0x532, 'M', 'բ'), - (0x533, 'M', 'գ'), - (0x534, 'M', 'դ'), - (0x535, 'M', 'ե'), - (0x536, 'M', 'զ'), - (0x537, 'M', 'է'), - (0x538, 'M', 'ը'), - (0x539, 'M', 'թ'), - (0x53A, 'M', 'ժ'), - (0x53B, 'M', 'ի'), - (0x53C, 'M', 'լ'), - (0x53D, 'M', 'խ'), - (0x53E, 'M', 'ծ'), - (0x53F, 'M', 'կ'), - (0x540, 'M', 'հ'), - (0x541, 'M', 'ձ'), - (0x542, 'M', 'ղ'), - (0x543, 'M', 'ճ'), - (0x544, 'M', 'մ'), - (0x545, 'M', 'յ'), - (0x546, 'M', 'ն'), - (0x547, 'M', 'շ'), - (0x548, 'M', 'ո'), - (0x549, 'M', 'չ'), - (0x54A, 'M', 'պ'), - (0x54B, 'M', 'ջ'), - (0x54C, 'M', 'ռ'), - (0x54D, 'M', 'ս'), - (0x54E, 'M', 'վ'), - (0x54F, 'M', 'տ'), - (0x550, 'M', 'ր'), - (0x551, 'M', 'ց'), - (0x552, 'M', 'ւ'), - (0x553, 'M', 'փ'), - (0x554, 'M', 'ք'), - (0x555, 'M', 'օ'), - (0x556, 'M', 'ֆ'), - (0x557, 'X'), - (0x559, 'V'), - (0x587, 'M', 'եւ'), - (0x588, 'V'), - (0x58B, 'X'), - (0x58D, 'V'), - (0x590, 'X'), - (0x591, 'V'), - (0x5C8, 'X'), - (0x5D0, 'V'), - (0x5EB, 'X'), - (0x5EF, 'V'), - (0x5F5, 'X'), - (0x606, 'V'), - (0x61C, 'X'), - (0x61D, 'V'), - ] - -def _seg_10() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x675, 'M', 'اٴ'), - (0x676, 'M', 'وٴ'), - (0x677, 'M', 'ۇٴ'), - (0x678, 'M', 'يٴ'), - (0x679, 'V'), - (0x6DD, 'X'), - (0x6DE, 'V'), - (0x70E, 'X'), - (0x710, 'V'), - (0x74B, 'X'), - (0x74D, 'V'), - (0x7B2, 'X'), - (0x7C0, 'V'), - (0x7FB, 'X'), - (0x7FD, 'V'), - (0x82E, 'X'), - (0x830, 'V'), - (0x83F, 'X'), - (0x840, 'V'), - (0x85C, 'X'), - (0x85E, 'V'), - (0x85F, 'X'), - (0x860, 'V'), - (0x86B, 'X'), - (0x870, 'V'), - (0x88F, 'X'), - (0x898, 'V'), - (0x8E2, 'X'), - (0x8E3, 'V'), - (0x958, 'M', 'क़'), - (0x959, 'M', 'ख़'), - (0x95A, 'M', 'ग़'), - (0x95B, 'M', 'ज़'), - (0x95C, 'M', 'ड़'), - (0x95D, 'M', 'ढ़'), - (0x95E, 'M', 'फ़'), - (0x95F, 'M', 'य़'), - (0x960, 'V'), - (0x984, 'X'), - (0x985, 'V'), - (0x98D, 'X'), - (0x98F, 'V'), - (0x991, 'X'), - (0x993, 'V'), - (0x9A9, 'X'), - (0x9AA, 'V'), - (0x9B1, 'X'), - (0x9B2, 'V'), - (0x9B3, 'X'), - (0x9B6, 'V'), - (0x9BA, 'X'), - (0x9BC, 'V'), - (0x9C5, 'X'), - (0x9C7, 'V'), - (0x9C9, 'X'), - (0x9CB, 'V'), - (0x9CF, 'X'), - (0x9D7, 'V'), - (0x9D8, 'X'), - (0x9DC, 'M', 'ড়'), - (0x9DD, 'M', 'ঢ়'), - (0x9DE, 'X'), - (0x9DF, 'M', 'য়'), - (0x9E0, 'V'), - (0x9E4, 'X'), - (0x9E6, 'V'), - (0x9FF, 'X'), - (0xA01, 'V'), - (0xA04, 'X'), - (0xA05, 'V'), - (0xA0B, 'X'), - (0xA0F, 'V'), - (0xA11, 'X'), - (0xA13, 'V'), - (0xA29, 'X'), - (0xA2A, 'V'), - (0xA31, 'X'), - (0xA32, 'V'), - (0xA33, 'M', 'ਲ਼'), - (0xA34, 'X'), - (0xA35, 'V'), - (0xA36, 'M', 'ਸ਼'), - (0xA37, 'X'), - (0xA38, 'V'), - (0xA3A, 'X'), - (0xA3C, 'V'), - (0xA3D, 'X'), - (0xA3E, 'V'), - (0xA43, 'X'), - (0xA47, 'V'), - (0xA49, 'X'), - (0xA4B, 'V'), - (0xA4E, 'X'), - (0xA51, 'V'), - (0xA52, 'X'), - (0xA59, 'M', 'ਖ਼'), - (0xA5A, 'M', 'ਗ਼'), - (0xA5B, 'M', 'ਜ਼'), - (0xA5C, 'V'), - (0xA5D, 'X'), - ] - -def _seg_11() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xA5E, 'M', 'ਫ਼'), - (0xA5F, 'X'), - (0xA66, 'V'), - (0xA77, 'X'), - (0xA81, 'V'), - (0xA84, 'X'), - (0xA85, 'V'), - (0xA8E, 'X'), - (0xA8F, 'V'), - (0xA92, 'X'), - (0xA93, 'V'), - (0xAA9, 'X'), - (0xAAA, 'V'), - (0xAB1, 'X'), - (0xAB2, 'V'), - (0xAB4, 'X'), 
- (0xAB5, 'V'), - (0xABA, 'X'), - (0xABC, 'V'), - (0xAC6, 'X'), - (0xAC7, 'V'), - (0xACA, 'X'), - (0xACB, 'V'), - (0xACE, 'X'), - (0xAD0, 'V'), - (0xAD1, 'X'), - (0xAE0, 'V'), - (0xAE4, 'X'), - (0xAE6, 'V'), - (0xAF2, 'X'), - (0xAF9, 'V'), - (0xB00, 'X'), - (0xB01, 'V'), - (0xB04, 'X'), - (0xB05, 'V'), - (0xB0D, 'X'), - (0xB0F, 'V'), - (0xB11, 'X'), - (0xB13, 'V'), - (0xB29, 'X'), - (0xB2A, 'V'), - (0xB31, 'X'), - (0xB32, 'V'), - (0xB34, 'X'), - (0xB35, 'V'), - (0xB3A, 'X'), - (0xB3C, 'V'), - (0xB45, 'X'), - (0xB47, 'V'), - (0xB49, 'X'), - (0xB4B, 'V'), - (0xB4E, 'X'), - (0xB55, 'V'), - (0xB58, 'X'), - (0xB5C, 'M', 'ଡ଼'), - (0xB5D, 'M', 'ଢ଼'), - (0xB5E, 'X'), - (0xB5F, 'V'), - (0xB64, 'X'), - (0xB66, 'V'), - (0xB78, 'X'), - (0xB82, 'V'), - (0xB84, 'X'), - (0xB85, 'V'), - (0xB8B, 'X'), - (0xB8E, 'V'), - (0xB91, 'X'), - (0xB92, 'V'), - (0xB96, 'X'), - (0xB99, 'V'), - (0xB9B, 'X'), - (0xB9C, 'V'), - (0xB9D, 'X'), - (0xB9E, 'V'), - (0xBA0, 'X'), - (0xBA3, 'V'), - (0xBA5, 'X'), - (0xBA8, 'V'), - (0xBAB, 'X'), - (0xBAE, 'V'), - (0xBBA, 'X'), - (0xBBE, 'V'), - (0xBC3, 'X'), - (0xBC6, 'V'), - (0xBC9, 'X'), - (0xBCA, 'V'), - (0xBCE, 'X'), - (0xBD0, 'V'), - (0xBD1, 'X'), - (0xBD7, 'V'), - (0xBD8, 'X'), - (0xBE6, 'V'), - (0xBFB, 'X'), - (0xC00, 'V'), - (0xC0D, 'X'), - (0xC0E, 'V'), - (0xC11, 'X'), - (0xC12, 'V'), - (0xC29, 'X'), - (0xC2A, 'V'), - ] - -def _seg_12() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xC3A, 'X'), - (0xC3C, 'V'), - (0xC45, 'X'), - (0xC46, 'V'), - (0xC49, 'X'), - (0xC4A, 'V'), - (0xC4E, 'X'), - (0xC55, 'V'), - (0xC57, 'X'), - (0xC58, 'V'), - (0xC5B, 'X'), - (0xC5D, 'V'), - (0xC5E, 'X'), - (0xC60, 'V'), - (0xC64, 'X'), - (0xC66, 'V'), - (0xC70, 'X'), - (0xC77, 'V'), - (0xC8D, 'X'), - (0xC8E, 'V'), - (0xC91, 'X'), - (0xC92, 'V'), - (0xCA9, 'X'), - (0xCAA, 'V'), - (0xCB4, 'X'), - (0xCB5, 'V'), - (0xCBA, 'X'), - (0xCBC, 'V'), - (0xCC5, 'X'), - (0xCC6, 'V'), - (0xCC9, 'X'), - (0xCCA, 'V'), - (0xCCE, 'X'), - (0xCD5, 'V'), - (0xCD7, 'X'), - (0xCDD, 'V'), - (0xCDF, 'X'), - (0xCE0, 'V'), - (0xCE4, 'X'), - (0xCE6, 'V'), - (0xCF0, 'X'), - (0xCF1, 'V'), - (0xCF4, 'X'), - (0xD00, 'V'), - (0xD0D, 'X'), - (0xD0E, 'V'), - (0xD11, 'X'), - (0xD12, 'V'), - (0xD45, 'X'), - (0xD46, 'V'), - (0xD49, 'X'), - (0xD4A, 'V'), - (0xD50, 'X'), - (0xD54, 'V'), - (0xD64, 'X'), - (0xD66, 'V'), - (0xD80, 'X'), - (0xD81, 'V'), - (0xD84, 'X'), - (0xD85, 'V'), - (0xD97, 'X'), - (0xD9A, 'V'), - (0xDB2, 'X'), - (0xDB3, 'V'), - (0xDBC, 'X'), - (0xDBD, 'V'), - (0xDBE, 'X'), - (0xDC0, 'V'), - (0xDC7, 'X'), - (0xDCA, 'V'), - (0xDCB, 'X'), - (0xDCF, 'V'), - (0xDD5, 'X'), - (0xDD6, 'V'), - (0xDD7, 'X'), - (0xDD8, 'V'), - (0xDE0, 'X'), - (0xDE6, 'V'), - (0xDF0, 'X'), - (0xDF2, 'V'), - (0xDF5, 'X'), - (0xE01, 'V'), - (0xE33, 'M', 'ํา'), - (0xE34, 'V'), - (0xE3B, 'X'), - (0xE3F, 'V'), - (0xE5C, 'X'), - (0xE81, 'V'), - (0xE83, 'X'), - (0xE84, 'V'), - (0xE85, 'X'), - (0xE86, 'V'), - (0xE8B, 'X'), - (0xE8C, 'V'), - (0xEA4, 'X'), - (0xEA5, 'V'), - (0xEA6, 'X'), - (0xEA7, 'V'), - (0xEB3, 'M', 'ໍາ'), - (0xEB4, 'V'), - ] - -def _seg_13() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xEBE, 'X'), - (0xEC0, 'V'), - (0xEC5, 'X'), - (0xEC6, 'V'), - (0xEC7, 'X'), - (0xEC8, 'V'), - (0xECF, 'X'), - (0xED0, 'V'), - (0xEDA, 'X'), - (0xEDC, 'M', 'ຫນ'), - (0xEDD, 'M', 'ຫມ'), - (0xEDE, 'V'), - (0xEE0, 'X'), - (0xF00, 'V'), - (0xF0C, 'M', '་'), - (0xF0D, 'V'), - (0xF43, 'M', 'གྷ'), - (0xF44, 'V'), - (0xF48, 'X'), - (0xF49, 'V'), - (0xF4D, 'M', 'ཌྷ'), - (0xF4E, 'V'), - (0xF52, 'M', 'དྷ'), - (0xF53, 
'V'), - (0xF57, 'M', 'བྷ'), - (0xF58, 'V'), - (0xF5C, 'M', 'ཛྷ'), - (0xF5D, 'V'), - (0xF69, 'M', 'ཀྵ'), - (0xF6A, 'V'), - (0xF6D, 'X'), - (0xF71, 'V'), - (0xF73, 'M', 'ཱི'), - (0xF74, 'V'), - (0xF75, 'M', 'ཱུ'), - (0xF76, 'M', 'ྲྀ'), - (0xF77, 'M', 'ྲཱྀ'), - (0xF78, 'M', 'ླྀ'), - (0xF79, 'M', 'ླཱྀ'), - (0xF7A, 'V'), - (0xF81, 'M', 'ཱྀ'), - (0xF82, 'V'), - (0xF93, 'M', 'ྒྷ'), - (0xF94, 'V'), - (0xF98, 'X'), - (0xF99, 'V'), - (0xF9D, 'M', 'ྜྷ'), - (0xF9E, 'V'), - (0xFA2, 'M', 'ྡྷ'), - (0xFA3, 'V'), - (0xFA7, 'M', 'ྦྷ'), - (0xFA8, 'V'), - (0xFAC, 'M', 'ྫྷ'), - (0xFAD, 'V'), - (0xFB9, 'M', 'ྐྵ'), - (0xFBA, 'V'), - (0xFBD, 'X'), - (0xFBE, 'V'), - (0xFCD, 'X'), - (0xFCE, 'V'), - (0xFDB, 'X'), - (0x1000, 'V'), - (0x10A0, 'X'), - (0x10C7, 'M', 'ⴧ'), - (0x10C8, 'X'), - (0x10CD, 'M', 'ⴭ'), - (0x10CE, 'X'), - (0x10D0, 'V'), - (0x10FC, 'M', 'ნ'), - (0x10FD, 'V'), - (0x115F, 'X'), - (0x1161, 'V'), - (0x1249, 'X'), - (0x124A, 'V'), - (0x124E, 'X'), - (0x1250, 'V'), - (0x1257, 'X'), - (0x1258, 'V'), - (0x1259, 'X'), - (0x125A, 'V'), - (0x125E, 'X'), - (0x1260, 'V'), - (0x1289, 'X'), - (0x128A, 'V'), - (0x128E, 'X'), - (0x1290, 'V'), - (0x12B1, 'X'), - (0x12B2, 'V'), - (0x12B6, 'X'), - (0x12B8, 'V'), - (0x12BF, 'X'), - (0x12C0, 'V'), - (0x12C1, 'X'), - (0x12C2, 'V'), - (0x12C6, 'X'), - (0x12C8, 'V'), - (0x12D7, 'X'), - (0x12D8, 'V'), - (0x1311, 'X'), - (0x1312, 'V'), - ] - -def _seg_14() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1316, 'X'), - (0x1318, 'V'), - (0x135B, 'X'), - (0x135D, 'V'), - (0x137D, 'X'), - (0x1380, 'V'), - (0x139A, 'X'), - (0x13A0, 'V'), - (0x13F6, 'X'), - (0x13F8, 'M', 'Ᏸ'), - (0x13F9, 'M', 'Ᏹ'), - (0x13FA, 'M', 'Ᏺ'), - (0x13FB, 'M', 'Ᏻ'), - (0x13FC, 'M', 'Ᏼ'), - (0x13FD, 'M', 'Ᏽ'), - (0x13FE, 'X'), - (0x1400, 'V'), - (0x1680, 'X'), - (0x1681, 'V'), - (0x169D, 'X'), - (0x16A0, 'V'), - (0x16F9, 'X'), - (0x1700, 'V'), - (0x1716, 'X'), - (0x171F, 'V'), - (0x1737, 'X'), - (0x1740, 'V'), - (0x1754, 'X'), - (0x1760, 'V'), - (0x176D, 'X'), - (0x176E, 'V'), - (0x1771, 'X'), - (0x1772, 'V'), - (0x1774, 'X'), - (0x1780, 'V'), - (0x17B4, 'X'), - (0x17B6, 'V'), - (0x17DE, 'X'), - (0x17E0, 'V'), - (0x17EA, 'X'), - (0x17F0, 'V'), - (0x17FA, 'X'), - (0x1800, 'V'), - (0x1806, 'X'), - (0x1807, 'V'), - (0x180B, 'I'), - (0x180E, 'X'), - (0x180F, 'I'), - (0x1810, 'V'), - (0x181A, 'X'), - (0x1820, 'V'), - (0x1879, 'X'), - (0x1880, 'V'), - (0x18AB, 'X'), - (0x18B0, 'V'), - (0x18F6, 'X'), - (0x1900, 'V'), - (0x191F, 'X'), - (0x1920, 'V'), - (0x192C, 'X'), - (0x1930, 'V'), - (0x193C, 'X'), - (0x1940, 'V'), - (0x1941, 'X'), - (0x1944, 'V'), - (0x196E, 'X'), - (0x1970, 'V'), - (0x1975, 'X'), - (0x1980, 'V'), - (0x19AC, 'X'), - (0x19B0, 'V'), - (0x19CA, 'X'), - (0x19D0, 'V'), - (0x19DB, 'X'), - (0x19DE, 'V'), - (0x1A1C, 'X'), - (0x1A1E, 'V'), - (0x1A5F, 'X'), - (0x1A60, 'V'), - (0x1A7D, 'X'), - (0x1A7F, 'V'), - (0x1A8A, 'X'), - (0x1A90, 'V'), - (0x1A9A, 'X'), - (0x1AA0, 'V'), - (0x1AAE, 'X'), - (0x1AB0, 'V'), - (0x1ACF, 'X'), - (0x1B00, 'V'), - (0x1B4D, 'X'), - (0x1B50, 'V'), - (0x1B7F, 'X'), - (0x1B80, 'V'), - (0x1BF4, 'X'), - (0x1BFC, 'V'), - (0x1C38, 'X'), - (0x1C3B, 'V'), - (0x1C4A, 'X'), - (0x1C4D, 'V'), - (0x1C80, 'M', 'в'), - ] - -def _seg_15() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1C81, 'M', 'д'), - (0x1C82, 'M', 'о'), - (0x1C83, 'M', 'с'), - (0x1C84, 'M', 'т'), - (0x1C86, 'M', 'ъ'), - (0x1C87, 'M', 'ѣ'), - (0x1C88, 'M', 'ꙋ'), - (0x1C89, 'X'), - (0x1C90, 'M', 'ა'), - (0x1C91, 'M', 'ბ'), - (0x1C92, 'M', 'გ'), - (0x1C93, 'M', 'დ'), - (0x1C94, 
'M', 'ე'), - (0x1C95, 'M', 'ვ'), - (0x1C96, 'M', 'ზ'), - (0x1C97, 'M', 'თ'), - (0x1C98, 'M', 'ი'), - (0x1C99, 'M', 'კ'), - (0x1C9A, 'M', 'ლ'), - (0x1C9B, 'M', 'მ'), - (0x1C9C, 'M', 'ნ'), - (0x1C9D, 'M', 'ო'), - (0x1C9E, 'M', 'პ'), - (0x1C9F, 'M', 'ჟ'), - (0x1CA0, 'M', 'რ'), - (0x1CA1, 'M', 'ს'), - (0x1CA2, 'M', 'ტ'), - (0x1CA3, 'M', 'უ'), - (0x1CA4, 'M', 'ფ'), - (0x1CA5, 'M', 'ქ'), - (0x1CA6, 'M', 'ღ'), - (0x1CA7, 'M', 'ყ'), - (0x1CA8, 'M', 'შ'), - (0x1CA9, 'M', 'ჩ'), - (0x1CAA, 'M', 'ც'), - (0x1CAB, 'M', 'ძ'), - (0x1CAC, 'M', 'წ'), - (0x1CAD, 'M', 'ჭ'), - (0x1CAE, 'M', 'ხ'), - (0x1CAF, 'M', 'ჯ'), - (0x1CB0, 'M', 'ჰ'), - (0x1CB1, 'M', 'ჱ'), - (0x1CB2, 'M', 'ჲ'), - (0x1CB3, 'M', 'ჳ'), - (0x1CB4, 'M', 'ჴ'), - (0x1CB5, 'M', 'ჵ'), - (0x1CB6, 'M', 'ჶ'), - (0x1CB7, 'M', 'ჷ'), - (0x1CB8, 'M', 'ჸ'), - (0x1CB9, 'M', 'ჹ'), - (0x1CBA, 'M', 'ჺ'), - (0x1CBB, 'X'), - (0x1CBD, 'M', 'ჽ'), - (0x1CBE, 'M', 'ჾ'), - (0x1CBF, 'M', 'ჿ'), - (0x1CC0, 'V'), - (0x1CC8, 'X'), - (0x1CD0, 'V'), - (0x1CFB, 'X'), - (0x1D00, 'V'), - (0x1D2C, 'M', 'a'), - (0x1D2D, 'M', 'æ'), - (0x1D2E, 'M', 'b'), - (0x1D2F, 'V'), - (0x1D30, 'M', 'd'), - (0x1D31, 'M', 'e'), - (0x1D32, 'M', 'ǝ'), - (0x1D33, 'M', 'g'), - (0x1D34, 'M', 'h'), - (0x1D35, 'M', 'i'), - (0x1D36, 'M', 'j'), - (0x1D37, 'M', 'k'), - (0x1D38, 'M', 'l'), - (0x1D39, 'M', 'm'), - (0x1D3A, 'M', 'n'), - (0x1D3B, 'V'), - (0x1D3C, 'M', 'o'), - (0x1D3D, 'M', 'ȣ'), - (0x1D3E, 'M', 'p'), - (0x1D3F, 'M', 'r'), - (0x1D40, 'M', 't'), - (0x1D41, 'M', 'u'), - (0x1D42, 'M', 'w'), - (0x1D43, 'M', 'a'), - (0x1D44, 'M', 'ɐ'), - (0x1D45, 'M', 'ɑ'), - (0x1D46, 'M', 'ᴂ'), - (0x1D47, 'M', 'b'), - (0x1D48, 'M', 'd'), - (0x1D49, 'M', 'e'), - (0x1D4A, 'M', 'ə'), - (0x1D4B, 'M', 'ɛ'), - (0x1D4C, 'M', 'ɜ'), - (0x1D4D, 'M', 'g'), - (0x1D4E, 'V'), - (0x1D4F, 'M', 'k'), - (0x1D50, 'M', 'm'), - (0x1D51, 'M', 'ŋ'), - (0x1D52, 'M', 'o'), - (0x1D53, 'M', 'ɔ'), - ] - -def _seg_16() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D54, 'M', 'ᴖ'), - (0x1D55, 'M', 'ᴗ'), - (0x1D56, 'M', 'p'), - (0x1D57, 'M', 't'), - (0x1D58, 'M', 'u'), - (0x1D59, 'M', 'ᴝ'), - (0x1D5A, 'M', 'ɯ'), - (0x1D5B, 'M', 'v'), - (0x1D5C, 'M', 'ᴥ'), - (0x1D5D, 'M', 'β'), - (0x1D5E, 'M', 'γ'), - (0x1D5F, 'M', 'δ'), - (0x1D60, 'M', 'φ'), - (0x1D61, 'M', 'χ'), - (0x1D62, 'M', 'i'), - (0x1D63, 'M', 'r'), - (0x1D64, 'M', 'u'), - (0x1D65, 'M', 'v'), - (0x1D66, 'M', 'β'), - (0x1D67, 'M', 'γ'), - (0x1D68, 'M', 'ρ'), - (0x1D69, 'M', 'φ'), - (0x1D6A, 'M', 'χ'), - (0x1D6B, 'V'), - (0x1D78, 'M', 'н'), - (0x1D79, 'V'), - (0x1D9B, 'M', 'ɒ'), - (0x1D9C, 'M', 'c'), - (0x1D9D, 'M', 'ɕ'), - (0x1D9E, 'M', 'ð'), - (0x1D9F, 'M', 'ɜ'), - (0x1DA0, 'M', 'f'), - (0x1DA1, 'M', 'ɟ'), - (0x1DA2, 'M', 'ɡ'), - (0x1DA3, 'M', 'ɥ'), - (0x1DA4, 'M', 'ɨ'), - (0x1DA5, 'M', 'ɩ'), - (0x1DA6, 'M', 'ɪ'), - (0x1DA7, 'M', 'ᵻ'), - (0x1DA8, 'M', 'ʝ'), - (0x1DA9, 'M', 'ɭ'), - (0x1DAA, 'M', 'ᶅ'), - (0x1DAB, 'M', 'ʟ'), - (0x1DAC, 'M', 'ɱ'), - (0x1DAD, 'M', 'ɰ'), - (0x1DAE, 'M', 'ɲ'), - (0x1DAF, 'M', 'ɳ'), - (0x1DB0, 'M', 'ɴ'), - (0x1DB1, 'M', 'ɵ'), - (0x1DB2, 'M', 'ɸ'), - (0x1DB3, 'M', 'ʂ'), - (0x1DB4, 'M', 'ʃ'), - (0x1DB5, 'M', 'ƫ'), - (0x1DB6, 'M', 'ʉ'), - (0x1DB7, 'M', 'ʊ'), - (0x1DB8, 'M', 'ᴜ'), - (0x1DB9, 'M', 'ʋ'), - (0x1DBA, 'M', 'ʌ'), - (0x1DBB, 'M', 'z'), - (0x1DBC, 'M', 'ʐ'), - (0x1DBD, 'M', 'ʑ'), - (0x1DBE, 'M', 'ʒ'), - (0x1DBF, 'M', 'θ'), - (0x1DC0, 'V'), - (0x1E00, 'M', 'ḁ'), - (0x1E01, 'V'), - (0x1E02, 'M', 'ḃ'), - (0x1E03, 'V'), - (0x1E04, 'M', 'ḅ'), - (0x1E05, 'V'), - (0x1E06, 'M', 'ḇ'), - (0x1E07, 'V'), - (0x1E08, 'M', 'ḉ'), - (0x1E09, 'V'), 
- (0x1E0A, 'M', 'ḋ'), - (0x1E0B, 'V'), - (0x1E0C, 'M', 'ḍ'), - (0x1E0D, 'V'), - (0x1E0E, 'M', 'ḏ'), - (0x1E0F, 'V'), - (0x1E10, 'M', 'ḑ'), - (0x1E11, 'V'), - (0x1E12, 'M', 'ḓ'), - (0x1E13, 'V'), - (0x1E14, 'M', 'ḕ'), - (0x1E15, 'V'), - (0x1E16, 'M', 'ḗ'), - (0x1E17, 'V'), - (0x1E18, 'M', 'ḙ'), - (0x1E19, 'V'), - (0x1E1A, 'M', 'ḛ'), - (0x1E1B, 'V'), - (0x1E1C, 'M', 'ḝ'), - (0x1E1D, 'V'), - (0x1E1E, 'M', 'ḟ'), - (0x1E1F, 'V'), - (0x1E20, 'M', 'ḡ'), - (0x1E21, 'V'), - (0x1E22, 'M', 'ḣ'), - (0x1E23, 'V'), - ] - -def _seg_17() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1E24, 'M', 'ḥ'), - (0x1E25, 'V'), - (0x1E26, 'M', 'ḧ'), - (0x1E27, 'V'), - (0x1E28, 'M', 'ḩ'), - (0x1E29, 'V'), - (0x1E2A, 'M', 'ḫ'), - (0x1E2B, 'V'), - (0x1E2C, 'M', 'ḭ'), - (0x1E2D, 'V'), - (0x1E2E, 'M', 'ḯ'), - (0x1E2F, 'V'), - (0x1E30, 'M', 'ḱ'), - (0x1E31, 'V'), - (0x1E32, 'M', 'ḳ'), - (0x1E33, 'V'), - (0x1E34, 'M', 'ḵ'), - (0x1E35, 'V'), - (0x1E36, 'M', 'ḷ'), - (0x1E37, 'V'), - (0x1E38, 'M', 'ḹ'), - (0x1E39, 'V'), - (0x1E3A, 'M', 'ḻ'), - (0x1E3B, 'V'), - (0x1E3C, 'M', 'ḽ'), - (0x1E3D, 'V'), - (0x1E3E, 'M', 'ḿ'), - (0x1E3F, 'V'), - (0x1E40, 'M', 'ṁ'), - (0x1E41, 'V'), - (0x1E42, 'M', 'ṃ'), - (0x1E43, 'V'), - (0x1E44, 'M', 'ṅ'), - (0x1E45, 'V'), - (0x1E46, 'M', 'ṇ'), - (0x1E47, 'V'), - (0x1E48, 'M', 'ṉ'), - (0x1E49, 'V'), - (0x1E4A, 'M', 'ṋ'), - (0x1E4B, 'V'), - (0x1E4C, 'M', 'ṍ'), - (0x1E4D, 'V'), - (0x1E4E, 'M', 'ṏ'), - (0x1E4F, 'V'), - (0x1E50, 'M', 'ṑ'), - (0x1E51, 'V'), - (0x1E52, 'M', 'ṓ'), - (0x1E53, 'V'), - (0x1E54, 'M', 'ṕ'), - (0x1E55, 'V'), - (0x1E56, 'M', 'ṗ'), - (0x1E57, 'V'), - (0x1E58, 'M', 'ṙ'), - (0x1E59, 'V'), - (0x1E5A, 'M', 'ṛ'), - (0x1E5B, 'V'), - (0x1E5C, 'M', 'ṝ'), - (0x1E5D, 'V'), - (0x1E5E, 'M', 'ṟ'), - (0x1E5F, 'V'), - (0x1E60, 'M', 'ṡ'), - (0x1E61, 'V'), - (0x1E62, 'M', 'ṣ'), - (0x1E63, 'V'), - (0x1E64, 'M', 'ṥ'), - (0x1E65, 'V'), - (0x1E66, 'M', 'ṧ'), - (0x1E67, 'V'), - (0x1E68, 'M', 'ṩ'), - (0x1E69, 'V'), - (0x1E6A, 'M', 'ṫ'), - (0x1E6B, 'V'), - (0x1E6C, 'M', 'ṭ'), - (0x1E6D, 'V'), - (0x1E6E, 'M', 'ṯ'), - (0x1E6F, 'V'), - (0x1E70, 'M', 'ṱ'), - (0x1E71, 'V'), - (0x1E72, 'M', 'ṳ'), - (0x1E73, 'V'), - (0x1E74, 'M', 'ṵ'), - (0x1E75, 'V'), - (0x1E76, 'M', 'ṷ'), - (0x1E77, 'V'), - (0x1E78, 'M', 'ṹ'), - (0x1E79, 'V'), - (0x1E7A, 'M', 'ṻ'), - (0x1E7B, 'V'), - (0x1E7C, 'M', 'ṽ'), - (0x1E7D, 'V'), - (0x1E7E, 'M', 'ṿ'), - (0x1E7F, 'V'), - (0x1E80, 'M', 'ẁ'), - (0x1E81, 'V'), - (0x1E82, 'M', 'ẃ'), - (0x1E83, 'V'), - (0x1E84, 'M', 'ẅ'), - (0x1E85, 'V'), - (0x1E86, 'M', 'ẇ'), - (0x1E87, 'V'), - ] - -def _seg_18() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1E88, 'M', 'ẉ'), - (0x1E89, 'V'), - (0x1E8A, 'M', 'ẋ'), - (0x1E8B, 'V'), - (0x1E8C, 'M', 'ẍ'), - (0x1E8D, 'V'), - (0x1E8E, 'M', 'ẏ'), - (0x1E8F, 'V'), - (0x1E90, 'M', 'ẑ'), - (0x1E91, 'V'), - (0x1E92, 'M', 'ẓ'), - (0x1E93, 'V'), - (0x1E94, 'M', 'ẕ'), - (0x1E95, 'V'), - (0x1E9A, 'M', 'aʾ'), - (0x1E9B, 'M', 'ṡ'), - (0x1E9C, 'V'), - (0x1E9E, 'M', 'ss'), - (0x1E9F, 'V'), - (0x1EA0, 'M', 'ạ'), - (0x1EA1, 'V'), - (0x1EA2, 'M', 'ả'), - (0x1EA3, 'V'), - (0x1EA4, 'M', 'ấ'), - (0x1EA5, 'V'), - (0x1EA6, 'M', 'ầ'), - (0x1EA7, 'V'), - (0x1EA8, 'M', 'ẩ'), - (0x1EA9, 'V'), - (0x1EAA, 'M', 'ẫ'), - (0x1EAB, 'V'), - (0x1EAC, 'M', 'ậ'), - (0x1EAD, 'V'), - (0x1EAE, 'M', 'ắ'), - (0x1EAF, 'V'), - (0x1EB0, 'M', 'ằ'), - (0x1EB1, 'V'), - (0x1EB2, 'M', 'ẳ'), - (0x1EB3, 'V'), - (0x1EB4, 'M', 'ẵ'), - (0x1EB5, 'V'), - (0x1EB6, 'M', 'ặ'), - (0x1EB7, 'V'), - (0x1EB8, 'M', 'ẹ'), - (0x1EB9, 'V'), - (0x1EBA, 'M', 'ẻ'), - (0x1EBB, 'V'), - 
(0x1EBC, 'M', 'ẽ'), - (0x1EBD, 'V'), - (0x1EBE, 'M', 'ế'), - (0x1EBF, 'V'), - (0x1EC0, 'M', 'ề'), - (0x1EC1, 'V'), - (0x1EC2, 'M', 'ể'), - (0x1EC3, 'V'), - (0x1EC4, 'M', 'ễ'), - (0x1EC5, 'V'), - (0x1EC6, 'M', 'ệ'), - (0x1EC7, 'V'), - (0x1EC8, 'M', 'ỉ'), - (0x1EC9, 'V'), - (0x1ECA, 'M', 'ị'), - (0x1ECB, 'V'), - (0x1ECC, 'M', 'ọ'), - (0x1ECD, 'V'), - (0x1ECE, 'M', 'ỏ'), - (0x1ECF, 'V'), - (0x1ED0, 'M', 'ố'), - (0x1ED1, 'V'), - (0x1ED2, 'M', 'ồ'), - (0x1ED3, 'V'), - (0x1ED4, 'M', 'ổ'), - (0x1ED5, 'V'), - (0x1ED6, 'M', 'ỗ'), - (0x1ED7, 'V'), - (0x1ED8, 'M', 'ộ'), - (0x1ED9, 'V'), - (0x1EDA, 'M', 'ớ'), - (0x1EDB, 'V'), - (0x1EDC, 'M', 'ờ'), - (0x1EDD, 'V'), - (0x1EDE, 'M', 'ở'), - (0x1EDF, 'V'), - (0x1EE0, 'M', 'ỡ'), - (0x1EE1, 'V'), - (0x1EE2, 'M', 'ợ'), - (0x1EE3, 'V'), - (0x1EE4, 'M', 'ụ'), - (0x1EE5, 'V'), - (0x1EE6, 'M', 'ủ'), - (0x1EE7, 'V'), - (0x1EE8, 'M', 'ứ'), - (0x1EE9, 'V'), - (0x1EEA, 'M', 'ừ'), - (0x1EEB, 'V'), - (0x1EEC, 'M', 'ử'), - (0x1EED, 'V'), - (0x1EEE, 'M', 'ữ'), - (0x1EEF, 'V'), - (0x1EF0, 'M', 'ự'), - ] - -def _seg_19() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1EF1, 'V'), - (0x1EF2, 'M', 'ỳ'), - (0x1EF3, 'V'), - (0x1EF4, 'M', 'ỵ'), - (0x1EF5, 'V'), - (0x1EF6, 'M', 'ỷ'), - (0x1EF7, 'V'), - (0x1EF8, 'M', 'ỹ'), - (0x1EF9, 'V'), - (0x1EFA, 'M', 'ỻ'), - (0x1EFB, 'V'), - (0x1EFC, 'M', 'ỽ'), - (0x1EFD, 'V'), - (0x1EFE, 'M', 'ỿ'), - (0x1EFF, 'V'), - (0x1F08, 'M', 'ἀ'), - (0x1F09, 'M', 'ἁ'), - (0x1F0A, 'M', 'ἂ'), - (0x1F0B, 'M', 'ἃ'), - (0x1F0C, 'M', 'ἄ'), - (0x1F0D, 'M', 'ἅ'), - (0x1F0E, 'M', 'ἆ'), - (0x1F0F, 'M', 'ἇ'), - (0x1F10, 'V'), - (0x1F16, 'X'), - (0x1F18, 'M', 'ἐ'), - (0x1F19, 'M', 'ἑ'), - (0x1F1A, 'M', 'ἒ'), - (0x1F1B, 'M', 'ἓ'), - (0x1F1C, 'M', 'ἔ'), - (0x1F1D, 'M', 'ἕ'), - (0x1F1E, 'X'), - (0x1F20, 'V'), - (0x1F28, 'M', 'ἠ'), - (0x1F29, 'M', 'ἡ'), - (0x1F2A, 'M', 'ἢ'), - (0x1F2B, 'M', 'ἣ'), - (0x1F2C, 'M', 'ἤ'), - (0x1F2D, 'M', 'ἥ'), - (0x1F2E, 'M', 'ἦ'), - (0x1F2F, 'M', 'ἧ'), - (0x1F30, 'V'), - (0x1F38, 'M', 'ἰ'), - (0x1F39, 'M', 'ἱ'), - (0x1F3A, 'M', 'ἲ'), - (0x1F3B, 'M', 'ἳ'), - (0x1F3C, 'M', 'ἴ'), - (0x1F3D, 'M', 'ἵ'), - (0x1F3E, 'M', 'ἶ'), - (0x1F3F, 'M', 'ἷ'), - (0x1F40, 'V'), - (0x1F46, 'X'), - (0x1F48, 'M', 'ὀ'), - (0x1F49, 'M', 'ὁ'), - (0x1F4A, 'M', 'ὂ'), - (0x1F4B, 'M', 'ὃ'), - (0x1F4C, 'M', 'ὄ'), - (0x1F4D, 'M', 'ὅ'), - (0x1F4E, 'X'), - (0x1F50, 'V'), - (0x1F58, 'X'), - (0x1F59, 'M', 'ὑ'), - (0x1F5A, 'X'), - (0x1F5B, 'M', 'ὓ'), - (0x1F5C, 'X'), - (0x1F5D, 'M', 'ὕ'), - (0x1F5E, 'X'), - (0x1F5F, 'M', 'ὗ'), - (0x1F60, 'V'), - (0x1F68, 'M', 'ὠ'), - (0x1F69, 'M', 'ὡ'), - (0x1F6A, 'M', 'ὢ'), - (0x1F6B, 'M', 'ὣ'), - (0x1F6C, 'M', 'ὤ'), - (0x1F6D, 'M', 'ὥ'), - (0x1F6E, 'M', 'ὦ'), - (0x1F6F, 'M', 'ὧ'), - (0x1F70, 'V'), - (0x1F71, 'M', 'ά'), - (0x1F72, 'V'), - (0x1F73, 'M', 'έ'), - (0x1F74, 'V'), - (0x1F75, 'M', 'ή'), - (0x1F76, 'V'), - (0x1F77, 'M', 'ί'), - (0x1F78, 'V'), - (0x1F79, 'M', 'ό'), - (0x1F7A, 'V'), - (0x1F7B, 'M', 'ύ'), - (0x1F7C, 'V'), - (0x1F7D, 'M', 'ώ'), - (0x1F7E, 'X'), - (0x1F80, 'M', 'ἀι'), - (0x1F81, 'M', 'ἁι'), - (0x1F82, 'M', 'ἂι'), - (0x1F83, 'M', 'ἃι'), - (0x1F84, 'M', 'ἄι'), - (0x1F85, 'M', 'ἅι'), - (0x1F86, 'M', 'ἆι'), - (0x1F87, 'M', 'ἇι'), - ] - -def _seg_20() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1F88, 'M', 'ἀι'), - (0x1F89, 'M', 'ἁι'), - (0x1F8A, 'M', 'ἂι'), - (0x1F8B, 'M', 'ἃι'), - (0x1F8C, 'M', 'ἄι'), - (0x1F8D, 'M', 'ἅι'), - (0x1F8E, 'M', 'ἆι'), - (0x1F8F, 'M', 'ἇι'), - (0x1F90, 'M', 'ἠι'), - (0x1F91, 'M', 'ἡι'), - (0x1F92, 'M', 'ἢι'), - (0x1F93, 'M', 'ἣι'), - (0x1F94, 
'M', 'ἤι'), - (0x1F95, 'M', 'ἥι'), - (0x1F96, 'M', 'ἦι'), - (0x1F97, 'M', 'ἧι'), - (0x1F98, 'M', 'ἠι'), - (0x1F99, 'M', 'ἡι'), - (0x1F9A, 'M', 'ἢι'), - (0x1F9B, 'M', 'ἣι'), - (0x1F9C, 'M', 'ἤι'), - (0x1F9D, 'M', 'ἥι'), - (0x1F9E, 'M', 'ἦι'), - (0x1F9F, 'M', 'ἧι'), - (0x1FA0, 'M', 'ὠι'), - (0x1FA1, 'M', 'ὡι'), - (0x1FA2, 'M', 'ὢι'), - (0x1FA3, 'M', 'ὣι'), - (0x1FA4, 'M', 'ὤι'), - (0x1FA5, 'M', 'ὥι'), - (0x1FA6, 'M', 'ὦι'), - (0x1FA7, 'M', 'ὧι'), - (0x1FA8, 'M', 'ὠι'), - (0x1FA9, 'M', 'ὡι'), - (0x1FAA, 'M', 'ὢι'), - (0x1FAB, 'M', 'ὣι'), - (0x1FAC, 'M', 'ὤι'), - (0x1FAD, 'M', 'ὥι'), - (0x1FAE, 'M', 'ὦι'), - (0x1FAF, 'M', 'ὧι'), - (0x1FB0, 'V'), - (0x1FB2, 'M', 'ὰι'), - (0x1FB3, 'M', 'αι'), - (0x1FB4, 'M', 'άι'), - (0x1FB5, 'X'), - (0x1FB6, 'V'), - (0x1FB7, 'M', 'ᾶι'), - (0x1FB8, 'M', 'ᾰ'), - (0x1FB9, 'M', 'ᾱ'), - (0x1FBA, 'M', 'ὰ'), - (0x1FBB, 'M', 'ά'), - (0x1FBC, 'M', 'αι'), - (0x1FBD, '3', ' ̓'), - (0x1FBE, 'M', 'ι'), - (0x1FBF, '3', ' ̓'), - (0x1FC0, '3', ' ͂'), - (0x1FC1, '3', ' ̈͂'), - (0x1FC2, 'M', 'ὴι'), - (0x1FC3, 'M', 'ηι'), - (0x1FC4, 'M', 'ήι'), - (0x1FC5, 'X'), - (0x1FC6, 'V'), - (0x1FC7, 'M', 'ῆι'), - (0x1FC8, 'M', 'ὲ'), - (0x1FC9, 'M', 'έ'), - (0x1FCA, 'M', 'ὴ'), - (0x1FCB, 'M', 'ή'), - (0x1FCC, 'M', 'ηι'), - (0x1FCD, '3', ' ̓̀'), - (0x1FCE, '3', ' ̓́'), - (0x1FCF, '3', ' ̓͂'), - (0x1FD0, 'V'), - (0x1FD3, 'M', 'ΐ'), - (0x1FD4, 'X'), - (0x1FD6, 'V'), - (0x1FD8, 'M', 'ῐ'), - (0x1FD9, 'M', 'ῑ'), - (0x1FDA, 'M', 'ὶ'), - (0x1FDB, 'M', 'ί'), - (0x1FDC, 'X'), - (0x1FDD, '3', ' ̔̀'), - (0x1FDE, '3', ' ̔́'), - (0x1FDF, '3', ' ̔͂'), - (0x1FE0, 'V'), - (0x1FE3, 'M', 'ΰ'), - (0x1FE4, 'V'), - (0x1FE8, 'M', 'ῠ'), - (0x1FE9, 'M', 'ῡ'), - (0x1FEA, 'M', 'ὺ'), - (0x1FEB, 'M', 'ύ'), - (0x1FEC, 'M', 'ῥ'), - (0x1FED, '3', ' ̈̀'), - (0x1FEE, '3', ' ̈́'), - (0x1FEF, '3', '`'), - (0x1FF0, 'X'), - (0x1FF2, 'M', 'ὼι'), - (0x1FF3, 'M', 'ωι'), - (0x1FF4, 'M', 'ώι'), - (0x1FF5, 'X'), - (0x1FF6, 'V'), - ] - -def _seg_21() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1FF7, 'M', 'ῶι'), - (0x1FF8, 'M', 'ὸ'), - (0x1FF9, 'M', 'ό'), - (0x1FFA, 'M', 'ὼ'), - (0x1FFB, 'M', 'ώ'), - (0x1FFC, 'M', 'ωι'), - (0x1FFD, '3', ' ́'), - (0x1FFE, '3', ' ̔'), - (0x1FFF, 'X'), - (0x2000, '3', ' '), - (0x200B, 'I'), - (0x200C, 'D', ''), - (0x200E, 'X'), - (0x2010, 'V'), - (0x2011, 'M', '‐'), - (0x2012, 'V'), - (0x2017, '3', ' ̳'), - (0x2018, 'V'), - (0x2024, 'X'), - (0x2027, 'V'), - (0x2028, 'X'), - (0x202F, '3', ' '), - (0x2030, 'V'), - (0x2033, 'M', '′′'), - (0x2034, 'M', '′′′'), - (0x2035, 'V'), - (0x2036, 'M', '‵‵'), - (0x2037, 'M', '‵‵‵'), - (0x2038, 'V'), - (0x203C, '3', '!!'), - (0x203D, 'V'), - (0x203E, '3', ' ̅'), - (0x203F, 'V'), - (0x2047, '3', '??'), - (0x2048, '3', '?!'), - (0x2049, '3', '!?'), - (0x204A, 'V'), - (0x2057, 'M', '′′′′'), - (0x2058, 'V'), - (0x205F, '3', ' '), - (0x2060, 'I'), - (0x2061, 'X'), - (0x2064, 'I'), - (0x2065, 'X'), - (0x2070, 'M', '0'), - (0x2071, 'M', 'i'), - (0x2072, 'X'), - (0x2074, 'M', '4'), - (0x2075, 'M', '5'), - (0x2076, 'M', '6'), - (0x2077, 'M', '7'), - (0x2078, 'M', '8'), - (0x2079, 'M', '9'), - (0x207A, '3', '+'), - (0x207B, 'M', '−'), - (0x207C, '3', '='), - (0x207D, '3', '('), - (0x207E, '3', ')'), - (0x207F, 'M', 'n'), - (0x2080, 'M', '0'), - (0x2081, 'M', '1'), - (0x2082, 'M', '2'), - (0x2083, 'M', '3'), - (0x2084, 'M', '4'), - (0x2085, 'M', '5'), - (0x2086, 'M', '6'), - (0x2087, 'M', '7'), - (0x2088, 'M', '8'), - (0x2089, 'M', '9'), - (0x208A, '3', '+'), - (0x208B, 'M', '−'), - (0x208C, '3', '='), - (0x208D, '3', '('), - (0x208E, '3', ')'), - 
(0x208F, 'X'), - (0x2090, 'M', 'a'), - (0x2091, 'M', 'e'), - (0x2092, 'M', 'o'), - (0x2093, 'M', 'x'), - (0x2094, 'M', 'ə'), - (0x2095, 'M', 'h'), - (0x2096, 'M', 'k'), - (0x2097, 'M', 'l'), - (0x2098, 'M', 'm'), - (0x2099, 'M', 'n'), - (0x209A, 'M', 'p'), - (0x209B, 'M', 's'), - (0x209C, 'M', 't'), - (0x209D, 'X'), - (0x20A0, 'V'), - (0x20A8, 'M', 'rs'), - (0x20A9, 'V'), - (0x20C1, 'X'), - (0x20D0, 'V'), - (0x20F1, 'X'), - (0x2100, '3', 'a/c'), - (0x2101, '3', 'a/s'), - (0x2102, 'M', 'c'), - (0x2103, 'M', '°c'), - (0x2104, 'V'), - ] - -def _seg_22() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x2105, '3', 'c/o'), - (0x2106, '3', 'c/u'), - (0x2107, 'M', 'ɛ'), - (0x2108, 'V'), - (0x2109, 'M', '°f'), - (0x210A, 'M', 'g'), - (0x210B, 'M', 'h'), - (0x210F, 'M', 'ħ'), - (0x2110, 'M', 'i'), - (0x2112, 'M', 'l'), - (0x2114, 'V'), - (0x2115, 'M', 'n'), - (0x2116, 'M', 'no'), - (0x2117, 'V'), - (0x2119, 'M', 'p'), - (0x211A, 'M', 'q'), - (0x211B, 'M', 'r'), - (0x211E, 'V'), - (0x2120, 'M', 'sm'), - (0x2121, 'M', 'tel'), - (0x2122, 'M', 'tm'), - (0x2123, 'V'), - (0x2124, 'M', 'z'), - (0x2125, 'V'), - (0x2126, 'M', 'ω'), - (0x2127, 'V'), - (0x2128, 'M', 'z'), - (0x2129, 'V'), - (0x212A, 'M', 'k'), - (0x212B, 'M', 'å'), - (0x212C, 'M', 'b'), - (0x212D, 'M', 'c'), - (0x212E, 'V'), - (0x212F, 'M', 'e'), - (0x2131, 'M', 'f'), - (0x2132, 'X'), - (0x2133, 'M', 'm'), - (0x2134, 'M', 'o'), - (0x2135, 'M', 'א'), - (0x2136, 'M', 'ב'), - (0x2137, 'M', 'ג'), - (0x2138, 'M', 'ד'), - (0x2139, 'M', 'i'), - (0x213A, 'V'), - (0x213B, 'M', 'fax'), - (0x213C, 'M', 'π'), - (0x213D, 'M', 'γ'), - (0x213F, 'M', 'π'), - (0x2140, 'M', '∑'), - (0x2141, 'V'), - (0x2145, 'M', 'd'), - (0x2147, 'M', 'e'), - (0x2148, 'M', 'i'), - (0x2149, 'M', 'j'), - (0x214A, 'V'), - (0x2150, 'M', '1⁄7'), - (0x2151, 'M', '1⁄9'), - (0x2152, 'M', '1⁄10'), - (0x2153, 'M', '1⁄3'), - (0x2154, 'M', '2⁄3'), - (0x2155, 'M', '1⁄5'), - (0x2156, 'M', '2⁄5'), - (0x2157, 'M', '3⁄5'), - (0x2158, 'M', '4⁄5'), - (0x2159, 'M', '1⁄6'), - (0x215A, 'M', '5⁄6'), - (0x215B, 'M', '1⁄8'), - (0x215C, 'M', '3⁄8'), - (0x215D, 'M', '5⁄8'), - (0x215E, 'M', '7⁄8'), - (0x215F, 'M', '1⁄'), - (0x2160, 'M', 'i'), - (0x2161, 'M', 'ii'), - (0x2162, 'M', 'iii'), - (0x2163, 'M', 'iv'), - (0x2164, 'M', 'v'), - (0x2165, 'M', 'vi'), - (0x2166, 'M', 'vii'), - (0x2167, 'M', 'viii'), - (0x2168, 'M', 'ix'), - (0x2169, 'M', 'x'), - (0x216A, 'M', 'xi'), - (0x216B, 'M', 'xii'), - (0x216C, 'M', 'l'), - (0x216D, 'M', 'c'), - (0x216E, 'M', 'd'), - (0x216F, 'M', 'm'), - (0x2170, 'M', 'i'), - (0x2171, 'M', 'ii'), - (0x2172, 'M', 'iii'), - (0x2173, 'M', 'iv'), - (0x2174, 'M', 'v'), - (0x2175, 'M', 'vi'), - (0x2176, 'M', 'vii'), - (0x2177, 'M', 'viii'), - (0x2178, 'M', 'ix'), - (0x2179, 'M', 'x'), - (0x217A, 'M', 'xi'), - (0x217B, 'M', 'xii'), - (0x217C, 'M', 'l'), - ] - -def _seg_23() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x217D, 'M', 'c'), - (0x217E, 'M', 'd'), - (0x217F, 'M', 'm'), - (0x2180, 'V'), - (0x2183, 'X'), - (0x2184, 'V'), - (0x2189, 'M', '0⁄3'), - (0x218A, 'V'), - (0x218C, 'X'), - (0x2190, 'V'), - (0x222C, 'M', '∫∫'), - (0x222D, 'M', '∫∫∫'), - (0x222E, 'V'), - (0x222F, 'M', '∮∮'), - (0x2230, 'M', '∮∮∮'), - (0x2231, 'V'), - (0x2260, '3'), - (0x2261, 'V'), - (0x226E, '3'), - (0x2270, 'V'), - (0x2329, 'M', '〈'), - (0x232A, 'M', '〉'), - (0x232B, 'V'), - (0x2427, 'X'), - (0x2440, 'V'), - (0x244B, 'X'), - (0x2460, 'M', '1'), - (0x2461, 'M', '2'), - (0x2462, 'M', '3'), - (0x2463, 'M', '4'), - (0x2464, 'M', '5'), - (0x2465, 'M', '6'), - 
(0x2466, 'M', '7'), - (0x2467, 'M', '8'), - (0x2468, 'M', '9'), - (0x2469, 'M', '10'), - (0x246A, 'M', '11'), - (0x246B, 'M', '12'), - (0x246C, 'M', '13'), - (0x246D, 'M', '14'), - (0x246E, 'M', '15'), - (0x246F, 'M', '16'), - (0x2470, 'M', '17'), - (0x2471, 'M', '18'), - (0x2472, 'M', '19'), - (0x2473, 'M', '20'), - (0x2474, '3', '(1)'), - (0x2475, '3', '(2)'), - (0x2476, '3', '(3)'), - (0x2477, '3', '(4)'), - (0x2478, '3', '(5)'), - (0x2479, '3', '(6)'), - (0x247A, '3', '(7)'), - (0x247B, '3', '(8)'), - (0x247C, '3', '(9)'), - (0x247D, '3', '(10)'), - (0x247E, '3', '(11)'), - (0x247F, '3', '(12)'), - (0x2480, '3', '(13)'), - (0x2481, '3', '(14)'), - (0x2482, '3', '(15)'), - (0x2483, '3', '(16)'), - (0x2484, '3', '(17)'), - (0x2485, '3', '(18)'), - (0x2486, '3', '(19)'), - (0x2487, '3', '(20)'), - (0x2488, 'X'), - (0x249C, '3', '(a)'), - (0x249D, '3', '(b)'), - (0x249E, '3', '(c)'), - (0x249F, '3', '(d)'), - (0x24A0, '3', '(e)'), - (0x24A1, '3', '(f)'), - (0x24A2, '3', '(g)'), - (0x24A3, '3', '(h)'), - (0x24A4, '3', '(i)'), - (0x24A5, '3', '(j)'), - (0x24A6, '3', '(k)'), - (0x24A7, '3', '(l)'), - (0x24A8, '3', '(m)'), - (0x24A9, '3', '(n)'), - (0x24AA, '3', '(o)'), - (0x24AB, '3', '(p)'), - (0x24AC, '3', '(q)'), - (0x24AD, '3', '(r)'), - (0x24AE, '3', '(s)'), - (0x24AF, '3', '(t)'), - (0x24B0, '3', '(u)'), - (0x24B1, '3', '(v)'), - (0x24B2, '3', '(w)'), - (0x24B3, '3', '(x)'), - (0x24B4, '3', '(y)'), - (0x24B5, '3', '(z)'), - (0x24B6, 'M', 'a'), - (0x24B7, 'M', 'b'), - (0x24B8, 'M', 'c'), - (0x24B9, 'M', 'd'), - (0x24BA, 'M', 'e'), - (0x24BB, 'M', 'f'), - (0x24BC, 'M', 'g'), - ] - -def _seg_24() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x24BD, 'M', 'h'), - (0x24BE, 'M', 'i'), - (0x24BF, 'M', 'j'), - (0x24C0, 'M', 'k'), - (0x24C1, 'M', 'l'), - (0x24C2, 'M', 'm'), - (0x24C3, 'M', 'n'), - (0x24C4, 'M', 'o'), - (0x24C5, 'M', 'p'), - (0x24C6, 'M', 'q'), - (0x24C7, 'M', 'r'), - (0x24C8, 'M', 's'), - (0x24C9, 'M', 't'), - (0x24CA, 'M', 'u'), - (0x24CB, 'M', 'v'), - (0x24CC, 'M', 'w'), - (0x24CD, 'M', 'x'), - (0x24CE, 'M', 'y'), - (0x24CF, 'M', 'z'), - (0x24D0, 'M', 'a'), - (0x24D1, 'M', 'b'), - (0x24D2, 'M', 'c'), - (0x24D3, 'M', 'd'), - (0x24D4, 'M', 'e'), - (0x24D5, 'M', 'f'), - (0x24D6, 'M', 'g'), - (0x24D7, 'M', 'h'), - (0x24D8, 'M', 'i'), - (0x24D9, 'M', 'j'), - (0x24DA, 'M', 'k'), - (0x24DB, 'M', 'l'), - (0x24DC, 'M', 'm'), - (0x24DD, 'M', 'n'), - (0x24DE, 'M', 'o'), - (0x24DF, 'M', 'p'), - (0x24E0, 'M', 'q'), - (0x24E1, 'M', 'r'), - (0x24E2, 'M', 's'), - (0x24E3, 'M', 't'), - (0x24E4, 'M', 'u'), - (0x24E5, 'M', 'v'), - (0x24E6, 'M', 'w'), - (0x24E7, 'M', 'x'), - (0x24E8, 'M', 'y'), - (0x24E9, 'M', 'z'), - (0x24EA, 'M', '0'), - (0x24EB, 'V'), - (0x2A0C, 'M', '∫∫∫∫'), - (0x2A0D, 'V'), - (0x2A74, '3', '::='), - (0x2A75, '3', '=='), - (0x2A76, '3', '==='), - (0x2A77, 'V'), - (0x2ADC, 'M', '⫝̸'), - (0x2ADD, 'V'), - (0x2B74, 'X'), - (0x2B76, 'V'), - (0x2B96, 'X'), - (0x2B97, 'V'), - (0x2C00, 'M', 'ⰰ'), - (0x2C01, 'M', 'ⰱ'), - (0x2C02, 'M', 'ⰲ'), - (0x2C03, 'M', 'ⰳ'), - (0x2C04, 'M', 'ⰴ'), - (0x2C05, 'M', 'ⰵ'), - (0x2C06, 'M', 'ⰶ'), - (0x2C07, 'M', 'ⰷ'), - (0x2C08, 'M', 'ⰸ'), - (0x2C09, 'M', 'ⰹ'), - (0x2C0A, 'M', 'ⰺ'), - (0x2C0B, 'M', 'ⰻ'), - (0x2C0C, 'M', 'ⰼ'), - (0x2C0D, 'M', 'ⰽ'), - (0x2C0E, 'M', 'ⰾ'), - (0x2C0F, 'M', 'ⰿ'), - (0x2C10, 'M', 'ⱀ'), - (0x2C11, 'M', 'ⱁ'), - (0x2C12, 'M', 'ⱂ'), - (0x2C13, 'M', 'ⱃ'), - (0x2C14, 'M', 'ⱄ'), - (0x2C15, 'M', 'ⱅ'), - (0x2C16, 'M', 'ⱆ'), - (0x2C17, 'M', 'ⱇ'), - (0x2C18, 'M', 'ⱈ'), - (0x2C19, 'M', 'ⱉ'), - (0x2C1A, 'M', 'ⱊ'), - 
(0x2C1B, 'M', 'ⱋ'), - (0x2C1C, 'M', 'ⱌ'), - (0x2C1D, 'M', 'ⱍ'), - (0x2C1E, 'M', 'ⱎ'), - (0x2C1F, 'M', 'ⱏ'), - (0x2C20, 'M', 'ⱐ'), - (0x2C21, 'M', 'ⱑ'), - (0x2C22, 'M', 'ⱒ'), - (0x2C23, 'M', 'ⱓ'), - (0x2C24, 'M', 'ⱔ'), - (0x2C25, 'M', 'ⱕ'), - (0x2C26, 'M', 'ⱖ'), - (0x2C27, 'M', 'ⱗ'), - (0x2C28, 'M', 'ⱘ'), - ] - -def _seg_25() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x2C29, 'M', 'ⱙ'), - (0x2C2A, 'M', 'ⱚ'), - (0x2C2B, 'M', 'ⱛ'), - (0x2C2C, 'M', 'ⱜ'), - (0x2C2D, 'M', 'ⱝ'), - (0x2C2E, 'M', 'ⱞ'), - (0x2C2F, 'M', 'ⱟ'), - (0x2C30, 'V'), - (0x2C60, 'M', 'ⱡ'), - (0x2C61, 'V'), - (0x2C62, 'M', 'ɫ'), - (0x2C63, 'M', 'ᵽ'), - (0x2C64, 'M', 'ɽ'), - (0x2C65, 'V'), - (0x2C67, 'M', 'ⱨ'), - (0x2C68, 'V'), - (0x2C69, 'M', 'ⱪ'), - (0x2C6A, 'V'), - (0x2C6B, 'M', 'ⱬ'), - (0x2C6C, 'V'), - (0x2C6D, 'M', 'ɑ'), - (0x2C6E, 'M', 'ɱ'), - (0x2C6F, 'M', 'ɐ'), - (0x2C70, 'M', 'ɒ'), - (0x2C71, 'V'), - (0x2C72, 'M', 'ⱳ'), - (0x2C73, 'V'), - (0x2C75, 'M', 'ⱶ'), - (0x2C76, 'V'), - (0x2C7C, 'M', 'j'), - (0x2C7D, 'M', 'v'), - (0x2C7E, 'M', 'ȿ'), - (0x2C7F, 'M', 'ɀ'), - (0x2C80, 'M', 'ⲁ'), - (0x2C81, 'V'), - (0x2C82, 'M', 'ⲃ'), - (0x2C83, 'V'), - (0x2C84, 'M', 'ⲅ'), - (0x2C85, 'V'), - (0x2C86, 'M', 'ⲇ'), - (0x2C87, 'V'), - (0x2C88, 'M', 'ⲉ'), - (0x2C89, 'V'), - (0x2C8A, 'M', 'ⲋ'), - (0x2C8B, 'V'), - (0x2C8C, 'M', 'ⲍ'), - (0x2C8D, 'V'), - (0x2C8E, 'M', 'ⲏ'), - (0x2C8F, 'V'), - (0x2C90, 'M', 'ⲑ'), - (0x2C91, 'V'), - (0x2C92, 'M', 'ⲓ'), - (0x2C93, 'V'), - (0x2C94, 'M', 'ⲕ'), - (0x2C95, 'V'), - (0x2C96, 'M', 'ⲗ'), - (0x2C97, 'V'), - (0x2C98, 'M', 'ⲙ'), - (0x2C99, 'V'), - (0x2C9A, 'M', 'ⲛ'), - (0x2C9B, 'V'), - (0x2C9C, 'M', 'ⲝ'), - (0x2C9D, 'V'), - (0x2C9E, 'M', 'ⲟ'), - (0x2C9F, 'V'), - (0x2CA0, 'M', 'ⲡ'), - (0x2CA1, 'V'), - (0x2CA2, 'M', 'ⲣ'), - (0x2CA3, 'V'), - (0x2CA4, 'M', 'ⲥ'), - (0x2CA5, 'V'), - (0x2CA6, 'M', 'ⲧ'), - (0x2CA7, 'V'), - (0x2CA8, 'M', 'ⲩ'), - (0x2CA9, 'V'), - (0x2CAA, 'M', 'ⲫ'), - (0x2CAB, 'V'), - (0x2CAC, 'M', 'ⲭ'), - (0x2CAD, 'V'), - (0x2CAE, 'M', 'ⲯ'), - (0x2CAF, 'V'), - (0x2CB0, 'M', 'ⲱ'), - (0x2CB1, 'V'), - (0x2CB2, 'M', 'ⲳ'), - (0x2CB3, 'V'), - (0x2CB4, 'M', 'ⲵ'), - (0x2CB5, 'V'), - (0x2CB6, 'M', 'ⲷ'), - (0x2CB7, 'V'), - (0x2CB8, 'M', 'ⲹ'), - (0x2CB9, 'V'), - (0x2CBA, 'M', 'ⲻ'), - (0x2CBB, 'V'), - (0x2CBC, 'M', 'ⲽ'), - (0x2CBD, 'V'), - (0x2CBE, 'M', 'ⲿ'), - (0x2CBF, 'V'), - (0x2CC0, 'M', 'ⳁ'), - (0x2CC1, 'V'), - (0x2CC2, 'M', 'ⳃ'), - ] - -def _seg_26() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x2CC3, 'V'), - (0x2CC4, 'M', 'ⳅ'), - (0x2CC5, 'V'), - (0x2CC6, 'M', 'ⳇ'), - (0x2CC7, 'V'), - (0x2CC8, 'M', 'ⳉ'), - (0x2CC9, 'V'), - (0x2CCA, 'M', 'ⳋ'), - (0x2CCB, 'V'), - (0x2CCC, 'M', 'ⳍ'), - (0x2CCD, 'V'), - (0x2CCE, 'M', 'ⳏ'), - (0x2CCF, 'V'), - (0x2CD0, 'M', 'ⳑ'), - (0x2CD1, 'V'), - (0x2CD2, 'M', 'ⳓ'), - (0x2CD3, 'V'), - (0x2CD4, 'M', 'ⳕ'), - (0x2CD5, 'V'), - (0x2CD6, 'M', 'ⳗ'), - (0x2CD7, 'V'), - (0x2CD8, 'M', 'ⳙ'), - (0x2CD9, 'V'), - (0x2CDA, 'M', 'ⳛ'), - (0x2CDB, 'V'), - (0x2CDC, 'M', 'ⳝ'), - (0x2CDD, 'V'), - (0x2CDE, 'M', 'ⳟ'), - (0x2CDF, 'V'), - (0x2CE0, 'M', 'ⳡ'), - (0x2CE1, 'V'), - (0x2CE2, 'M', 'ⳣ'), - (0x2CE3, 'V'), - (0x2CEB, 'M', 'ⳬ'), - (0x2CEC, 'V'), - (0x2CED, 'M', 'ⳮ'), - (0x2CEE, 'V'), - (0x2CF2, 'M', 'ⳳ'), - (0x2CF3, 'V'), - (0x2CF4, 'X'), - (0x2CF9, 'V'), - (0x2D26, 'X'), - (0x2D27, 'V'), - (0x2D28, 'X'), - (0x2D2D, 'V'), - (0x2D2E, 'X'), - (0x2D30, 'V'), - (0x2D68, 'X'), - (0x2D6F, 'M', 'ⵡ'), - (0x2D70, 'V'), - (0x2D71, 'X'), - (0x2D7F, 'V'), - (0x2D97, 'X'), - (0x2DA0, 'V'), - (0x2DA7, 'X'), - (0x2DA8, 'V'), - (0x2DAF, 'X'), - (0x2DB0, 
'V'), - (0x2DB7, 'X'), - (0x2DB8, 'V'), - (0x2DBF, 'X'), - (0x2DC0, 'V'), - (0x2DC7, 'X'), - (0x2DC8, 'V'), - (0x2DCF, 'X'), - (0x2DD0, 'V'), - (0x2DD7, 'X'), - (0x2DD8, 'V'), - (0x2DDF, 'X'), - (0x2DE0, 'V'), - (0x2E5E, 'X'), - (0x2E80, 'V'), - (0x2E9A, 'X'), - (0x2E9B, 'V'), - (0x2E9F, 'M', '母'), - (0x2EA0, 'V'), - (0x2EF3, 'M', '龟'), - (0x2EF4, 'X'), - (0x2F00, 'M', '一'), - (0x2F01, 'M', '丨'), - (0x2F02, 'M', '丶'), - (0x2F03, 'M', '丿'), - (0x2F04, 'M', '乙'), - (0x2F05, 'M', '亅'), - (0x2F06, 'M', '二'), - (0x2F07, 'M', '亠'), - (0x2F08, 'M', '人'), - (0x2F09, 'M', '儿'), - (0x2F0A, 'M', '入'), - (0x2F0B, 'M', '八'), - (0x2F0C, 'M', '冂'), - (0x2F0D, 'M', '冖'), - (0x2F0E, 'M', '冫'), - (0x2F0F, 'M', '几'), - (0x2F10, 'M', '凵'), - (0x2F11, 'M', '刀'), - (0x2F12, 'M', '力'), - (0x2F13, 'M', '勹'), - (0x2F14, 'M', '匕'), - (0x2F15, 'M', '匚'), - ] - -def _seg_27() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x2F16, 'M', '匸'), - (0x2F17, 'M', '十'), - (0x2F18, 'M', '卜'), - (0x2F19, 'M', '卩'), - (0x2F1A, 'M', '厂'), - (0x2F1B, 'M', '厶'), - (0x2F1C, 'M', '又'), - (0x2F1D, 'M', '口'), - (0x2F1E, 'M', '囗'), - (0x2F1F, 'M', '土'), - (0x2F20, 'M', '士'), - (0x2F21, 'M', '夂'), - (0x2F22, 'M', '夊'), - (0x2F23, 'M', '夕'), - (0x2F24, 'M', '大'), - (0x2F25, 'M', '女'), - (0x2F26, 'M', '子'), - (0x2F27, 'M', '宀'), - (0x2F28, 'M', '寸'), - (0x2F29, 'M', '小'), - (0x2F2A, 'M', '尢'), - (0x2F2B, 'M', '尸'), - (0x2F2C, 'M', '屮'), - (0x2F2D, 'M', '山'), - (0x2F2E, 'M', '巛'), - (0x2F2F, 'M', '工'), - (0x2F30, 'M', '己'), - (0x2F31, 'M', '巾'), - (0x2F32, 'M', '干'), - (0x2F33, 'M', '幺'), - (0x2F34, 'M', '广'), - (0x2F35, 'M', '廴'), - (0x2F36, 'M', '廾'), - (0x2F37, 'M', '弋'), - (0x2F38, 'M', '弓'), - (0x2F39, 'M', '彐'), - (0x2F3A, 'M', '彡'), - (0x2F3B, 'M', '彳'), - (0x2F3C, 'M', '心'), - (0x2F3D, 'M', '戈'), - (0x2F3E, 'M', '戶'), - (0x2F3F, 'M', '手'), - (0x2F40, 'M', '支'), - (0x2F41, 'M', '攴'), - (0x2F42, 'M', '文'), - (0x2F43, 'M', '斗'), - (0x2F44, 'M', '斤'), - (0x2F45, 'M', '方'), - (0x2F46, 'M', '无'), - (0x2F47, 'M', '日'), - (0x2F48, 'M', '曰'), - (0x2F49, 'M', '月'), - (0x2F4A, 'M', '木'), - (0x2F4B, 'M', '欠'), - (0x2F4C, 'M', '止'), - (0x2F4D, 'M', '歹'), - (0x2F4E, 'M', '殳'), - (0x2F4F, 'M', '毋'), - (0x2F50, 'M', '比'), - (0x2F51, 'M', '毛'), - (0x2F52, 'M', '氏'), - (0x2F53, 'M', '气'), - (0x2F54, 'M', '水'), - (0x2F55, 'M', '火'), - (0x2F56, 'M', '爪'), - (0x2F57, 'M', '父'), - (0x2F58, 'M', '爻'), - (0x2F59, 'M', '爿'), - (0x2F5A, 'M', '片'), - (0x2F5B, 'M', '牙'), - (0x2F5C, 'M', '牛'), - (0x2F5D, 'M', '犬'), - (0x2F5E, 'M', '玄'), - (0x2F5F, 'M', '玉'), - (0x2F60, 'M', '瓜'), - (0x2F61, 'M', '瓦'), - (0x2F62, 'M', '甘'), - (0x2F63, 'M', '生'), - (0x2F64, 'M', '用'), - (0x2F65, 'M', '田'), - (0x2F66, 'M', '疋'), - (0x2F67, 'M', '疒'), - (0x2F68, 'M', '癶'), - (0x2F69, 'M', '白'), - (0x2F6A, 'M', '皮'), - (0x2F6B, 'M', '皿'), - (0x2F6C, 'M', '目'), - (0x2F6D, 'M', '矛'), - (0x2F6E, 'M', '矢'), - (0x2F6F, 'M', '石'), - (0x2F70, 'M', '示'), - (0x2F71, 'M', '禸'), - (0x2F72, 'M', '禾'), - (0x2F73, 'M', '穴'), - (0x2F74, 'M', '立'), - (0x2F75, 'M', '竹'), - (0x2F76, 'M', '米'), - (0x2F77, 'M', '糸'), - (0x2F78, 'M', '缶'), - (0x2F79, 'M', '网'), - ] - -def _seg_28() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x2F7A, 'M', '羊'), - (0x2F7B, 'M', '羽'), - (0x2F7C, 'M', '老'), - (0x2F7D, 'M', '而'), - (0x2F7E, 'M', '耒'), - (0x2F7F, 'M', '耳'), - (0x2F80, 'M', '聿'), - (0x2F81, 'M', '肉'), - (0x2F82, 'M', '臣'), - (0x2F83, 'M', '自'), - (0x2F84, 'M', '至'), - (0x2F85, 'M', '臼'), - (0x2F86, 'M', '舌'), - (0x2F87, 'M', '舛'), - (0x2F88, 'M', '舟'), - (0x2F89, 
'M', '艮'), - (0x2F8A, 'M', '色'), - (0x2F8B, 'M', '艸'), - (0x2F8C, 'M', '虍'), - (0x2F8D, 'M', '虫'), - (0x2F8E, 'M', '血'), - (0x2F8F, 'M', '行'), - (0x2F90, 'M', '衣'), - (0x2F91, 'M', '襾'), - (0x2F92, 'M', '見'), - (0x2F93, 'M', '角'), - (0x2F94, 'M', '言'), - (0x2F95, 'M', '谷'), - (0x2F96, 'M', '豆'), - (0x2F97, 'M', '豕'), - (0x2F98, 'M', '豸'), - (0x2F99, 'M', '貝'), - (0x2F9A, 'M', '赤'), - (0x2F9B, 'M', '走'), - (0x2F9C, 'M', '足'), - (0x2F9D, 'M', '身'), - (0x2F9E, 'M', '車'), - (0x2F9F, 'M', '辛'), - (0x2FA0, 'M', '辰'), - (0x2FA1, 'M', '辵'), - (0x2FA2, 'M', '邑'), - (0x2FA3, 'M', '酉'), - (0x2FA4, 'M', '釆'), - (0x2FA5, 'M', '里'), - (0x2FA6, 'M', '金'), - (0x2FA7, 'M', '長'), - (0x2FA8, 'M', '門'), - (0x2FA9, 'M', '阜'), - (0x2FAA, 'M', '隶'), - (0x2FAB, 'M', '隹'), - (0x2FAC, 'M', '雨'), - (0x2FAD, 'M', '靑'), - (0x2FAE, 'M', '非'), - (0x2FAF, 'M', '面'), - (0x2FB0, 'M', '革'), - (0x2FB1, 'M', '韋'), - (0x2FB2, 'M', '韭'), - (0x2FB3, 'M', '音'), - (0x2FB4, 'M', '頁'), - (0x2FB5, 'M', '風'), - (0x2FB6, 'M', '飛'), - (0x2FB7, 'M', '食'), - (0x2FB8, 'M', '首'), - (0x2FB9, 'M', '香'), - (0x2FBA, 'M', '馬'), - (0x2FBB, 'M', '骨'), - (0x2FBC, 'M', '高'), - (0x2FBD, 'M', '髟'), - (0x2FBE, 'M', '鬥'), - (0x2FBF, 'M', '鬯'), - (0x2FC0, 'M', '鬲'), - (0x2FC1, 'M', '鬼'), - (0x2FC2, 'M', '魚'), - (0x2FC3, 'M', '鳥'), - (0x2FC4, 'M', '鹵'), - (0x2FC5, 'M', '鹿'), - (0x2FC6, 'M', '麥'), - (0x2FC7, 'M', '麻'), - (0x2FC8, 'M', '黃'), - (0x2FC9, 'M', '黍'), - (0x2FCA, 'M', '黑'), - (0x2FCB, 'M', '黹'), - (0x2FCC, 'M', '黽'), - (0x2FCD, 'M', '鼎'), - (0x2FCE, 'M', '鼓'), - (0x2FCF, 'M', '鼠'), - (0x2FD0, 'M', '鼻'), - (0x2FD1, 'M', '齊'), - (0x2FD2, 'M', '齒'), - (0x2FD3, 'M', '龍'), - (0x2FD4, 'M', '龜'), - (0x2FD5, 'M', '龠'), - (0x2FD6, 'X'), - (0x3000, '3', ' '), - (0x3001, 'V'), - (0x3002, 'M', '.'), - (0x3003, 'V'), - (0x3036, 'M', '〒'), - (0x3037, 'V'), - (0x3038, 'M', '十'), - ] - -def _seg_29() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x3039, 'M', '卄'), - (0x303A, 'M', '卅'), - (0x303B, 'V'), - (0x3040, 'X'), - (0x3041, 'V'), - (0x3097, 'X'), - (0x3099, 'V'), - (0x309B, '3', ' ゙'), - (0x309C, '3', ' ゚'), - (0x309D, 'V'), - (0x309F, 'M', 'より'), - (0x30A0, 'V'), - (0x30FF, 'M', 'コト'), - (0x3100, 'X'), - (0x3105, 'V'), - (0x3130, 'X'), - (0x3131, 'M', 'ᄀ'), - (0x3132, 'M', 'ᄁ'), - (0x3133, 'M', 'ᆪ'), - (0x3134, 'M', 'ᄂ'), - (0x3135, 'M', 'ᆬ'), - (0x3136, 'M', 'ᆭ'), - (0x3137, 'M', 'ᄃ'), - (0x3138, 'M', 'ᄄ'), - (0x3139, 'M', 'ᄅ'), - (0x313A, 'M', 'ᆰ'), - (0x313B, 'M', 'ᆱ'), - (0x313C, 'M', 'ᆲ'), - (0x313D, 'M', 'ᆳ'), - (0x313E, 'M', 'ᆴ'), - (0x313F, 'M', 'ᆵ'), - (0x3140, 'M', 'ᄚ'), - (0x3141, 'M', 'ᄆ'), - (0x3142, 'M', 'ᄇ'), - (0x3143, 'M', 'ᄈ'), - (0x3144, 'M', 'ᄡ'), - (0x3145, 'M', 'ᄉ'), - (0x3146, 'M', 'ᄊ'), - (0x3147, 'M', 'ᄋ'), - (0x3148, 'M', 'ᄌ'), - (0x3149, 'M', 'ᄍ'), - (0x314A, 'M', 'ᄎ'), - (0x314B, 'M', 'ᄏ'), - (0x314C, 'M', 'ᄐ'), - (0x314D, 'M', 'ᄑ'), - (0x314E, 'M', 'ᄒ'), - (0x314F, 'M', 'ᅡ'), - (0x3150, 'M', 'ᅢ'), - (0x3151, 'M', 'ᅣ'), - (0x3152, 'M', 'ᅤ'), - (0x3153, 'M', 'ᅥ'), - (0x3154, 'M', 'ᅦ'), - (0x3155, 'M', 'ᅧ'), - (0x3156, 'M', 'ᅨ'), - (0x3157, 'M', 'ᅩ'), - (0x3158, 'M', 'ᅪ'), - (0x3159, 'M', 'ᅫ'), - (0x315A, 'M', 'ᅬ'), - (0x315B, 'M', 'ᅭ'), - (0x315C, 'M', 'ᅮ'), - (0x315D, 'M', 'ᅯ'), - (0x315E, 'M', 'ᅰ'), - (0x315F, 'M', 'ᅱ'), - (0x3160, 'M', 'ᅲ'), - (0x3161, 'M', 'ᅳ'), - (0x3162, 'M', 'ᅴ'), - (0x3163, 'M', 'ᅵ'), - (0x3164, 'X'), - (0x3165, 'M', 'ᄔ'), - (0x3166, 'M', 'ᄕ'), - (0x3167, 'M', 'ᇇ'), - (0x3168, 'M', 'ᇈ'), - (0x3169, 'M', 'ᇌ'), - (0x316A, 'M', 'ᇎ'), - (0x316B, 'M', 'ᇓ'), - (0x316C, 'M', 'ᇗ'), - 
(0x316D, 'M', 'ᇙ'), - (0x316E, 'M', 'ᄜ'), - (0x316F, 'M', 'ᇝ'), - (0x3170, 'M', 'ᇟ'), - (0x3171, 'M', 'ᄝ'), - (0x3172, 'M', 'ᄞ'), - (0x3173, 'M', 'ᄠ'), - (0x3174, 'M', 'ᄢ'), - (0x3175, 'M', 'ᄣ'), - (0x3176, 'M', 'ᄧ'), - (0x3177, 'M', 'ᄩ'), - (0x3178, 'M', 'ᄫ'), - (0x3179, 'M', 'ᄬ'), - (0x317A, 'M', 'ᄭ'), - (0x317B, 'M', 'ᄮ'), - (0x317C, 'M', 'ᄯ'), - (0x317D, 'M', 'ᄲ'), - (0x317E, 'M', 'ᄶ'), - (0x317F, 'M', 'ᅀ'), - (0x3180, 'M', 'ᅇ'), - (0x3181, 'M', 'ᅌ'), - (0x3182, 'M', 'ᇱ'), - (0x3183, 'M', 'ᇲ'), - (0x3184, 'M', 'ᅗ'), - ] - -def _seg_30() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x3185, 'M', 'ᅘ'), - (0x3186, 'M', 'ᅙ'), - (0x3187, 'M', 'ᆄ'), - (0x3188, 'M', 'ᆅ'), - (0x3189, 'M', 'ᆈ'), - (0x318A, 'M', 'ᆑ'), - (0x318B, 'M', 'ᆒ'), - (0x318C, 'M', 'ᆔ'), - (0x318D, 'M', 'ᆞ'), - (0x318E, 'M', 'ᆡ'), - (0x318F, 'X'), - (0x3190, 'V'), - (0x3192, 'M', '一'), - (0x3193, 'M', '二'), - (0x3194, 'M', '三'), - (0x3195, 'M', '四'), - (0x3196, 'M', '上'), - (0x3197, 'M', '中'), - (0x3198, 'M', '下'), - (0x3199, 'M', '甲'), - (0x319A, 'M', '乙'), - (0x319B, 'M', '丙'), - (0x319C, 'M', '丁'), - (0x319D, 'M', '天'), - (0x319E, 'M', '地'), - (0x319F, 'M', '人'), - (0x31A0, 'V'), - (0x31E4, 'X'), - (0x31F0, 'V'), - (0x3200, '3', '(ᄀ)'), - (0x3201, '3', '(ᄂ)'), - (0x3202, '3', '(ᄃ)'), - (0x3203, '3', '(ᄅ)'), - (0x3204, '3', '(ᄆ)'), - (0x3205, '3', '(ᄇ)'), - (0x3206, '3', '(ᄉ)'), - (0x3207, '3', '(ᄋ)'), - (0x3208, '3', '(ᄌ)'), - (0x3209, '3', '(ᄎ)'), - (0x320A, '3', '(ᄏ)'), - (0x320B, '3', '(ᄐ)'), - (0x320C, '3', '(ᄑ)'), - (0x320D, '3', '(ᄒ)'), - (0x320E, '3', '(가)'), - (0x320F, '3', '(나)'), - (0x3210, '3', '(다)'), - (0x3211, '3', '(라)'), - (0x3212, '3', '(마)'), - (0x3213, '3', '(바)'), - (0x3214, '3', '(사)'), - (0x3215, '3', '(아)'), - (0x3216, '3', '(자)'), - (0x3217, '3', '(차)'), - (0x3218, '3', '(카)'), - (0x3219, '3', '(타)'), - (0x321A, '3', '(파)'), - (0x321B, '3', '(하)'), - (0x321C, '3', '(주)'), - (0x321D, '3', '(오전)'), - (0x321E, '3', '(오후)'), - (0x321F, 'X'), - (0x3220, '3', '(一)'), - (0x3221, '3', '(二)'), - (0x3222, '3', '(三)'), - (0x3223, '3', '(四)'), - (0x3224, '3', '(五)'), - (0x3225, '3', '(六)'), - (0x3226, '3', '(七)'), - (0x3227, '3', '(八)'), - (0x3228, '3', '(九)'), - (0x3229, '3', '(十)'), - (0x322A, '3', '(月)'), - (0x322B, '3', '(火)'), - (0x322C, '3', '(水)'), - (0x322D, '3', '(木)'), - (0x322E, '3', '(金)'), - (0x322F, '3', '(土)'), - (0x3230, '3', '(日)'), - (0x3231, '3', '(株)'), - (0x3232, '3', '(有)'), - (0x3233, '3', '(社)'), - (0x3234, '3', '(名)'), - (0x3235, '3', '(特)'), - (0x3236, '3', '(財)'), - (0x3237, '3', '(祝)'), - (0x3238, '3', '(労)'), - (0x3239, '3', '(代)'), - (0x323A, '3', '(呼)'), - (0x323B, '3', '(学)'), - (0x323C, '3', '(監)'), - (0x323D, '3', '(企)'), - (0x323E, '3', '(資)'), - (0x323F, '3', '(協)'), - (0x3240, '3', '(祭)'), - (0x3241, '3', '(休)'), - (0x3242, '3', '(自)'), - (0x3243, '3', '(至)'), - (0x3244, 'M', '問'), - (0x3245, 'M', '幼'), - (0x3246, 'M', '文'), - ] - -def _seg_31() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x3247, 'M', '箏'), - (0x3248, 'V'), - (0x3250, 'M', 'pte'), - (0x3251, 'M', '21'), - (0x3252, 'M', '22'), - (0x3253, 'M', '23'), - (0x3254, 'M', '24'), - (0x3255, 'M', '25'), - (0x3256, 'M', '26'), - (0x3257, 'M', '27'), - (0x3258, 'M', '28'), - (0x3259, 'M', '29'), - (0x325A, 'M', '30'), - (0x325B, 'M', '31'), - (0x325C, 'M', '32'), - (0x325D, 'M', '33'), - (0x325E, 'M', '34'), - (0x325F, 'M', '35'), - (0x3260, 'M', 'ᄀ'), - (0x3261, 'M', 'ᄂ'), - (0x3262, 'M', 'ᄃ'), - (0x3263, 'M', 'ᄅ'), - (0x3264, 'M', 'ᄆ'), - (0x3265, 'M', 'ᄇ'), - 
(0x3266, 'M', 'ᄉ'), - (0x3267, 'M', 'ᄋ'), - (0x3268, 'M', 'ᄌ'), - (0x3269, 'M', 'ᄎ'), - (0x326A, 'M', 'ᄏ'), - (0x326B, 'M', 'ᄐ'), - (0x326C, 'M', 'ᄑ'), - (0x326D, 'M', 'ᄒ'), - (0x326E, 'M', '가'), - (0x326F, 'M', '나'), - (0x3270, 'M', '다'), - (0x3271, 'M', '라'), - (0x3272, 'M', '마'), - (0x3273, 'M', '바'), - (0x3274, 'M', '사'), - (0x3275, 'M', '아'), - (0x3276, 'M', '자'), - (0x3277, 'M', '차'), - (0x3278, 'M', '카'), - (0x3279, 'M', '타'), - (0x327A, 'M', '파'), - (0x327B, 'M', '하'), - (0x327C, 'M', '참고'), - (0x327D, 'M', '주의'), - (0x327E, 'M', '우'), - (0x327F, 'V'), - (0x3280, 'M', '一'), - (0x3281, 'M', '二'), - (0x3282, 'M', '三'), - (0x3283, 'M', '四'), - (0x3284, 'M', '五'), - (0x3285, 'M', '六'), - (0x3286, 'M', '七'), - (0x3287, 'M', '八'), - (0x3288, 'M', '九'), - (0x3289, 'M', '十'), - (0x328A, 'M', '月'), - (0x328B, 'M', '火'), - (0x328C, 'M', '水'), - (0x328D, 'M', '木'), - (0x328E, 'M', '金'), - (0x328F, 'M', '土'), - (0x3290, 'M', '日'), - (0x3291, 'M', '株'), - (0x3292, 'M', '有'), - (0x3293, 'M', '社'), - (0x3294, 'M', '名'), - (0x3295, 'M', '特'), - (0x3296, 'M', '財'), - (0x3297, 'M', '祝'), - (0x3298, 'M', '労'), - (0x3299, 'M', '秘'), - (0x329A, 'M', '男'), - (0x329B, 'M', '女'), - (0x329C, 'M', '適'), - (0x329D, 'M', '優'), - (0x329E, 'M', '印'), - (0x329F, 'M', '注'), - (0x32A0, 'M', '項'), - (0x32A1, 'M', '休'), - (0x32A2, 'M', '写'), - (0x32A3, 'M', '正'), - (0x32A4, 'M', '上'), - (0x32A5, 'M', '中'), - (0x32A6, 'M', '下'), - (0x32A7, 'M', '左'), - (0x32A8, 'M', '右'), - (0x32A9, 'M', '医'), - (0x32AA, 'M', '宗'), - (0x32AB, 'M', '学'), - (0x32AC, 'M', '監'), - (0x32AD, 'M', '企'), - (0x32AE, 'M', '資'), - (0x32AF, 'M', '協'), - (0x32B0, 'M', '夜'), - (0x32B1, 'M', '36'), - ] - -def _seg_32() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x32B2, 'M', '37'), - (0x32B3, 'M', '38'), - (0x32B4, 'M', '39'), - (0x32B5, 'M', '40'), - (0x32B6, 'M', '41'), - (0x32B7, 'M', '42'), - (0x32B8, 'M', '43'), - (0x32B9, 'M', '44'), - (0x32BA, 'M', '45'), - (0x32BB, 'M', '46'), - (0x32BC, 'M', '47'), - (0x32BD, 'M', '48'), - (0x32BE, 'M', '49'), - (0x32BF, 'M', '50'), - (0x32C0, 'M', '1月'), - (0x32C1, 'M', '2月'), - (0x32C2, 'M', '3月'), - (0x32C3, 'M', '4月'), - (0x32C4, 'M', '5月'), - (0x32C5, 'M', '6月'), - (0x32C6, 'M', '7月'), - (0x32C7, 'M', '8月'), - (0x32C8, 'M', '9月'), - (0x32C9, 'M', '10月'), - (0x32CA, 'M', '11月'), - (0x32CB, 'M', '12月'), - (0x32CC, 'M', 'hg'), - (0x32CD, 'M', 'erg'), - (0x32CE, 'M', 'ev'), - (0x32CF, 'M', 'ltd'), - (0x32D0, 'M', 'ア'), - (0x32D1, 'M', 'イ'), - (0x32D2, 'M', 'ウ'), - (0x32D3, 'M', 'エ'), - (0x32D4, 'M', 'オ'), - (0x32D5, 'M', 'カ'), - (0x32D6, 'M', 'キ'), - (0x32D7, 'M', 'ク'), - (0x32D8, 'M', 'ケ'), - (0x32D9, 'M', 'コ'), - (0x32DA, 'M', 'サ'), - (0x32DB, 'M', 'シ'), - (0x32DC, 'M', 'ス'), - (0x32DD, 'M', 'セ'), - (0x32DE, 'M', 'ソ'), - (0x32DF, 'M', 'タ'), - (0x32E0, 'M', 'チ'), - (0x32E1, 'M', 'ツ'), - (0x32E2, 'M', 'テ'), - (0x32E3, 'M', 'ト'), - (0x32E4, 'M', 'ナ'), - (0x32E5, 'M', 'ニ'), - (0x32E6, 'M', 'ヌ'), - (0x32E7, 'M', 'ネ'), - (0x32E8, 'M', 'ノ'), - (0x32E9, 'M', 'ハ'), - (0x32EA, 'M', 'ヒ'), - (0x32EB, 'M', 'フ'), - (0x32EC, 'M', 'ヘ'), - (0x32ED, 'M', 'ホ'), - (0x32EE, 'M', 'マ'), - (0x32EF, 'M', 'ミ'), - (0x32F0, 'M', 'ム'), - (0x32F1, 'M', 'メ'), - (0x32F2, 'M', 'モ'), - (0x32F3, 'M', 'ヤ'), - (0x32F4, 'M', 'ユ'), - (0x32F5, 'M', 'ヨ'), - (0x32F6, 'M', 'ラ'), - (0x32F7, 'M', 'リ'), - (0x32F8, 'M', 'ル'), - (0x32F9, 'M', 'レ'), - (0x32FA, 'M', 'ロ'), - (0x32FB, 'M', 'ワ'), - (0x32FC, 'M', 'ヰ'), - (0x32FD, 'M', 'ヱ'), - (0x32FE, 'M', 'ヲ'), - (0x32FF, 'M', '令和'), - (0x3300, 'M', 'アパート'), - (0x3301, 'M', 
'アルファ'), - (0x3302, 'M', 'アンペア'), - (0x3303, 'M', 'アール'), - (0x3304, 'M', 'イニング'), - (0x3305, 'M', 'インチ'), - (0x3306, 'M', 'ウォン'), - (0x3307, 'M', 'エスクード'), - (0x3308, 'M', 'エーカー'), - (0x3309, 'M', 'オンス'), - (0x330A, 'M', 'オーム'), - (0x330B, 'M', 'カイリ'), - (0x330C, 'M', 'カラット'), - (0x330D, 'M', 'カロリー'), - (0x330E, 'M', 'ガロン'), - (0x330F, 'M', 'ガンマ'), - (0x3310, 'M', 'ギガ'), - (0x3311, 'M', 'ギニー'), - (0x3312, 'M', 'キュリー'), - (0x3313, 'M', 'ギルダー'), - (0x3314, 'M', 'キロ'), - (0x3315, 'M', 'キログラム'), - ] - -def _seg_33() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x3316, 'M', 'キロメートル'), - (0x3317, 'M', 'キロワット'), - (0x3318, 'M', 'グラム'), - (0x3319, 'M', 'グラムトン'), - (0x331A, 'M', 'クルゼイロ'), - (0x331B, 'M', 'クローネ'), - (0x331C, 'M', 'ケース'), - (0x331D, 'M', 'コルナ'), - (0x331E, 'M', 'コーポ'), - (0x331F, 'M', 'サイクル'), - (0x3320, 'M', 'サンチーム'), - (0x3321, 'M', 'シリング'), - (0x3322, 'M', 'センチ'), - (0x3323, 'M', 'セント'), - (0x3324, 'M', 'ダース'), - (0x3325, 'M', 'デシ'), - (0x3326, 'M', 'ドル'), - (0x3327, 'M', 'トン'), - (0x3328, 'M', 'ナノ'), - (0x3329, 'M', 'ノット'), - (0x332A, 'M', 'ハイツ'), - (0x332B, 'M', 'パーセント'), - (0x332C, 'M', 'パーツ'), - (0x332D, 'M', 'バーレル'), - (0x332E, 'M', 'ピアストル'), - (0x332F, 'M', 'ピクル'), - (0x3330, 'M', 'ピコ'), - (0x3331, 'M', 'ビル'), - (0x3332, 'M', 'ファラッド'), - (0x3333, 'M', 'フィート'), - (0x3334, 'M', 'ブッシェル'), - (0x3335, 'M', 'フラン'), - (0x3336, 'M', 'ヘクタール'), - (0x3337, 'M', 'ペソ'), - (0x3338, 'M', 'ペニヒ'), - (0x3339, 'M', 'ヘルツ'), - (0x333A, 'M', 'ペンス'), - (0x333B, 'M', 'ページ'), - (0x333C, 'M', 'ベータ'), - (0x333D, 'M', 'ポイント'), - (0x333E, 'M', 'ボルト'), - (0x333F, 'M', 'ホン'), - (0x3340, 'M', 'ポンド'), - (0x3341, 'M', 'ホール'), - (0x3342, 'M', 'ホーン'), - (0x3343, 'M', 'マイクロ'), - (0x3344, 'M', 'マイル'), - (0x3345, 'M', 'マッハ'), - (0x3346, 'M', 'マルク'), - (0x3347, 'M', 'マンション'), - (0x3348, 'M', 'ミクロン'), - (0x3349, 'M', 'ミリ'), - (0x334A, 'M', 'ミリバール'), - (0x334B, 'M', 'メガ'), - (0x334C, 'M', 'メガトン'), - (0x334D, 'M', 'メートル'), - (0x334E, 'M', 'ヤード'), - (0x334F, 'M', 'ヤール'), - (0x3350, 'M', 'ユアン'), - (0x3351, 'M', 'リットル'), - (0x3352, 'M', 'リラ'), - (0x3353, 'M', 'ルピー'), - (0x3354, 'M', 'ルーブル'), - (0x3355, 'M', 'レム'), - (0x3356, 'M', 'レントゲン'), - (0x3357, 'M', 'ワット'), - (0x3358, 'M', '0点'), - (0x3359, 'M', '1点'), - (0x335A, 'M', '2点'), - (0x335B, 'M', '3点'), - (0x335C, 'M', '4点'), - (0x335D, 'M', '5点'), - (0x335E, 'M', '6点'), - (0x335F, 'M', '7点'), - (0x3360, 'M', '8点'), - (0x3361, 'M', '9点'), - (0x3362, 'M', '10点'), - (0x3363, 'M', '11点'), - (0x3364, 'M', '12点'), - (0x3365, 'M', '13点'), - (0x3366, 'M', '14点'), - (0x3367, 'M', '15点'), - (0x3368, 'M', '16点'), - (0x3369, 'M', '17点'), - (0x336A, 'M', '18点'), - (0x336B, 'M', '19点'), - (0x336C, 'M', '20点'), - (0x336D, 'M', '21点'), - (0x336E, 'M', '22点'), - (0x336F, 'M', '23点'), - (0x3370, 'M', '24点'), - (0x3371, 'M', 'hpa'), - (0x3372, 'M', 'da'), - (0x3373, 'M', 'au'), - (0x3374, 'M', 'bar'), - (0x3375, 'M', 'ov'), - (0x3376, 'M', 'pc'), - (0x3377, 'M', 'dm'), - (0x3378, 'M', 'dm2'), - (0x3379, 'M', 'dm3'), - ] - -def _seg_34() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x337A, 'M', 'iu'), - (0x337B, 'M', '平成'), - (0x337C, 'M', '昭和'), - (0x337D, 'M', '大正'), - (0x337E, 'M', '明治'), - (0x337F, 'M', '株式会社'), - (0x3380, 'M', 'pa'), - (0x3381, 'M', 'na'), - (0x3382, 'M', 'μa'), - (0x3383, 'M', 'ma'), - (0x3384, 'M', 'ka'), - (0x3385, 'M', 'kb'), - (0x3386, 'M', 'mb'), - (0x3387, 'M', 'gb'), - (0x3388, 'M', 'cal'), - (0x3389, 'M', 'kcal'), - (0x338A, 'M', 'pf'), - (0x338B, 'M', 'nf'), - (0x338C, 'M', 'μf'), - (0x338D, 'M', 'μg'), - 
(0x338E, 'M', 'mg'), - (0x338F, 'M', 'kg'), - (0x3390, 'M', 'hz'), - (0x3391, 'M', 'khz'), - (0x3392, 'M', 'mhz'), - (0x3393, 'M', 'ghz'), - (0x3394, 'M', 'thz'), - (0x3395, 'M', 'μl'), - (0x3396, 'M', 'ml'), - (0x3397, 'M', 'dl'), - (0x3398, 'M', 'kl'), - (0x3399, 'M', 'fm'), - (0x339A, 'M', 'nm'), - (0x339B, 'M', 'μm'), - (0x339C, 'M', 'mm'), - (0x339D, 'M', 'cm'), - (0x339E, 'M', 'km'), - (0x339F, 'M', 'mm2'), - (0x33A0, 'M', 'cm2'), - (0x33A1, 'M', 'm2'), - (0x33A2, 'M', 'km2'), - (0x33A3, 'M', 'mm3'), - (0x33A4, 'M', 'cm3'), - (0x33A5, 'M', 'm3'), - (0x33A6, 'M', 'km3'), - (0x33A7, 'M', 'm∕s'), - (0x33A8, 'M', 'm∕s2'), - (0x33A9, 'M', 'pa'), - (0x33AA, 'M', 'kpa'), - (0x33AB, 'M', 'mpa'), - (0x33AC, 'M', 'gpa'), - (0x33AD, 'M', 'rad'), - (0x33AE, 'M', 'rad∕s'), - (0x33AF, 'M', 'rad∕s2'), - (0x33B0, 'M', 'ps'), - (0x33B1, 'M', 'ns'), - (0x33B2, 'M', 'μs'), - (0x33B3, 'M', 'ms'), - (0x33B4, 'M', 'pv'), - (0x33B5, 'M', 'nv'), - (0x33B6, 'M', 'μv'), - (0x33B7, 'M', 'mv'), - (0x33B8, 'M', 'kv'), - (0x33B9, 'M', 'mv'), - (0x33BA, 'M', 'pw'), - (0x33BB, 'M', 'nw'), - (0x33BC, 'M', 'μw'), - (0x33BD, 'M', 'mw'), - (0x33BE, 'M', 'kw'), - (0x33BF, 'M', 'mw'), - (0x33C0, 'M', 'kω'), - (0x33C1, 'M', 'mω'), - (0x33C2, 'X'), - (0x33C3, 'M', 'bq'), - (0x33C4, 'M', 'cc'), - (0x33C5, 'M', 'cd'), - (0x33C6, 'M', 'c∕kg'), - (0x33C7, 'X'), - (0x33C8, 'M', 'db'), - (0x33C9, 'M', 'gy'), - (0x33CA, 'M', 'ha'), - (0x33CB, 'M', 'hp'), - (0x33CC, 'M', 'in'), - (0x33CD, 'M', 'kk'), - (0x33CE, 'M', 'km'), - (0x33CF, 'M', 'kt'), - (0x33D0, 'M', 'lm'), - (0x33D1, 'M', 'ln'), - (0x33D2, 'M', 'log'), - (0x33D3, 'M', 'lx'), - (0x33D4, 'M', 'mb'), - (0x33D5, 'M', 'mil'), - (0x33D6, 'M', 'mol'), - (0x33D7, 'M', 'ph'), - (0x33D8, 'X'), - (0x33D9, 'M', 'ppm'), - (0x33DA, 'M', 'pr'), - (0x33DB, 'M', 'sr'), - (0x33DC, 'M', 'sv'), - (0x33DD, 'M', 'wb'), - ] - -def _seg_35() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x33DE, 'M', 'v∕m'), - (0x33DF, 'M', 'a∕m'), - (0x33E0, 'M', '1日'), - (0x33E1, 'M', '2日'), - (0x33E2, 'M', '3日'), - (0x33E3, 'M', '4日'), - (0x33E4, 'M', '5日'), - (0x33E5, 'M', '6日'), - (0x33E6, 'M', '7日'), - (0x33E7, 'M', '8日'), - (0x33E8, 'M', '9日'), - (0x33E9, 'M', '10日'), - (0x33EA, 'M', '11日'), - (0x33EB, 'M', '12日'), - (0x33EC, 'M', '13日'), - (0x33ED, 'M', '14日'), - (0x33EE, 'M', '15日'), - (0x33EF, 'M', '16日'), - (0x33F0, 'M', '17日'), - (0x33F1, 'M', '18日'), - (0x33F2, 'M', '19日'), - (0x33F3, 'M', '20日'), - (0x33F4, 'M', '21日'), - (0x33F5, 'M', '22日'), - (0x33F6, 'M', '23日'), - (0x33F7, 'M', '24日'), - (0x33F8, 'M', '25日'), - (0x33F9, 'M', '26日'), - (0x33FA, 'M', '27日'), - (0x33FB, 'M', '28日'), - (0x33FC, 'M', '29日'), - (0x33FD, 'M', '30日'), - (0x33FE, 'M', '31日'), - (0x33FF, 'M', 'gal'), - (0x3400, 'V'), - (0xA48D, 'X'), - (0xA490, 'V'), - (0xA4C7, 'X'), - (0xA4D0, 'V'), - (0xA62C, 'X'), - (0xA640, 'M', 'ꙁ'), - (0xA641, 'V'), - (0xA642, 'M', 'ꙃ'), - (0xA643, 'V'), - (0xA644, 'M', 'ꙅ'), - (0xA645, 'V'), - (0xA646, 'M', 'ꙇ'), - (0xA647, 'V'), - (0xA648, 'M', 'ꙉ'), - (0xA649, 'V'), - (0xA64A, 'M', 'ꙋ'), - (0xA64B, 'V'), - (0xA64C, 'M', 'ꙍ'), - (0xA64D, 'V'), - (0xA64E, 'M', 'ꙏ'), - (0xA64F, 'V'), - (0xA650, 'M', 'ꙑ'), - (0xA651, 'V'), - (0xA652, 'M', 'ꙓ'), - (0xA653, 'V'), - (0xA654, 'M', 'ꙕ'), - (0xA655, 'V'), - (0xA656, 'M', 'ꙗ'), - (0xA657, 'V'), - (0xA658, 'M', 'ꙙ'), - (0xA659, 'V'), - (0xA65A, 'M', 'ꙛ'), - (0xA65B, 'V'), - (0xA65C, 'M', 'ꙝ'), - (0xA65D, 'V'), - (0xA65E, 'M', 'ꙟ'), - (0xA65F, 'V'), - (0xA660, 'M', 'ꙡ'), - (0xA661, 'V'), - (0xA662, 'M', 'ꙣ'), - (0xA663, 'V'), - 
(0xA664, 'M', 'ꙥ'), - (0xA665, 'V'), - (0xA666, 'M', 'ꙧ'), - (0xA667, 'V'), - (0xA668, 'M', 'ꙩ'), - (0xA669, 'V'), - (0xA66A, 'M', 'ꙫ'), - (0xA66B, 'V'), - (0xA66C, 'M', 'ꙭ'), - (0xA66D, 'V'), - (0xA680, 'M', 'ꚁ'), - (0xA681, 'V'), - (0xA682, 'M', 'ꚃ'), - (0xA683, 'V'), - (0xA684, 'M', 'ꚅ'), - (0xA685, 'V'), - (0xA686, 'M', 'ꚇ'), - (0xA687, 'V'), - (0xA688, 'M', 'ꚉ'), - (0xA689, 'V'), - (0xA68A, 'M', 'ꚋ'), - (0xA68B, 'V'), - (0xA68C, 'M', 'ꚍ'), - (0xA68D, 'V'), - ] - -def _seg_36() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xA68E, 'M', 'ꚏ'), - (0xA68F, 'V'), - (0xA690, 'M', 'ꚑ'), - (0xA691, 'V'), - (0xA692, 'M', 'ꚓ'), - (0xA693, 'V'), - (0xA694, 'M', 'ꚕ'), - (0xA695, 'V'), - (0xA696, 'M', 'ꚗ'), - (0xA697, 'V'), - (0xA698, 'M', 'ꚙ'), - (0xA699, 'V'), - (0xA69A, 'M', 'ꚛ'), - (0xA69B, 'V'), - (0xA69C, 'M', 'ъ'), - (0xA69D, 'M', 'ь'), - (0xA69E, 'V'), - (0xA6F8, 'X'), - (0xA700, 'V'), - (0xA722, 'M', 'ꜣ'), - (0xA723, 'V'), - (0xA724, 'M', 'ꜥ'), - (0xA725, 'V'), - (0xA726, 'M', 'ꜧ'), - (0xA727, 'V'), - (0xA728, 'M', 'ꜩ'), - (0xA729, 'V'), - (0xA72A, 'M', 'ꜫ'), - (0xA72B, 'V'), - (0xA72C, 'M', 'ꜭ'), - (0xA72D, 'V'), - (0xA72E, 'M', 'ꜯ'), - (0xA72F, 'V'), - (0xA732, 'M', 'ꜳ'), - (0xA733, 'V'), - (0xA734, 'M', 'ꜵ'), - (0xA735, 'V'), - (0xA736, 'M', 'ꜷ'), - (0xA737, 'V'), - (0xA738, 'M', 'ꜹ'), - (0xA739, 'V'), - (0xA73A, 'M', 'ꜻ'), - (0xA73B, 'V'), - (0xA73C, 'M', 'ꜽ'), - (0xA73D, 'V'), - (0xA73E, 'M', 'ꜿ'), - (0xA73F, 'V'), - (0xA740, 'M', 'ꝁ'), - (0xA741, 'V'), - (0xA742, 'M', 'ꝃ'), - (0xA743, 'V'), - (0xA744, 'M', 'ꝅ'), - (0xA745, 'V'), - (0xA746, 'M', 'ꝇ'), - (0xA747, 'V'), - (0xA748, 'M', 'ꝉ'), - (0xA749, 'V'), - (0xA74A, 'M', 'ꝋ'), - (0xA74B, 'V'), - (0xA74C, 'M', 'ꝍ'), - (0xA74D, 'V'), - (0xA74E, 'M', 'ꝏ'), - (0xA74F, 'V'), - (0xA750, 'M', 'ꝑ'), - (0xA751, 'V'), - (0xA752, 'M', 'ꝓ'), - (0xA753, 'V'), - (0xA754, 'M', 'ꝕ'), - (0xA755, 'V'), - (0xA756, 'M', 'ꝗ'), - (0xA757, 'V'), - (0xA758, 'M', 'ꝙ'), - (0xA759, 'V'), - (0xA75A, 'M', 'ꝛ'), - (0xA75B, 'V'), - (0xA75C, 'M', 'ꝝ'), - (0xA75D, 'V'), - (0xA75E, 'M', 'ꝟ'), - (0xA75F, 'V'), - (0xA760, 'M', 'ꝡ'), - (0xA761, 'V'), - (0xA762, 'M', 'ꝣ'), - (0xA763, 'V'), - (0xA764, 'M', 'ꝥ'), - (0xA765, 'V'), - (0xA766, 'M', 'ꝧ'), - (0xA767, 'V'), - (0xA768, 'M', 'ꝩ'), - (0xA769, 'V'), - (0xA76A, 'M', 'ꝫ'), - (0xA76B, 'V'), - (0xA76C, 'M', 'ꝭ'), - (0xA76D, 'V'), - (0xA76E, 'M', 'ꝯ'), - (0xA76F, 'V'), - (0xA770, 'M', 'ꝯ'), - (0xA771, 'V'), - (0xA779, 'M', 'ꝺ'), - (0xA77A, 'V'), - (0xA77B, 'M', 'ꝼ'), - ] - -def _seg_37() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xA77C, 'V'), - (0xA77D, 'M', 'ᵹ'), - (0xA77E, 'M', 'ꝿ'), - (0xA77F, 'V'), - (0xA780, 'M', 'ꞁ'), - (0xA781, 'V'), - (0xA782, 'M', 'ꞃ'), - (0xA783, 'V'), - (0xA784, 'M', 'ꞅ'), - (0xA785, 'V'), - (0xA786, 'M', 'ꞇ'), - (0xA787, 'V'), - (0xA78B, 'M', 'ꞌ'), - (0xA78C, 'V'), - (0xA78D, 'M', 'ɥ'), - (0xA78E, 'V'), - (0xA790, 'M', 'ꞑ'), - (0xA791, 'V'), - (0xA792, 'M', 'ꞓ'), - (0xA793, 'V'), - (0xA796, 'M', 'ꞗ'), - (0xA797, 'V'), - (0xA798, 'M', 'ꞙ'), - (0xA799, 'V'), - (0xA79A, 'M', 'ꞛ'), - (0xA79B, 'V'), - (0xA79C, 'M', 'ꞝ'), - (0xA79D, 'V'), - (0xA79E, 'M', 'ꞟ'), - (0xA79F, 'V'), - (0xA7A0, 'M', 'ꞡ'), - (0xA7A1, 'V'), - (0xA7A2, 'M', 'ꞣ'), - (0xA7A3, 'V'), - (0xA7A4, 'M', 'ꞥ'), - (0xA7A5, 'V'), - (0xA7A6, 'M', 'ꞧ'), - (0xA7A7, 'V'), - (0xA7A8, 'M', 'ꞩ'), - (0xA7A9, 'V'), - (0xA7AA, 'M', 'ɦ'), - (0xA7AB, 'M', 'ɜ'), - (0xA7AC, 'M', 'ɡ'), - (0xA7AD, 'M', 'ɬ'), - (0xA7AE, 'M', 'ɪ'), - (0xA7AF, 'V'), - (0xA7B0, 'M', 'ʞ'), - (0xA7B1, 'M', 'ʇ'), - (0xA7B2, 
'M', 'ʝ'), - (0xA7B3, 'M', 'ꭓ'), - (0xA7B4, 'M', 'ꞵ'), - (0xA7B5, 'V'), - (0xA7B6, 'M', 'ꞷ'), - (0xA7B7, 'V'), - (0xA7B8, 'M', 'ꞹ'), - (0xA7B9, 'V'), - (0xA7BA, 'M', 'ꞻ'), - (0xA7BB, 'V'), - (0xA7BC, 'M', 'ꞽ'), - (0xA7BD, 'V'), - (0xA7BE, 'M', 'ꞿ'), - (0xA7BF, 'V'), - (0xA7C0, 'M', 'ꟁ'), - (0xA7C1, 'V'), - (0xA7C2, 'M', 'ꟃ'), - (0xA7C3, 'V'), - (0xA7C4, 'M', 'ꞔ'), - (0xA7C5, 'M', 'ʂ'), - (0xA7C6, 'M', 'ᶎ'), - (0xA7C7, 'M', 'ꟈ'), - (0xA7C8, 'V'), - (0xA7C9, 'M', 'ꟊ'), - (0xA7CA, 'V'), - (0xA7CB, 'X'), - (0xA7D0, 'M', 'ꟑ'), - (0xA7D1, 'V'), - (0xA7D2, 'X'), - (0xA7D3, 'V'), - (0xA7D4, 'X'), - (0xA7D5, 'V'), - (0xA7D6, 'M', 'ꟗ'), - (0xA7D7, 'V'), - (0xA7D8, 'M', 'ꟙ'), - (0xA7D9, 'V'), - (0xA7DA, 'X'), - (0xA7F2, 'M', 'c'), - (0xA7F3, 'M', 'f'), - (0xA7F4, 'M', 'q'), - (0xA7F5, 'M', 'ꟶ'), - (0xA7F6, 'V'), - (0xA7F8, 'M', 'ħ'), - (0xA7F9, 'M', 'œ'), - (0xA7FA, 'V'), - (0xA82D, 'X'), - (0xA830, 'V'), - (0xA83A, 'X'), - (0xA840, 'V'), - (0xA878, 'X'), - (0xA880, 'V'), - (0xA8C6, 'X'), - ] - -def _seg_38() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xA8CE, 'V'), - (0xA8DA, 'X'), - (0xA8E0, 'V'), - (0xA954, 'X'), - (0xA95F, 'V'), - (0xA97D, 'X'), - (0xA980, 'V'), - (0xA9CE, 'X'), - (0xA9CF, 'V'), - (0xA9DA, 'X'), - (0xA9DE, 'V'), - (0xA9FF, 'X'), - (0xAA00, 'V'), - (0xAA37, 'X'), - (0xAA40, 'V'), - (0xAA4E, 'X'), - (0xAA50, 'V'), - (0xAA5A, 'X'), - (0xAA5C, 'V'), - (0xAAC3, 'X'), - (0xAADB, 'V'), - (0xAAF7, 'X'), - (0xAB01, 'V'), - (0xAB07, 'X'), - (0xAB09, 'V'), - (0xAB0F, 'X'), - (0xAB11, 'V'), - (0xAB17, 'X'), - (0xAB20, 'V'), - (0xAB27, 'X'), - (0xAB28, 'V'), - (0xAB2F, 'X'), - (0xAB30, 'V'), - (0xAB5C, 'M', 'ꜧ'), - (0xAB5D, 'M', 'ꬷ'), - (0xAB5E, 'M', 'ɫ'), - (0xAB5F, 'M', 'ꭒ'), - (0xAB60, 'V'), - (0xAB69, 'M', 'ʍ'), - (0xAB6A, 'V'), - (0xAB6C, 'X'), - (0xAB70, 'M', 'Ꭰ'), - (0xAB71, 'M', 'Ꭱ'), - (0xAB72, 'M', 'Ꭲ'), - (0xAB73, 'M', 'Ꭳ'), - (0xAB74, 'M', 'Ꭴ'), - (0xAB75, 'M', 'Ꭵ'), - (0xAB76, 'M', 'Ꭶ'), - (0xAB77, 'M', 'Ꭷ'), - (0xAB78, 'M', 'Ꭸ'), - (0xAB79, 'M', 'Ꭹ'), - (0xAB7A, 'M', 'Ꭺ'), - (0xAB7B, 'M', 'Ꭻ'), - (0xAB7C, 'M', 'Ꭼ'), - (0xAB7D, 'M', 'Ꭽ'), - (0xAB7E, 'M', 'Ꭾ'), - (0xAB7F, 'M', 'Ꭿ'), - (0xAB80, 'M', 'Ꮀ'), - (0xAB81, 'M', 'Ꮁ'), - (0xAB82, 'M', 'Ꮂ'), - (0xAB83, 'M', 'Ꮃ'), - (0xAB84, 'M', 'Ꮄ'), - (0xAB85, 'M', 'Ꮅ'), - (0xAB86, 'M', 'Ꮆ'), - (0xAB87, 'M', 'Ꮇ'), - (0xAB88, 'M', 'Ꮈ'), - (0xAB89, 'M', 'Ꮉ'), - (0xAB8A, 'M', 'Ꮊ'), - (0xAB8B, 'M', 'Ꮋ'), - (0xAB8C, 'M', 'Ꮌ'), - (0xAB8D, 'M', 'Ꮍ'), - (0xAB8E, 'M', 'Ꮎ'), - (0xAB8F, 'M', 'Ꮏ'), - (0xAB90, 'M', 'Ꮐ'), - (0xAB91, 'M', 'Ꮑ'), - (0xAB92, 'M', 'Ꮒ'), - (0xAB93, 'M', 'Ꮓ'), - (0xAB94, 'M', 'Ꮔ'), - (0xAB95, 'M', 'Ꮕ'), - (0xAB96, 'M', 'Ꮖ'), - (0xAB97, 'M', 'Ꮗ'), - (0xAB98, 'M', 'Ꮘ'), - (0xAB99, 'M', 'Ꮙ'), - (0xAB9A, 'M', 'Ꮚ'), - (0xAB9B, 'M', 'Ꮛ'), - (0xAB9C, 'M', 'Ꮜ'), - (0xAB9D, 'M', 'Ꮝ'), - (0xAB9E, 'M', 'Ꮞ'), - (0xAB9F, 'M', 'Ꮟ'), - (0xABA0, 'M', 'Ꮠ'), - (0xABA1, 'M', 'Ꮡ'), - (0xABA2, 'M', 'Ꮢ'), - (0xABA3, 'M', 'Ꮣ'), - (0xABA4, 'M', 'Ꮤ'), - (0xABA5, 'M', 'Ꮥ'), - (0xABA6, 'M', 'Ꮦ'), - (0xABA7, 'M', 'Ꮧ'), - (0xABA8, 'M', 'Ꮨ'), - (0xABA9, 'M', 'Ꮩ'), - (0xABAA, 'M', 'Ꮪ'), - ] - -def _seg_39() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xABAB, 'M', 'Ꮫ'), - (0xABAC, 'M', 'Ꮬ'), - (0xABAD, 'M', 'Ꮭ'), - (0xABAE, 'M', 'Ꮮ'), - (0xABAF, 'M', 'Ꮯ'), - (0xABB0, 'M', 'Ꮰ'), - (0xABB1, 'M', 'Ꮱ'), - (0xABB2, 'M', 'Ꮲ'), - (0xABB3, 'M', 'Ꮳ'), - (0xABB4, 'M', 'Ꮴ'), - (0xABB5, 'M', 'Ꮵ'), - (0xABB6, 'M', 'Ꮶ'), - (0xABB7, 'M', 'Ꮷ'), - (0xABB8, 'M', 'Ꮸ'), - (0xABB9, 'M', 'Ꮹ'), - (0xABBA, 'M', 'Ꮺ'), - (0xABBB, 'M', 
'Ꮻ'), - (0xABBC, 'M', 'Ꮼ'), - (0xABBD, 'M', 'Ꮽ'), - (0xABBE, 'M', 'Ꮾ'), - (0xABBF, 'M', 'Ꮿ'), - (0xABC0, 'V'), - (0xABEE, 'X'), - (0xABF0, 'V'), - (0xABFA, 'X'), - (0xAC00, 'V'), - (0xD7A4, 'X'), - (0xD7B0, 'V'), - (0xD7C7, 'X'), - (0xD7CB, 'V'), - (0xD7FC, 'X'), - (0xF900, 'M', '豈'), - (0xF901, 'M', '更'), - (0xF902, 'M', '車'), - (0xF903, 'M', '賈'), - (0xF904, 'M', '滑'), - (0xF905, 'M', '串'), - (0xF906, 'M', '句'), - (0xF907, 'M', '龜'), - (0xF909, 'M', '契'), - (0xF90A, 'M', '金'), - (0xF90B, 'M', '喇'), - (0xF90C, 'M', '奈'), - (0xF90D, 'M', '懶'), - (0xF90E, 'M', '癩'), - (0xF90F, 'M', '羅'), - (0xF910, 'M', '蘿'), - (0xF911, 'M', '螺'), - (0xF912, 'M', '裸'), - (0xF913, 'M', '邏'), - (0xF914, 'M', '樂'), - (0xF915, 'M', '洛'), - (0xF916, 'M', '烙'), - (0xF917, 'M', '珞'), - (0xF918, 'M', '落'), - (0xF919, 'M', '酪'), - (0xF91A, 'M', '駱'), - (0xF91B, 'M', '亂'), - (0xF91C, 'M', '卵'), - (0xF91D, 'M', '欄'), - (0xF91E, 'M', '爛'), - (0xF91F, 'M', '蘭'), - (0xF920, 'M', '鸞'), - (0xF921, 'M', '嵐'), - (0xF922, 'M', '濫'), - (0xF923, 'M', '藍'), - (0xF924, 'M', '襤'), - (0xF925, 'M', '拉'), - (0xF926, 'M', '臘'), - (0xF927, 'M', '蠟'), - (0xF928, 'M', '廊'), - (0xF929, 'M', '朗'), - (0xF92A, 'M', '浪'), - (0xF92B, 'M', '狼'), - (0xF92C, 'M', '郎'), - (0xF92D, 'M', '來'), - (0xF92E, 'M', '冷'), - (0xF92F, 'M', '勞'), - (0xF930, 'M', '擄'), - (0xF931, 'M', '櫓'), - (0xF932, 'M', '爐'), - (0xF933, 'M', '盧'), - (0xF934, 'M', '老'), - (0xF935, 'M', '蘆'), - (0xF936, 'M', '虜'), - (0xF937, 'M', '路'), - (0xF938, 'M', '露'), - (0xF939, 'M', '魯'), - (0xF93A, 'M', '鷺'), - (0xF93B, 'M', '碌'), - (0xF93C, 'M', '祿'), - (0xF93D, 'M', '綠'), - (0xF93E, 'M', '菉'), - (0xF93F, 'M', '錄'), - (0xF940, 'M', '鹿'), - (0xF941, 'M', '論'), - (0xF942, 'M', '壟'), - (0xF943, 'M', '弄'), - (0xF944, 'M', '籠'), - (0xF945, 'M', '聾'), - ] - -def _seg_40() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xF946, 'M', '牢'), - (0xF947, 'M', '磊'), - (0xF948, 'M', '賂'), - (0xF949, 'M', '雷'), - (0xF94A, 'M', '壘'), - (0xF94B, 'M', '屢'), - (0xF94C, 'M', '樓'), - (0xF94D, 'M', '淚'), - (0xF94E, 'M', '漏'), - (0xF94F, 'M', '累'), - (0xF950, 'M', '縷'), - (0xF951, 'M', '陋'), - (0xF952, 'M', '勒'), - (0xF953, 'M', '肋'), - (0xF954, 'M', '凜'), - (0xF955, 'M', '凌'), - (0xF956, 'M', '稜'), - (0xF957, 'M', '綾'), - (0xF958, 'M', '菱'), - (0xF959, 'M', '陵'), - (0xF95A, 'M', '讀'), - (0xF95B, 'M', '拏'), - (0xF95C, 'M', '樂'), - (0xF95D, 'M', '諾'), - (0xF95E, 'M', '丹'), - (0xF95F, 'M', '寧'), - (0xF960, 'M', '怒'), - (0xF961, 'M', '率'), - (0xF962, 'M', '異'), - (0xF963, 'M', '北'), - (0xF964, 'M', '磻'), - (0xF965, 'M', '便'), - (0xF966, 'M', '復'), - (0xF967, 'M', '不'), - (0xF968, 'M', '泌'), - (0xF969, 'M', '數'), - (0xF96A, 'M', '索'), - (0xF96B, 'M', '參'), - (0xF96C, 'M', '塞'), - (0xF96D, 'M', '省'), - (0xF96E, 'M', '葉'), - (0xF96F, 'M', '說'), - (0xF970, 'M', '殺'), - (0xF971, 'M', '辰'), - (0xF972, 'M', '沈'), - (0xF973, 'M', '拾'), - (0xF974, 'M', '若'), - (0xF975, 'M', '掠'), - (0xF976, 'M', '略'), - (0xF977, 'M', '亮'), - (0xF978, 'M', '兩'), - (0xF979, 'M', '凉'), - (0xF97A, 'M', '梁'), - (0xF97B, 'M', '糧'), - (0xF97C, 'M', '良'), - (0xF97D, 'M', '諒'), - (0xF97E, 'M', '量'), - (0xF97F, 'M', '勵'), - (0xF980, 'M', '呂'), - (0xF981, 'M', '女'), - (0xF982, 'M', '廬'), - (0xF983, 'M', '旅'), - (0xF984, 'M', '濾'), - (0xF985, 'M', '礪'), - (0xF986, 'M', '閭'), - (0xF987, 'M', '驪'), - (0xF988, 'M', '麗'), - (0xF989, 'M', '黎'), - (0xF98A, 'M', '力'), - (0xF98B, 'M', '曆'), - (0xF98C, 'M', '歷'), - (0xF98D, 'M', '轢'), - (0xF98E, 'M', '年'), - (0xF98F, 'M', '憐'), - (0xF990, 'M', '戀'), - (0xF991, 'M', '撚'), - (0xF992, 
'M', '漣'), - (0xF993, 'M', '煉'), - (0xF994, 'M', '璉'), - (0xF995, 'M', '秊'), - (0xF996, 'M', '練'), - (0xF997, 'M', '聯'), - (0xF998, 'M', '輦'), - (0xF999, 'M', '蓮'), - (0xF99A, 'M', '連'), - (0xF99B, 'M', '鍊'), - (0xF99C, 'M', '列'), - (0xF99D, 'M', '劣'), - (0xF99E, 'M', '咽'), - (0xF99F, 'M', '烈'), - (0xF9A0, 'M', '裂'), - (0xF9A1, 'M', '說'), - (0xF9A2, 'M', '廉'), - (0xF9A3, 'M', '念'), - (0xF9A4, 'M', '捻'), - (0xF9A5, 'M', '殮'), - (0xF9A6, 'M', '簾'), - (0xF9A7, 'M', '獵'), - (0xF9A8, 'M', '令'), - (0xF9A9, 'M', '囹'), - ] - -def _seg_41() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xF9AA, 'M', '寧'), - (0xF9AB, 'M', '嶺'), - (0xF9AC, 'M', '怜'), - (0xF9AD, 'M', '玲'), - (0xF9AE, 'M', '瑩'), - (0xF9AF, 'M', '羚'), - (0xF9B0, 'M', '聆'), - (0xF9B1, 'M', '鈴'), - (0xF9B2, 'M', '零'), - (0xF9B3, 'M', '靈'), - (0xF9B4, 'M', '領'), - (0xF9B5, 'M', '例'), - (0xF9B6, 'M', '禮'), - (0xF9B7, 'M', '醴'), - (0xF9B8, 'M', '隸'), - (0xF9B9, 'M', '惡'), - (0xF9BA, 'M', '了'), - (0xF9BB, 'M', '僚'), - (0xF9BC, 'M', '寮'), - (0xF9BD, 'M', '尿'), - (0xF9BE, 'M', '料'), - (0xF9BF, 'M', '樂'), - (0xF9C0, 'M', '燎'), - (0xF9C1, 'M', '療'), - (0xF9C2, 'M', '蓼'), - (0xF9C3, 'M', '遼'), - (0xF9C4, 'M', '龍'), - (0xF9C5, 'M', '暈'), - (0xF9C6, 'M', '阮'), - (0xF9C7, 'M', '劉'), - (0xF9C8, 'M', '杻'), - (0xF9C9, 'M', '柳'), - (0xF9CA, 'M', '流'), - (0xF9CB, 'M', '溜'), - (0xF9CC, 'M', '琉'), - (0xF9CD, 'M', '留'), - (0xF9CE, 'M', '硫'), - (0xF9CF, 'M', '紐'), - (0xF9D0, 'M', '類'), - (0xF9D1, 'M', '六'), - (0xF9D2, 'M', '戮'), - (0xF9D3, 'M', '陸'), - (0xF9D4, 'M', '倫'), - (0xF9D5, 'M', '崙'), - (0xF9D6, 'M', '淪'), - (0xF9D7, 'M', '輪'), - (0xF9D8, 'M', '律'), - (0xF9D9, 'M', '慄'), - (0xF9DA, 'M', '栗'), - (0xF9DB, 'M', '率'), - (0xF9DC, 'M', '隆'), - (0xF9DD, 'M', '利'), - (0xF9DE, 'M', '吏'), - (0xF9DF, 'M', '履'), - (0xF9E0, 'M', '易'), - (0xF9E1, 'M', '李'), - (0xF9E2, 'M', '梨'), - (0xF9E3, 'M', '泥'), - (0xF9E4, 'M', '理'), - (0xF9E5, 'M', '痢'), - (0xF9E6, 'M', '罹'), - (0xF9E7, 'M', '裏'), - (0xF9E8, 'M', '裡'), - (0xF9E9, 'M', '里'), - (0xF9EA, 'M', '離'), - (0xF9EB, 'M', '匿'), - (0xF9EC, 'M', '溺'), - (0xF9ED, 'M', '吝'), - (0xF9EE, 'M', '燐'), - (0xF9EF, 'M', '璘'), - (0xF9F0, 'M', '藺'), - (0xF9F1, 'M', '隣'), - (0xF9F2, 'M', '鱗'), - (0xF9F3, 'M', '麟'), - (0xF9F4, 'M', '林'), - (0xF9F5, 'M', '淋'), - (0xF9F6, 'M', '臨'), - (0xF9F7, 'M', '立'), - (0xF9F8, 'M', '笠'), - (0xF9F9, 'M', '粒'), - (0xF9FA, 'M', '狀'), - (0xF9FB, 'M', '炙'), - (0xF9FC, 'M', '識'), - (0xF9FD, 'M', '什'), - (0xF9FE, 'M', '茶'), - (0xF9FF, 'M', '刺'), - (0xFA00, 'M', '切'), - (0xFA01, 'M', '度'), - (0xFA02, 'M', '拓'), - (0xFA03, 'M', '糖'), - (0xFA04, 'M', '宅'), - (0xFA05, 'M', '洞'), - (0xFA06, 'M', '暴'), - (0xFA07, 'M', '輻'), - (0xFA08, 'M', '行'), - (0xFA09, 'M', '降'), - (0xFA0A, 'M', '見'), - (0xFA0B, 'M', '廓'), - (0xFA0C, 'M', '兀'), - (0xFA0D, 'M', '嗀'), - ] - -def _seg_42() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFA0E, 'V'), - (0xFA10, 'M', '塚'), - (0xFA11, 'V'), - (0xFA12, 'M', '晴'), - (0xFA13, 'V'), - (0xFA15, 'M', '凞'), - (0xFA16, 'M', '猪'), - (0xFA17, 'M', '益'), - (0xFA18, 'M', '礼'), - (0xFA19, 'M', '神'), - (0xFA1A, 'M', '祥'), - (0xFA1B, 'M', '福'), - (0xFA1C, 'M', '靖'), - (0xFA1D, 'M', '精'), - (0xFA1E, 'M', '羽'), - (0xFA1F, 'V'), - (0xFA20, 'M', '蘒'), - (0xFA21, 'V'), - (0xFA22, 'M', '諸'), - (0xFA23, 'V'), - (0xFA25, 'M', '逸'), - (0xFA26, 'M', '都'), - (0xFA27, 'V'), - (0xFA2A, 'M', '飯'), - (0xFA2B, 'M', '飼'), - (0xFA2C, 'M', '館'), - (0xFA2D, 'M', '鶴'), - (0xFA2E, 'M', '郞'), - (0xFA2F, 'M', '隷'), - (0xFA30, 'M', '侮'), - (0xFA31, 'M', '僧'), - (0xFA32, 'M', 
'免'), - (0xFA33, 'M', '勉'), - (0xFA34, 'M', '勤'), - (0xFA35, 'M', '卑'), - (0xFA36, 'M', '喝'), - (0xFA37, 'M', '嘆'), - (0xFA38, 'M', '器'), - (0xFA39, 'M', '塀'), - (0xFA3A, 'M', '墨'), - (0xFA3B, 'M', '層'), - (0xFA3C, 'M', '屮'), - (0xFA3D, 'M', '悔'), - (0xFA3E, 'M', '慨'), - (0xFA3F, 'M', '憎'), - (0xFA40, 'M', '懲'), - (0xFA41, 'M', '敏'), - (0xFA42, 'M', '既'), - (0xFA43, 'M', '暑'), - (0xFA44, 'M', '梅'), - (0xFA45, 'M', '海'), - (0xFA46, 'M', '渚'), - (0xFA47, 'M', '漢'), - (0xFA48, 'M', '煮'), - (0xFA49, 'M', '爫'), - (0xFA4A, 'M', '琢'), - (0xFA4B, 'M', '碑'), - (0xFA4C, 'M', '社'), - (0xFA4D, 'M', '祉'), - (0xFA4E, 'M', '祈'), - (0xFA4F, 'M', '祐'), - (0xFA50, 'M', '祖'), - (0xFA51, 'M', '祝'), - (0xFA52, 'M', '禍'), - (0xFA53, 'M', '禎'), - (0xFA54, 'M', '穀'), - (0xFA55, 'M', '突'), - (0xFA56, 'M', '節'), - (0xFA57, 'M', '練'), - (0xFA58, 'M', '縉'), - (0xFA59, 'M', '繁'), - (0xFA5A, 'M', '署'), - (0xFA5B, 'M', '者'), - (0xFA5C, 'M', '臭'), - (0xFA5D, 'M', '艹'), - (0xFA5F, 'M', '著'), - (0xFA60, 'M', '褐'), - (0xFA61, 'M', '視'), - (0xFA62, 'M', '謁'), - (0xFA63, 'M', '謹'), - (0xFA64, 'M', '賓'), - (0xFA65, 'M', '贈'), - (0xFA66, 'M', '辶'), - (0xFA67, 'M', '逸'), - (0xFA68, 'M', '難'), - (0xFA69, 'M', '響'), - (0xFA6A, 'M', '頻'), - (0xFA6B, 'M', '恵'), - (0xFA6C, 'M', '𤋮'), - (0xFA6D, 'M', '舘'), - (0xFA6E, 'X'), - (0xFA70, 'M', '並'), - (0xFA71, 'M', '况'), - (0xFA72, 'M', '全'), - (0xFA73, 'M', '侀'), - (0xFA74, 'M', '充'), - (0xFA75, 'M', '冀'), - (0xFA76, 'M', '勇'), - (0xFA77, 'M', '勺'), - (0xFA78, 'M', '喝'), - ] - -def _seg_43() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFA79, 'M', '啕'), - (0xFA7A, 'M', '喙'), - (0xFA7B, 'M', '嗢'), - (0xFA7C, 'M', '塚'), - (0xFA7D, 'M', '墳'), - (0xFA7E, 'M', '奄'), - (0xFA7F, 'M', '奔'), - (0xFA80, 'M', '婢'), - (0xFA81, 'M', '嬨'), - (0xFA82, 'M', '廒'), - (0xFA83, 'M', '廙'), - (0xFA84, 'M', '彩'), - (0xFA85, 'M', '徭'), - (0xFA86, 'M', '惘'), - (0xFA87, 'M', '慎'), - (0xFA88, 'M', '愈'), - (0xFA89, 'M', '憎'), - (0xFA8A, 'M', '慠'), - (0xFA8B, 'M', '懲'), - (0xFA8C, 'M', '戴'), - (0xFA8D, 'M', '揄'), - (0xFA8E, 'M', '搜'), - (0xFA8F, 'M', '摒'), - (0xFA90, 'M', '敖'), - (0xFA91, 'M', '晴'), - (0xFA92, 'M', '朗'), - (0xFA93, 'M', '望'), - (0xFA94, 'M', '杖'), - (0xFA95, 'M', '歹'), - (0xFA96, 'M', '殺'), - (0xFA97, 'M', '流'), - (0xFA98, 'M', '滛'), - (0xFA99, 'M', '滋'), - (0xFA9A, 'M', '漢'), - (0xFA9B, 'M', '瀞'), - (0xFA9C, 'M', '煮'), - (0xFA9D, 'M', '瞧'), - (0xFA9E, 'M', '爵'), - (0xFA9F, 'M', '犯'), - (0xFAA0, 'M', '猪'), - (0xFAA1, 'M', '瑱'), - (0xFAA2, 'M', '甆'), - (0xFAA3, 'M', '画'), - (0xFAA4, 'M', '瘝'), - (0xFAA5, 'M', '瘟'), - (0xFAA6, 'M', '益'), - (0xFAA7, 'M', '盛'), - (0xFAA8, 'M', '直'), - (0xFAA9, 'M', '睊'), - (0xFAAA, 'M', '着'), - (0xFAAB, 'M', '磌'), - (0xFAAC, 'M', '窱'), - (0xFAAD, 'M', '節'), - (0xFAAE, 'M', '类'), - (0xFAAF, 'M', '絛'), - (0xFAB0, 'M', '練'), - (0xFAB1, 'M', '缾'), - (0xFAB2, 'M', '者'), - (0xFAB3, 'M', '荒'), - (0xFAB4, 'M', '華'), - (0xFAB5, 'M', '蝹'), - (0xFAB6, 'M', '襁'), - (0xFAB7, 'M', '覆'), - (0xFAB8, 'M', '視'), - (0xFAB9, 'M', '調'), - (0xFABA, 'M', '諸'), - (0xFABB, 'M', '請'), - (0xFABC, 'M', '謁'), - (0xFABD, 'M', '諾'), - (0xFABE, 'M', '諭'), - (0xFABF, 'M', '謹'), - (0xFAC0, 'M', '變'), - (0xFAC1, 'M', '贈'), - (0xFAC2, 'M', '輸'), - (0xFAC3, 'M', '遲'), - (0xFAC4, 'M', '醙'), - (0xFAC5, 'M', '鉶'), - (0xFAC6, 'M', '陼'), - (0xFAC7, 'M', '難'), - (0xFAC8, 'M', '靖'), - (0xFAC9, 'M', '韛'), - (0xFACA, 'M', '響'), - (0xFACB, 'M', '頋'), - (0xFACC, 'M', '頻'), - (0xFACD, 'M', '鬒'), - (0xFACE, 'M', '龜'), - (0xFACF, 'M', '𢡊'), - (0xFAD0, 'M', '𢡄'), - (0xFAD1, 'M', '𣏕'), - (0xFAD2, 
'M', '㮝'), - (0xFAD3, 'M', '䀘'), - (0xFAD4, 'M', '䀹'), - (0xFAD5, 'M', '𥉉'), - (0xFAD6, 'M', '𥳐'), - (0xFAD7, 'M', '𧻓'), - (0xFAD8, 'M', '齃'), - (0xFAD9, 'M', '龎'), - (0xFADA, 'X'), - (0xFB00, 'M', 'ff'), - (0xFB01, 'M', 'fi'), - ] - -def _seg_44() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFB02, 'M', 'fl'), - (0xFB03, 'M', 'ffi'), - (0xFB04, 'M', 'ffl'), - (0xFB05, 'M', 'st'), - (0xFB07, 'X'), - (0xFB13, 'M', 'մն'), - (0xFB14, 'M', 'մե'), - (0xFB15, 'M', 'մի'), - (0xFB16, 'M', 'վն'), - (0xFB17, 'M', 'մխ'), - (0xFB18, 'X'), - (0xFB1D, 'M', 'יִ'), - (0xFB1E, 'V'), - (0xFB1F, 'M', 'ײַ'), - (0xFB20, 'M', 'ע'), - (0xFB21, 'M', 'א'), - (0xFB22, 'M', 'ד'), - (0xFB23, 'M', 'ה'), - (0xFB24, 'M', 'כ'), - (0xFB25, 'M', 'ל'), - (0xFB26, 'M', 'ם'), - (0xFB27, 'M', 'ר'), - (0xFB28, 'M', 'ת'), - (0xFB29, '3', '+'), - (0xFB2A, 'M', 'שׁ'), - (0xFB2B, 'M', 'שׂ'), - (0xFB2C, 'M', 'שּׁ'), - (0xFB2D, 'M', 'שּׂ'), - (0xFB2E, 'M', 'אַ'), - (0xFB2F, 'M', 'אָ'), - (0xFB30, 'M', 'אּ'), - (0xFB31, 'M', 'בּ'), - (0xFB32, 'M', 'גּ'), - (0xFB33, 'M', 'דּ'), - (0xFB34, 'M', 'הּ'), - (0xFB35, 'M', 'וּ'), - (0xFB36, 'M', 'זּ'), - (0xFB37, 'X'), - (0xFB38, 'M', 'טּ'), - (0xFB39, 'M', 'יּ'), - (0xFB3A, 'M', 'ךּ'), - (0xFB3B, 'M', 'כּ'), - (0xFB3C, 'M', 'לּ'), - (0xFB3D, 'X'), - (0xFB3E, 'M', 'מּ'), - (0xFB3F, 'X'), - (0xFB40, 'M', 'נּ'), - (0xFB41, 'M', 'סּ'), - (0xFB42, 'X'), - (0xFB43, 'M', 'ףּ'), - (0xFB44, 'M', 'פּ'), - (0xFB45, 'X'), - (0xFB46, 'M', 'צּ'), - (0xFB47, 'M', 'קּ'), - (0xFB48, 'M', 'רּ'), - (0xFB49, 'M', 'שּ'), - (0xFB4A, 'M', 'תּ'), - (0xFB4B, 'M', 'וֹ'), - (0xFB4C, 'M', 'בֿ'), - (0xFB4D, 'M', 'כֿ'), - (0xFB4E, 'M', 'פֿ'), - (0xFB4F, 'M', 'אל'), - (0xFB50, 'M', 'ٱ'), - (0xFB52, 'M', 'ٻ'), - (0xFB56, 'M', 'پ'), - (0xFB5A, 'M', 'ڀ'), - (0xFB5E, 'M', 'ٺ'), - (0xFB62, 'M', 'ٿ'), - (0xFB66, 'M', 'ٹ'), - (0xFB6A, 'M', 'ڤ'), - (0xFB6E, 'M', 'ڦ'), - (0xFB72, 'M', 'ڄ'), - (0xFB76, 'M', 'ڃ'), - (0xFB7A, 'M', 'چ'), - (0xFB7E, 'M', 'ڇ'), - (0xFB82, 'M', 'ڍ'), - (0xFB84, 'M', 'ڌ'), - (0xFB86, 'M', 'ڎ'), - (0xFB88, 'M', 'ڈ'), - (0xFB8A, 'M', 'ژ'), - (0xFB8C, 'M', 'ڑ'), - (0xFB8E, 'M', 'ک'), - (0xFB92, 'M', 'گ'), - (0xFB96, 'M', 'ڳ'), - (0xFB9A, 'M', 'ڱ'), - (0xFB9E, 'M', 'ں'), - (0xFBA0, 'M', 'ڻ'), - (0xFBA4, 'M', 'ۀ'), - (0xFBA6, 'M', 'ہ'), - (0xFBAA, 'M', 'ھ'), - (0xFBAE, 'M', 'ے'), - (0xFBB0, 'M', 'ۓ'), - (0xFBB2, 'V'), - (0xFBC3, 'X'), - (0xFBD3, 'M', 'ڭ'), - (0xFBD7, 'M', 'ۇ'), - (0xFBD9, 'M', 'ۆ'), - (0xFBDB, 'M', 'ۈ'), - (0xFBDD, 'M', 'ۇٴ'), - (0xFBDE, 'M', 'ۋ'), - ] - -def _seg_45() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFBE0, 'M', 'ۅ'), - (0xFBE2, 'M', 'ۉ'), - (0xFBE4, 'M', 'ې'), - (0xFBE8, 'M', 'ى'), - (0xFBEA, 'M', 'ئا'), - (0xFBEC, 'M', 'ئە'), - (0xFBEE, 'M', 'ئو'), - (0xFBF0, 'M', 'ئۇ'), - (0xFBF2, 'M', 'ئۆ'), - (0xFBF4, 'M', 'ئۈ'), - (0xFBF6, 'M', 'ئې'), - (0xFBF9, 'M', 'ئى'), - (0xFBFC, 'M', 'ی'), - (0xFC00, 'M', 'ئج'), - (0xFC01, 'M', 'ئح'), - (0xFC02, 'M', 'ئم'), - (0xFC03, 'M', 'ئى'), - (0xFC04, 'M', 'ئي'), - (0xFC05, 'M', 'بج'), - (0xFC06, 'M', 'بح'), - (0xFC07, 'M', 'بخ'), - (0xFC08, 'M', 'بم'), - (0xFC09, 'M', 'بى'), - (0xFC0A, 'M', 'بي'), - (0xFC0B, 'M', 'تج'), - (0xFC0C, 'M', 'تح'), - (0xFC0D, 'M', 'تخ'), - (0xFC0E, 'M', 'تم'), - (0xFC0F, 'M', 'تى'), - (0xFC10, 'M', 'تي'), - (0xFC11, 'M', 'ثج'), - (0xFC12, 'M', 'ثم'), - (0xFC13, 'M', 'ثى'), - (0xFC14, 'M', 'ثي'), - (0xFC15, 'M', 'جح'), - (0xFC16, 'M', 'جم'), - (0xFC17, 'M', 'حج'), - (0xFC18, 'M', 'حم'), - (0xFC19, 'M', 'خج'), - (0xFC1A, 'M', 'خح'), - (0xFC1B, 'M', 'خم'), - (0xFC1C, 'M', 
'سج'), - (0xFC1D, 'M', 'سح'), - (0xFC1E, 'M', 'سخ'), - (0xFC1F, 'M', 'سم'), - (0xFC20, 'M', 'صح'), - (0xFC21, 'M', 'صم'), - (0xFC22, 'M', 'ضج'), - (0xFC23, 'M', 'ضح'), - (0xFC24, 'M', 'ضخ'), - (0xFC25, 'M', 'ضم'), - (0xFC26, 'M', 'طح'), - (0xFC27, 'M', 'طم'), - (0xFC28, 'M', 'ظم'), - (0xFC29, 'M', 'عج'), - (0xFC2A, 'M', 'عم'), - (0xFC2B, 'M', 'غج'), - (0xFC2C, 'M', 'غم'), - (0xFC2D, 'M', 'فج'), - (0xFC2E, 'M', 'فح'), - (0xFC2F, 'M', 'فخ'), - (0xFC30, 'M', 'فم'), - (0xFC31, 'M', 'فى'), - (0xFC32, 'M', 'في'), - (0xFC33, 'M', 'قح'), - (0xFC34, 'M', 'قم'), - (0xFC35, 'M', 'قى'), - (0xFC36, 'M', 'قي'), - (0xFC37, 'M', 'كا'), - (0xFC38, 'M', 'كج'), - (0xFC39, 'M', 'كح'), - (0xFC3A, 'M', 'كخ'), - (0xFC3B, 'M', 'كل'), - (0xFC3C, 'M', 'كم'), - (0xFC3D, 'M', 'كى'), - (0xFC3E, 'M', 'كي'), - (0xFC3F, 'M', 'لج'), - (0xFC40, 'M', 'لح'), - (0xFC41, 'M', 'لخ'), - (0xFC42, 'M', 'لم'), - (0xFC43, 'M', 'لى'), - (0xFC44, 'M', 'لي'), - (0xFC45, 'M', 'مج'), - (0xFC46, 'M', 'مح'), - (0xFC47, 'M', 'مخ'), - (0xFC48, 'M', 'مم'), - (0xFC49, 'M', 'مى'), - (0xFC4A, 'M', 'مي'), - (0xFC4B, 'M', 'نج'), - (0xFC4C, 'M', 'نح'), - (0xFC4D, 'M', 'نخ'), - (0xFC4E, 'M', 'نم'), - (0xFC4F, 'M', 'نى'), - (0xFC50, 'M', 'ني'), - (0xFC51, 'M', 'هج'), - (0xFC52, 'M', 'هم'), - (0xFC53, 'M', 'هى'), - (0xFC54, 'M', 'هي'), - (0xFC55, 'M', 'يج'), - (0xFC56, 'M', 'يح'), - ] - -def _seg_46() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFC57, 'M', 'يخ'), - (0xFC58, 'M', 'يم'), - (0xFC59, 'M', 'يى'), - (0xFC5A, 'M', 'يي'), - (0xFC5B, 'M', 'ذٰ'), - (0xFC5C, 'M', 'رٰ'), - (0xFC5D, 'M', 'ىٰ'), - (0xFC5E, '3', ' ٌّ'), - (0xFC5F, '3', ' ٍّ'), - (0xFC60, '3', ' َّ'), - (0xFC61, '3', ' ُّ'), - (0xFC62, '3', ' ِّ'), - (0xFC63, '3', ' ّٰ'), - (0xFC64, 'M', 'ئر'), - (0xFC65, 'M', 'ئز'), - (0xFC66, 'M', 'ئم'), - (0xFC67, 'M', 'ئن'), - (0xFC68, 'M', 'ئى'), - (0xFC69, 'M', 'ئي'), - (0xFC6A, 'M', 'بر'), - (0xFC6B, 'M', 'بز'), - (0xFC6C, 'M', 'بم'), - (0xFC6D, 'M', 'بن'), - (0xFC6E, 'M', 'بى'), - (0xFC6F, 'M', 'بي'), - (0xFC70, 'M', 'تر'), - (0xFC71, 'M', 'تز'), - (0xFC72, 'M', 'تم'), - (0xFC73, 'M', 'تن'), - (0xFC74, 'M', 'تى'), - (0xFC75, 'M', 'تي'), - (0xFC76, 'M', 'ثر'), - (0xFC77, 'M', 'ثز'), - (0xFC78, 'M', 'ثم'), - (0xFC79, 'M', 'ثن'), - (0xFC7A, 'M', 'ثى'), - (0xFC7B, 'M', 'ثي'), - (0xFC7C, 'M', 'فى'), - (0xFC7D, 'M', 'في'), - (0xFC7E, 'M', 'قى'), - (0xFC7F, 'M', 'قي'), - (0xFC80, 'M', 'كا'), - (0xFC81, 'M', 'كل'), - (0xFC82, 'M', 'كم'), - (0xFC83, 'M', 'كى'), - (0xFC84, 'M', 'كي'), - (0xFC85, 'M', 'لم'), - (0xFC86, 'M', 'لى'), - (0xFC87, 'M', 'لي'), - (0xFC88, 'M', 'ما'), - (0xFC89, 'M', 'مم'), - (0xFC8A, 'M', 'نر'), - (0xFC8B, 'M', 'نز'), - (0xFC8C, 'M', 'نم'), - (0xFC8D, 'M', 'نن'), - (0xFC8E, 'M', 'نى'), - (0xFC8F, 'M', 'ني'), - (0xFC90, 'M', 'ىٰ'), - (0xFC91, 'M', 'ير'), - (0xFC92, 'M', 'يز'), - (0xFC93, 'M', 'يم'), - (0xFC94, 'M', 'ين'), - (0xFC95, 'M', 'يى'), - (0xFC96, 'M', 'يي'), - (0xFC97, 'M', 'ئج'), - (0xFC98, 'M', 'ئح'), - (0xFC99, 'M', 'ئخ'), - (0xFC9A, 'M', 'ئم'), - (0xFC9B, 'M', 'ئه'), - (0xFC9C, 'M', 'بج'), - (0xFC9D, 'M', 'بح'), - (0xFC9E, 'M', 'بخ'), - (0xFC9F, 'M', 'بم'), - (0xFCA0, 'M', 'به'), - (0xFCA1, 'M', 'تج'), - (0xFCA2, 'M', 'تح'), - (0xFCA3, 'M', 'تخ'), - (0xFCA4, 'M', 'تم'), - (0xFCA5, 'M', 'ته'), - (0xFCA6, 'M', 'ثم'), - (0xFCA7, 'M', 'جح'), - (0xFCA8, 'M', 'جم'), - (0xFCA9, 'M', 'حج'), - (0xFCAA, 'M', 'حم'), - (0xFCAB, 'M', 'خج'), - (0xFCAC, 'M', 'خم'), - (0xFCAD, 'M', 'سج'), - (0xFCAE, 'M', 'سح'), - (0xFCAF, 'M', 'سخ'), - (0xFCB0, 'M', 'سم'), - (0xFCB1, 'M', 'صح'), - (0xFCB2, 'M', 'صخ'), - 
(0xFCB3, 'M', 'صم'), - (0xFCB4, 'M', 'ضج'), - (0xFCB5, 'M', 'ضح'), - (0xFCB6, 'M', 'ضخ'), - (0xFCB7, 'M', 'ضم'), - (0xFCB8, 'M', 'طح'), - (0xFCB9, 'M', 'ظم'), - (0xFCBA, 'M', 'عج'), - ] - -def _seg_47() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFCBB, 'M', 'عم'), - (0xFCBC, 'M', 'غج'), - (0xFCBD, 'M', 'غم'), - (0xFCBE, 'M', 'فج'), - (0xFCBF, 'M', 'فح'), - (0xFCC0, 'M', 'فخ'), - (0xFCC1, 'M', 'فم'), - (0xFCC2, 'M', 'قح'), - (0xFCC3, 'M', 'قم'), - (0xFCC4, 'M', 'كج'), - (0xFCC5, 'M', 'كح'), - (0xFCC6, 'M', 'كخ'), - (0xFCC7, 'M', 'كل'), - (0xFCC8, 'M', 'كم'), - (0xFCC9, 'M', 'لج'), - (0xFCCA, 'M', 'لح'), - (0xFCCB, 'M', 'لخ'), - (0xFCCC, 'M', 'لم'), - (0xFCCD, 'M', 'له'), - (0xFCCE, 'M', 'مج'), - (0xFCCF, 'M', 'مح'), - (0xFCD0, 'M', 'مخ'), - (0xFCD1, 'M', 'مم'), - (0xFCD2, 'M', 'نج'), - (0xFCD3, 'M', 'نح'), - (0xFCD4, 'M', 'نخ'), - (0xFCD5, 'M', 'نم'), - (0xFCD6, 'M', 'نه'), - (0xFCD7, 'M', 'هج'), - (0xFCD8, 'M', 'هم'), - (0xFCD9, 'M', 'هٰ'), - (0xFCDA, 'M', 'يج'), - (0xFCDB, 'M', 'يح'), - (0xFCDC, 'M', 'يخ'), - (0xFCDD, 'M', 'يم'), - (0xFCDE, 'M', 'يه'), - (0xFCDF, 'M', 'ئم'), - (0xFCE0, 'M', 'ئه'), - (0xFCE1, 'M', 'بم'), - (0xFCE2, 'M', 'به'), - (0xFCE3, 'M', 'تم'), - (0xFCE4, 'M', 'ته'), - (0xFCE5, 'M', 'ثم'), - (0xFCE6, 'M', 'ثه'), - (0xFCE7, 'M', 'سم'), - (0xFCE8, 'M', 'سه'), - (0xFCE9, 'M', 'شم'), - (0xFCEA, 'M', 'شه'), - (0xFCEB, 'M', 'كل'), - (0xFCEC, 'M', 'كم'), - (0xFCED, 'M', 'لم'), - (0xFCEE, 'M', 'نم'), - (0xFCEF, 'M', 'نه'), - (0xFCF0, 'M', 'يم'), - (0xFCF1, 'M', 'يه'), - (0xFCF2, 'M', 'ـَّ'), - (0xFCF3, 'M', 'ـُّ'), - (0xFCF4, 'M', 'ـِّ'), - (0xFCF5, 'M', 'طى'), - (0xFCF6, 'M', 'طي'), - (0xFCF7, 'M', 'عى'), - (0xFCF8, 'M', 'عي'), - (0xFCF9, 'M', 'غى'), - (0xFCFA, 'M', 'غي'), - (0xFCFB, 'M', 'سى'), - (0xFCFC, 'M', 'سي'), - (0xFCFD, 'M', 'شى'), - (0xFCFE, 'M', 'شي'), - (0xFCFF, 'M', 'حى'), - (0xFD00, 'M', 'حي'), - (0xFD01, 'M', 'جى'), - (0xFD02, 'M', 'جي'), - (0xFD03, 'M', 'خى'), - (0xFD04, 'M', 'خي'), - (0xFD05, 'M', 'صى'), - (0xFD06, 'M', 'صي'), - (0xFD07, 'M', 'ضى'), - (0xFD08, 'M', 'ضي'), - (0xFD09, 'M', 'شج'), - (0xFD0A, 'M', 'شح'), - (0xFD0B, 'M', 'شخ'), - (0xFD0C, 'M', 'شم'), - (0xFD0D, 'M', 'شر'), - (0xFD0E, 'M', 'سر'), - (0xFD0F, 'M', 'صر'), - (0xFD10, 'M', 'ضر'), - (0xFD11, 'M', 'طى'), - (0xFD12, 'M', 'طي'), - (0xFD13, 'M', 'عى'), - (0xFD14, 'M', 'عي'), - (0xFD15, 'M', 'غى'), - (0xFD16, 'M', 'غي'), - (0xFD17, 'M', 'سى'), - (0xFD18, 'M', 'سي'), - (0xFD19, 'M', 'شى'), - (0xFD1A, 'M', 'شي'), - (0xFD1B, 'M', 'حى'), - (0xFD1C, 'M', 'حي'), - (0xFD1D, 'M', 'جى'), - (0xFD1E, 'M', 'جي'), - ] - -def _seg_48() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFD1F, 'M', 'خى'), - (0xFD20, 'M', 'خي'), - (0xFD21, 'M', 'صى'), - (0xFD22, 'M', 'صي'), - (0xFD23, 'M', 'ضى'), - (0xFD24, 'M', 'ضي'), - (0xFD25, 'M', 'شج'), - (0xFD26, 'M', 'شح'), - (0xFD27, 'M', 'شخ'), - (0xFD28, 'M', 'شم'), - (0xFD29, 'M', 'شر'), - (0xFD2A, 'M', 'سر'), - (0xFD2B, 'M', 'صر'), - (0xFD2C, 'M', 'ضر'), - (0xFD2D, 'M', 'شج'), - (0xFD2E, 'M', 'شح'), - (0xFD2F, 'M', 'شخ'), - (0xFD30, 'M', 'شم'), - (0xFD31, 'M', 'سه'), - (0xFD32, 'M', 'شه'), - (0xFD33, 'M', 'طم'), - (0xFD34, 'M', 'سج'), - (0xFD35, 'M', 'سح'), - (0xFD36, 'M', 'سخ'), - (0xFD37, 'M', 'شج'), - (0xFD38, 'M', 'شح'), - (0xFD39, 'M', 'شخ'), - (0xFD3A, 'M', 'طم'), - (0xFD3B, 'M', 'ظم'), - (0xFD3C, 'M', 'اً'), - (0xFD3E, 'V'), - (0xFD50, 'M', 'تجم'), - (0xFD51, 'M', 'تحج'), - (0xFD53, 'M', 'تحم'), - (0xFD54, 'M', 'تخم'), - (0xFD55, 'M', 'تمج'), - (0xFD56, 'M', 'تمح'), - (0xFD57, 'M', 'تمخ'), - (0xFD58, 'M', 
'جمح'), - (0xFD5A, 'M', 'حمي'), - (0xFD5B, 'M', 'حمى'), - (0xFD5C, 'M', 'سحج'), - (0xFD5D, 'M', 'سجح'), - (0xFD5E, 'M', 'سجى'), - (0xFD5F, 'M', 'سمح'), - (0xFD61, 'M', 'سمج'), - (0xFD62, 'M', 'سمم'), - (0xFD64, 'M', 'صحح'), - (0xFD66, 'M', 'صمم'), - (0xFD67, 'M', 'شحم'), - (0xFD69, 'M', 'شجي'), - (0xFD6A, 'M', 'شمخ'), - (0xFD6C, 'M', 'شمم'), - (0xFD6E, 'M', 'ضحى'), - (0xFD6F, 'M', 'ضخم'), - (0xFD71, 'M', 'طمح'), - (0xFD73, 'M', 'طمم'), - (0xFD74, 'M', 'طمي'), - (0xFD75, 'M', 'عجم'), - (0xFD76, 'M', 'عمم'), - (0xFD78, 'M', 'عمى'), - (0xFD79, 'M', 'غمم'), - (0xFD7A, 'M', 'غمي'), - (0xFD7B, 'M', 'غمى'), - (0xFD7C, 'M', 'فخم'), - (0xFD7E, 'M', 'قمح'), - (0xFD7F, 'M', 'قمم'), - (0xFD80, 'M', 'لحم'), - (0xFD81, 'M', 'لحي'), - (0xFD82, 'M', 'لحى'), - (0xFD83, 'M', 'لجج'), - (0xFD85, 'M', 'لخم'), - (0xFD87, 'M', 'لمح'), - (0xFD89, 'M', 'محج'), - (0xFD8A, 'M', 'محم'), - (0xFD8B, 'M', 'محي'), - (0xFD8C, 'M', 'مجح'), - (0xFD8D, 'M', 'مجم'), - (0xFD8E, 'M', 'مخج'), - (0xFD8F, 'M', 'مخم'), - (0xFD90, 'X'), - (0xFD92, 'M', 'مجخ'), - (0xFD93, 'M', 'همج'), - (0xFD94, 'M', 'همم'), - (0xFD95, 'M', 'نحم'), - (0xFD96, 'M', 'نحى'), - (0xFD97, 'M', 'نجم'), - (0xFD99, 'M', 'نجى'), - (0xFD9A, 'M', 'نمي'), - (0xFD9B, 'M', 'نمى'), - (0xFD9C, 'M', 'يمم'), - (0xFD9E, 'M', 'بخي'), - (0xFD9F, 'M', 'تجي'), - (0xFDA0, 'M', 'تجى'), - (0xFDA1, 'M', 'تخي'), - (0xFDA2, 'M', 'تخى'), - (0xFDA3, 'M', 'تمي'), - (0xFDA4, 'M', 'تمى'), - (0xFDA5, 'M', 'جمي'), - (0xFDA6, 'M', 'جحى'), - ] - -def _seg_49() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFDA7, 'M', 'جمى'), - (0xFDA8, 'M', 'سخى'), - (0xFDA9, 'M', 'صحي'), - (0xFDAA, 'M', 'شحي'), - (0xFDAB, 'M', 'ضحي'), - (0xFDAC, 'M', 'لجي'), - (0xFDAD, 'M', 'لمي'), - (0xFDAE, 'M', 'يحي'), - (0xFDAF, 'M', 'يجي'), - (0xFDB0, 'M', 'يمي'), - (0xFDB1, 'M', 'ممي'), - (0xFDB2, 'M', 'قمي'), - (0xFDB3, 'M', 'نحي'), - (0xFDB4, 'M', 'قمح'), - (0xFDB5, 'M', 'لحم'), - (0xFDB6, 'M', 'عمي'), - (0xFDB7, 'M', 'كمي'), - (0xFDB8, 'M', 'نجح'), - (0xFDB9, 'M', 'مخي'), - (0xFDBA, 'M', 'لجم'), - (0xFDBB, 'M', 'كمم'), - (0xFDBC, 'M', 'لجم'), - (0xFDBD, 'M', 'نجح'), - (0xFDBE, 'M', 'جحي'), - (0xFDBF, 'M', 'حجي'), - (0xFDC0, 'M', 'مجي'), - (0xFDC1, 'M', 'فمي'), - (0xFDC2, 'M', 'بحي'), - (0xFDC3, 'M', 'كمم'), - (0xFDC4, 'M', 'عجم'), - (0xFDC5, 'M', 'صمم'), - (0xFDC6, 'M', 'سخي'), - (0xFDC7, 'M', 'نجي'), - (0xFDC8, 'X'), - (0xFDCF, 'V'), - (0xFDD0, 'X'), - (0xFDF0, 'M', 'صلے'), - (0xFDF1, 'M', 'قلے'), - (0xFDF2, 'M', 'الله'), - (0xFDF3, 'M', 'اكبر'), - (0xFDF4, 'M', 'محمد'), - (0xFDF5, 'M', 'صلعم'), - (0xFDF6, 'M', 'رسول'), - (0xFDF7, 'M', 'عليه'), - (0xFDF8, 'M', 'وسلم'), - (0xFDF9, 'M', 'صلى'), - (0xFDFA, '3', 'صلى الله عليه وسلم'), - (0xFDFB, '3', 'جل جلاله'), - (0xFDFC, 'M', 'ریال'), - (0xFDFD, 'V'), - (0xFE00, 'I'), - (0xFE10, '3', ','), - (0xFE11, 'M', '、'), - (0xFE12, 'X'), - (0xFE13, '3', ':'), - (0xFE14, '3', ';'), - (0xFE15, '3', '!'), - (0xFE16, '3', '?'), - (0xFE17, 'M', '〖'), - (0xFE18, 'M', '〗'), - (0xFE19, 'X'), - (0xFE20, 'V'), - (0xFE30, 'X'), - (0xFE31, 'M', '—'), - (0xFE32, 'M', '–'), - (0xFE33, '3', '_'), - (0xFE35, '3', '('), - (0xFE36, '3', ')'), - (0xFE37, '3', '{'), - (0xFE38, '3', '}'), - (0xFE39, 'M', '〔'), - (0xFE3A, 'M', '〕'), - (0xFE3B, 'M', '【'), - (0xFE3C, 'M', '】'), - (0xFE3D, 'M', '《'), - (0xFE3E, 'M', '》'), - (0xFE3F, 'M', '〈'), - (0xFE40, 'M', '〉'), - (0xFE41, 'M', '「'), - (0xFE42, 'M', '」'), - (0xFE43, 'M', '『'), - (0xFE44, 'M', '』'), - (0xFE45, 'V'), - (0xFE47, '3', '['), - (0xFE48, '3', ']'), - (0xFE49, '3', ' ̅'), - (0xFE4D, '3', '_'), - (0xFE50, '3', 
','), - (0xFE51, 'M', '、'), - (0xFE52, 'X'), - (0xFE54, '3', ';'), - (0xFE55, '3', ':'), - (0xFE56, '3', '?'), - (0xFE57, '3', '!'), - (0xFE58, 'M', '—'), - (0xFE59, '3', '('), - (0xFE5A, '3', ')'), - (0xFE5B, '3', '{'), - (0xFE5C, '3', '}'), - (0xFE5D, 'M', '〔'), - ] - -def _seg_50() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFE5E, 'M', '〕'), - (0xFE5F, '3', '#'), - (0xFE60, '3', '&'), - (0xFE61, '3', '*'), - (0xFE62, '3', '+'), - (0xFE63, 'M', '-'), - (0xFE64, '3', '<'), - (0xFE65, '3', '>'), - (0xFE66, '3', '='), - (0xFE67, 'X'), - (0xFE68, '3', '\\'), - (0xFE69, '3', '$'), - (0xFE6A, '3', '%'), - (0xFE6B, '3', '@'), - (0xFE6C, 'X'), - (0xFE70, '3', ' ً'), - (0xFE71, 'M', 'ـً'), - (0xFE72, '3', ' ٌ'), - (0xFE73, 'V'), - (0xFE74, '3', ' ٍ'), - (0xFE75, 'X'), - (0xFE76, '3', ' َ'), - (0xFE77, 'M', 'ـَ'), - (0xFE78, '3', ' ُ'), - (0xFE79, 'M', 'ـُ'), - (0xFE7A, '3', ' ِ'), - (0xFE7B, 'M', 'ـِ'), - (0xFE7C, '3', ' ّ'), - (0xFE7D, 'M', 'ـّ'), - (0xFE7E, '3', ' ْ'), - (0xFE7F, 'M', 'ـْ'), - (0xFE80, 'M', 'ء'), - (0xFE81, 'M', 'آ'), - (0xFE83, 'M', 'أ'), - (0xFE85, 'M', 'ؤ'), - (0xFE87, 'M', 'إ'), - (0xFE89, 'M', 'ئ'), - (0xFE8D, 'M', 'ا'), - (0xFE8F, 'M', 'ب'), - (0xFE93, 'M', 'ة'), - (0xFE95, 'M', 'ت'), - (0xFE99, 'M', 'ث'), - (0xFE9D, 'M', 'ج'), - (0xFEA1, 'M', 'ح'), - (0xFEA5, 'M', 'خ'), - (0xFEA9, 'M', 'د'), - (0xFEAB, 'M', 'ذ'), - (0xFEAD, 'M', 'ر'), - (0xFEAF, 'M', 'ز'), - (0xFEB1, 'M', 'س'), - (0xFEB5, 'M', 'ش'), - (0xFEB9, 'M', 'ص'), - (0xFEBD, 'M', 'ض'), - (0xFEC1, 'M', 'ط'), - (0xFEC5, 'M', 'ظ'), - (0xFEC9, 'M', 'ع'), - (0xFECD, 'M', 'غ'), - (0xFED1, 'M', 'ف'), - (0xFED5, 'M', 'ق'), - (0xFED9, 'M', 'ك'), - (0xFEDD, 'M', 'ل'), - (0xFEE1, 'M', 'م'), - (0xFEE5, 'M', 'ن'), - (0xFEE9, 'M', 'ه'), - (0xFEED, 'M', 'و'), - (0xFEEF, 'M', 'ى'), - (0xFEF1, 'M', 'ي'), - (0xFEF5, 'M', 'لآ'), - (0xFEF7, 'M', 'لأ'), - (0xFEF9, 'M', 'لإ'), - (0xFEFB, 'M', 'لا'), - (0xFEFD, 'X'), - (0xFEFF, 'I'), - (0xFF00, 'X'), - (0xFF01, '3', '!'), - (0xFF02, '3', '"'), - (0xFF03, '3', '#'), - (0xFF04, '3', '$'), - (0xFF05, '3', '%'), - (0xFF06, '3', '&'), - (0xFF07, '3', '\''), - (0xFF08, '3', '('), - (0xFF09, '3', ')'), - (0xFF0A, '3', '*'), - (0xFF0B, '3', '+'), - (0xFF0C, '3', ','), - (0xFF0D, 'M', '-'), - (0xFF0E, 'M', '.'), - (0xFF0F, '3', '/'), - (0xFF10, 'M', '0'), - (0xFF11, 'M', '1'), - (0xFF12, 'M', '2'), - (0xFF13, 'M', '3'), - (0xFF14, 'M', '4'), - (0xFF15, 'M', '5'), - (0xFF16, 'M', '6'), - (0xFF17, 'M', '7'), - (0xFF18, 'M', '8'), - (0xFF19, 'M', '9'), - (0xFF1A, '3', ':'), - ] - -def _seg_51() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFF1B, '3', ';'), - (0xFF1C, '3', '<'), - (0xFF1D, '3', '='), - (0xFF1E, '3', '>'), - (0xFF1F, '3', '?'), - (0xFF20, '3', '@'), - (0xFF21, 'M', 'a'), - (0xFF22, 'M', 'b'), - (0xFF23, 'M', 'c'), - (0xFF24, 'M', 'd'), - (0xFF25, 'M', 'e'), - (0xFF26, 'M', 'f'), - (0xFF27, 'M', 'g'), - (0xFF28, 'M', 'h'), - (0xFF29, 'M', 'i'), - (0xFF2A, 'M', 'j'), - (0xFF2B, 'M', 'k'), - (0xFF2C, 'M', 'l'), - (0xFF2D, 'M', 'm'), - (0xFF2E, 'M', 'n'), - (0xFF2F, 'M', 'o'), - (0xFF30, 'M', 'p'), - (0xFF31, 'M', 'q'), - (0xFF32, 'M', 'r'), - (0xFF33, 'M', 's'), - (0xFF34, 'M', 't'), - (0xFF35, 'M', 'u'), - (0xFF36, 'M', 'v'), - (0xFF37, 'M', 'w'), - (0xFF38, 'M', 'x'), - (0xFF39, 'M', 'y'), - (0xFF3A, 'M', 'z'), - (0xFF3B, '3', '['), - (0xFF3C, '3', '\\'), - (0xFF3D, '3', ']'), - (0xFF3E, '3', '^'), - (0xFF3F, '3', '_'), - (0xFF40, '3', '`'), - (0xFF41, 'M', 'a'), - (0xFF42, 'M', 'b'), - (0xFF43, 'M', 'c'), - (0xFF44, 'M', 'd'), - 
(0xFF45, 'M', 'e'), - (0xFF46, 'M', 'f'), - (0xFF47, 'M', 'g'), - (0xFF48, 'M', 'h'), - (0xFF49, 'M', 'i'), - (0xFF4A, 'M', 'j'), - (0xFF4B, 'M', 'k'), - (0xFF4C, 'M', 'l'), - (0xFF4D, 'M', 'm'), - (0xFF4E, 'M', 'n'), - (0xFF4F, 'M', 'o'), - (0xFF50, 'M', 'p'), - (0xFF51, 'M', 'q'), - (0xFF52, 'M', 'r'), - (0xFF53, 'M', 's'), - (0xFF54, 'M', 't'), - (0xFF55, 'M', 'u'), - (0xFF56, 'M', 'v'), - (0xFF57, 'M', 'w'), - (0xFF58, 'M', 'x'), - (0xFF59, 'M', 'y'), - (0xFF5A, 'M', 'z'), - (0xFF5B, '3', '{'), - (0xFF5C, '3', '|'), - (0xFF5D, '3', '}'), - (0xFF5E, '3', '~'), - (0xFF5F, 'M', '⦅'), - (0xFF60, 'M', '⦆'), - (0xFF61, 'M', '.'), - (0xFF62, 'M', '「'), - (0xFF63, 'M', '」'), - (0xFF64, 'M', '、'), - (0xFF65, 'M', '・'), - (0xFF66, 'M', 'ヲ'), - (0xFF67, 'M', 'ァ'), - (0xFF68, 'M', 'ィ'), - (0xFF69, 'M', 'ゥ'), - (0xFF6A, 'M', 'ェ'), - (0xFF6B, 'M', 'ォ'), - (0xFF6C, 'M', 'ャ'), - (0xFF6D, 'M', 'ュ'), - (0xFF6E, 'M', 'ョ'), - (0xFF6F, 'M', 'ッ'), - (0xFF70, 'M', 'ー'), - (0xFF71, 'M', 'ア'), - (0xFF72, 'M', 'イ'), - (0xFF73, 'M', 'ウ'), - (0xFF74, 'M', 'エ'), - (0xFF75, 'M', 'オ'), - (0xFF76, 'M', 'カ'), - (0xFF77, 'M', 'キ'), - (0xFF78, 'M', 'ク'), - (0xFF79, 'M', 'ケ'), - (0xFF7A, 'M', 'コ'), - (0xFF7B, 'M', 'サ'), - (0xFF7C, 'M', 'シ'), - (0xFF7D, 'M', 'ス'), - (0xFF7E, 'M', 'セ'), - ] - -def _seg_52() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFF7F, 'M', 'ソ'), - (0xFF80, 'M', 'タ'), - (0xFF81, 'M', 'チ'), - (0xFF82, 'M', 'ツ'), - (0xFF83, 'M', 'テ'), - (0xFF84, 'M', 'ト'), - (0xFF85, 'M', 'ナ'), - (0xFF86, 'M', 'ニ'), - (0xFF87, 'M', 'ヌ'), - (0xFF88, 'M', 'ネ'), - (0xFF89, 'M', 'ノ'), - (0xFF8A, 'M', 'ハ'), - (0xFF8B, 'M', 'ヒ'), - (0xFF8C, 'M', 'フ'), - (0xFF8D, 'M', 'ヘ'), - (0xFF8E, 'M', 'ホ'), - (0xFF8F, 'M', 'マ'), - (0xFF90, 'M', 'ミ'), - (0xFF91, 'M', 'ム'), - (0xFF92, 'M', 'メ'), - (0xFF93, 'M', 'モ'), - (0xFF94, 'M', 'ヤ'), - (0xFF95, 'M', 'ユ'), - (0xFF96, 'M', 'ヨ'), - (0xFF97, 'M', 'ラ'), - (0xFF98, 'M', 'リ'), - (0xFF99, 'M', 'ル'), - (0xFF9A, 'M', 'レ'), - (0xFF9B, 'M', 'ロ'), - (0xFF9C, 'M', 'ワ'), - (0xFF9D, 'M', 'ン'), - (0xFF9E, 'M', '゙'), - (0xFF9F, 'M', '゚'), - (0xFFA0, 'X'), - (0xFFA1, 'M', 'ᄀ'), - (0xFFA2, 'M', 'ᄁ'), - (0xFFA3, 'M', 'ᆪ'), - (0xFFA4, 'M', 'ᄂ'), - (0xFFA5, 'M', 'ᆬ'), - (0xFFA6, 'M', 'ᆭ'), - (0xFFA7, 'M', 'ᄃ'), - (0xFFA8, 'M', 'ᄄ'), - (0xFFA9, 'M', 'ᄅ'), - (0xFFAA, 'M', 'ᆰ'), - (0xFFAB, 'M', 'ᆱ'), - (0xFFAC, 'M', 'ᆲ'), - (0xFFAD, 'M', 'ᆳ'), - (0xFFAE, 'M', 'ᆴ'), - (0xFFAF, 'M', 'ᆵ'), - (0xFFB0, 'M', 'ᄚ'), - (0xFFB1, 'M', 'ᄆ'), - (0xFFB2, 'M', 'ᄇ'), - (0xFFB3, 'M', 'ᄈ'), - (0xFFB4, 'M', 'ᄡ'), - (0xFFB5, 'M', 'ᄉ'), - (0xFFB6, 'M', 'ᄊ'), - (0xFFB7, 'M', 'ᄋ'), - (0xFFB8, 'M', 'ᄌ'), - (0xFFB9, 'M', 'ᄍ'), - (0xFFBA, 'M', 'ᄎ'), - (0xFFBB, 'M', 'ᄏ'), - (0xFFBC, 'M', 'ᄐ'), - (0xFFBD, 'M', 'ᄑ'), - (0xFFBE, 'M', 'ᄒ'), - (0xFFBF, 'X'), - (0xFFC2, 'M', 'ᅡ'), - (0xFFC3, 'M', 'ᅢ'), - (0xFFC4, 'M', 'ᅣ'), - (0xFFC5, 'M', 'ᅤ'), - (0xFFC6, 'M', 'ᅥ'), - (0xFFC7, 'M', 'ᅦ'), - (0xFFC8, 'X'), - (0xFFCA, 'M', 'ᅧ'), - (0xFFCB, 'M', 'ᅨ'), - (0xFFCC, 'M', 'ᅩ'), - (0xFFCD, 'M', 'ᅪ'), - (0xFFCE, 'M', 'ᅫ'), - (0xFFCF, 'M', 'ᅬ'), - (0xFFD0, 'X'), - (0xFFD2, 'M', 'ᅭ'), - (0xFFD3, 'M', 'ᅮ'), - (0xFFD4, 'M', 'ᅯ'), - (0xFFD5, 'M', 'ᅰ'), - (0xFFD6, 'M', 'ᅱ'), - (0xFFD7, 'M', 'ᅲ'), - (0xFFD8, 'X'), - (0xFFDA, 'M', 'ᅳ'), - (0xFFDB, 'M', 'ᅴ'), - (0xFFDC, 'M', 'ᅵ'), - (0xFFDD, 'X'), - (0xFFE0, 'M', '¢'), - (0xFFE1, 'M', '£'), - (0xFFE2, 'M', '¬'), - (0xFFE3, '3', ' ̄'), - (0xFFE4, 'M', '¦'), - (0xFFE5, 'M', '¥'), - (0xFFE6, 'M', '₩'), - (0xFFE7, 'X'), - (0xFFE8, 'M', '│'), - (0xFFE9, 'M', '←'), - ] - -def _seg_53() -> 
List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFFEA, 'M', '↑'), - (0xFFEB, 'M', '→'), - (0xFFEC, 'M', '↓'), - (0xFFED, 'M', '■'), - (0xFFEE, 'M', '○'), - (0xFFEF, 'X'), - (0x10000, 'V'), - (0x1000C, 'X'), - (0x1000D, 'V'), - (0x10027, 'X'), - (0x10028, 'V'), - (0x1003B, 'X'), - (0x1003C, 'V'), - (0x1003E, 'X'), - (0x1003F, 'V'), - (0x1004E, 'X'), - (0x10050, 'V'), - (0x1005E, 'X'), - (0x10080, 'V'), - (0x100FB, 'X'), - (0x10100, 'V'), - (0x10103, 'X'), - (0x10107, 'V'), - (0x10134, 'X'), - (0x10137, 'V'), - (0x1018F, 'X'), - (0x10190, 'V'), - (0x1019D, 'X'), - (0x101A0, 'V'), - (0x101A1, 'X'), - (0x101D0, 'V'), - (0x101FE, 'X'), - (0x10280, 'V'), - (0x1029D, 'X'), - (0x102A0, 'V'), - (0x102D1, 'X'), - (0x102E0, 'V'), - (0x102FC, 'X'), - (0x10300, 'V'), - (0x10324, 'X'), - (0x1032D, 'V'), - (0x1034B, 'X'), - (0x10350, 'V'), - (0x1037B, 'X'), - (0x10380, 'V'), - (0x1039E, 'X'), - (0x1039F, 'V'), - (0x103C4, 'X'), - (0x103C8, 'V'), - (0x103D6, 'X'), - (0x10400, 'M', '𐐨'), - (0x10401, 'M', '𐐩'), - (0x10402, 'M', '𐐪'), - (0x10403, 'M', '𐐫'), - (0x10404, 'M', '𐐬'), - (0x10405, 'M', '𐐭'), - (0x10406, 'M', '𐐮'), - (0x10407, 'M', '𐐯'), - (0x10408, 'M', '𐐰'), - (0x10409, 'M', '𐐱'), - (0x1040A, 'M', '𐐲'), - (0x1040B, 'M', '𐐳'), - (0x1040C, 'M', '𐐴'), - (0x1040D, 'M', '𐐵'), - (0x1040E, 'M', '𐐶'), - (0x1040F, 'M', '𐐷'), - (0x10410, 'M', '𐐸'), - (0x10411, 'M', '𐐹'), - (0x10412, 'M', '𐐺'), - (0x10413, 'M', '𐐻'), - (0x10414, 'M', '𐐼'), - (0x10415, 'M', '𐐽'), - (0x10416, 'M', '𐐾'), - (0x10417, 'M', '𐐿'), - (0x10418, 'M', '𐑀'), - (0x10419, 'M', '𐑁'), - (0x1041A, 'M', '𐑂'), - (0x1041B, 'M', '𐑃'), - (0x1041C, 'M', '𐑄'), - (0x1041D, 'M', '𐑅'), - (0x1041E, 'M', '𐑆'), - (0x1041F, 'M', '𐑇'), - (0x10420, 'M', '𐑈'), - (0x10421, 'M', '𐑉'), - (0x10422, 'M', '𐑊'), - (0x10423, 'M', '𐑋'), - (0x10424, 'M', '𐑌'), - (0x10425, 'M', '𐑍'), - (0x10426, 'M', '𐑎'), - (0x10427, 'M', '𐑏'), - (0x10428, 'V'), - (0x1049E, 'X'), - (0x104A0, 'V'), - (0x104AA, 'X'), - (0x104B0, 'M', '𐓘'), - (0x104B1, 'M', '𐓙'), - (0x104B2, 'M', '𐓚'), - (0x104B3, 'M', '𐓛'), - (0x104B4, 'M', '𐓜'), - (0x104B5, 'M', '𐓝'), - ] - -def _seg_54() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x104B6, 'M', '𐓞'), - (0x104B7, 'M', '𐓟'), - (0x104B8, 'M', '𐓠'), - (0x104B9, 'M', '𐓡'), - (0x104BA, 'M', '𐓢'), - (0x104BB, 'M', '𐓣'), - (0x104BC, 'M', '𐓤'), - (0x104BD, 'M', '𐓥'), - (0x104BE, 'M', '𐓦'), - (0x104BF, 'M', '𐓧'), - (0x104C0, 'M', '𐓨'), - (0x104C1, 'M', '𐓩'), - (0x104C2, 'M', '𐓪'), - (0x104C3, 'M', '𐓫'), - (0x104C4, 'M', '𐓬'), - (0x104C5, 'M', '𐓭'), - (0x104C6, 'M', '𐓮'), - (0x104C7, 'M', '𐓯'), - (0x104C8, 'M', '𐓰'), - (0x104C9, 'M', '𐓱'), - (0x104CA, 'M', '𐓲'), - (0x104CB, 'M', '𐓳'), - (0x104CC, 'M', '𐓴'), - (0x104CD, 'M', '𐓵'), - (0x104CE, 'M', '𐓶'), - (0x104CF, 'M', '𐓷'), - (0x104D0, 'M', '𐓸'), - (0x104D1, 'M', '𐓹'), - (0x104D2, 'M', '𐓺'), - (0x104D3, 'M', '𐓻'), - (0x104D4, 'X'), - (0x104D8, 'V'), - (0x104FC, 'X'), - (0x10500, 'V'), - (0x10528, 'X'), - (0x10530, 'V'), - (0x10564, 'X'), - (0x1056F, 'V'), - (0x10570, 'M', '𐖗'), - (0x10571, 'M', '𐖘'), - (0x10572, 'M', '𐖙'), - (0x10573, 'M', '𐖚'), - (0x10574, 'M', '𐖛'), - (0x10575, 'M', '𐖜'), - (0x10576, 'M', '𐖝'), - (0x10577, 'M', '𐖞'), - (0x10578, 'M', '𐖟'), - (0x10579, 'M', '𐖠'), - (0x1057A, 'M', '𐖡'), - (0x1057B, 'X'), - (0x1057C, 'M', '𐖣'), - (0x1057D, 'M', '𐖤'), - (0x1057E, 'M', '𐖥'), - (0x1057F, 'M', '𐖦'), - (0x10580, 'M', '𐖧'), - (0x10581, 'M', '𐖨'), - (0x10582, 'M', '𐖩'), - (0x10583, 'M', '𐖪'), - (0x10584, 'M', '𐖫'), - (0x10585, 'M', '𐖬'), - (0x10586, 'M', 
'𐖭'), - (0x10587, 'M', '𐖮'), - (0x10588, 'M', '𐖯'), - (0x10589, 'M', '𐖰'), - (0x1058A, 'M', '𐖱'), - (0x1058B, 'X'), - (0x1058C, 'M', '𐖳'), - (0x1058D, 'M', '𐖴'), - (0x1058E, 'M', '𐖵'), - (0x1058F, 'M', '𐖶'), - (0x10590, 'M', '𐖷'), - (0x10591, 'M', '𐖸'), - (0x10592, 'M', '𐖹'), - (0x10593, 'X'), - (0x10594, 'M', '𐖻'), - (0x10595, 'M', '𐖼'), - (0x10596, 'X'), - (0x10597, 'V'), - (0x105A2, 'X'), - (0x105A3, 'V'), - (0x105B2, 'X'), - (0x105B3, 'V'), - (0x105BA, 'X'), - (0x105BB, 'V'), - (0x105BD, 'X'), - (0x10600, 'V'), - (0x10737, 'X'), - (0x10740, 'V'), - (0x10756, 'X'), - (0x10760, 'V'), - (0x10768, 'X'), - (0x10780, 'V'), - (0x10781, 'M', 'ː'), - (0x10782, 'M', 'ˑ'), - (0x10783, 'M', 'æ'), - (0x10784, 'M', 'ʙ'), - (0x10785, 'M', 'ɓ'), - (0x10786, 'X'), - (0x10787, 'M', 'ʣ'), - (0x10788, 'M', 'ꭦ'), - ] - -def _seg_55() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x10789, 'M', 'ʥ'), - (0x1078A, 'M', 'ʤ'), - (0x1078B, 'M', 'ɖ'), - (0x1078C, 'M', 'ɗ'), - (0x1078D, 'M', 'ᶑ'), - (0x1078E, 'M', 'ɘ'), - (0x1078F, 'M', 'ɞ'), - (0x10790, 'M', 'ʩ'), - (0x10791, 'M', 'ɤ'), - (0x10792, 'M', 'ɢ'), - (0x10793, 'M', 'ɠ'), - (0x10794, 'M', 'ʛ'), - (0x10795, 'M', 'ħ'), - (0x10796, 'M', 'ʜ'), - (0x10797, 'M', 'ɧ'), - (0x10798, 'M', 'ʄ'), - (0x10799, 'M', 'ʪ'), - (0x1079A, 'M', 'ʫ'), - (0x1079B, 'M', 'ɬ'), - (0x1079C, 'M', '𝼄'), - (0x1079D, 'M', 'ꞎ'), - (0x1079E, 'M', 'ɮ'), - (0x1079F, 'M', '𝼅'), - (0x107A0, 'M', 'ʎ'), - (0x107A1, 'M', '𝼆'), - (0x107A2, 'M', 'ø'), - (0x107A3, 'M', 'ɶ'), - (0x107A4, 'M', 'ɷ'), - (0x107A5, 'M', 'q'), - (0x107A6, 'M', 'ɺ'), - (0x107A7, 'M', '𝼈'), - (0x107A8, 'M', 'ɽ'), - (0x107A9, 'M', 'ɾ'), - (0x107AA, 'M', 'ʀ'), - (0x107AB, 'M', 'ʨ'), - (0x107AC, 'M', 'ʦ'), - (0x107AD, 'M', 'ꭧ'), - (0x107AE, 'M', 'ʧ'), - (0x107AF, 'M', 'ʈ'), - (0x107B0, 'M', 'ⱱ'), - (0x107B1, 'X'), - (0x107B2, 'M', 'ʏ'), - (0x107B3, 'M', 'ʡ'), - (0x107B4, 'M', 'ʢ'), - (0x107B5, 'M', 'ʘ'), - (0x107B6, 'M', 'ǀ'), - (0x107B7, 'M', 'ǁ'), - (0x107B8, 'M', 'ǂ'), - (0x107B9, 'M', '𝼊'), - (0x107BA, 'M', '𝼞'), - (0x107BB, 'X'), - (0x10800, 'V'), - (0x10806, 'X'), - (0x10808, 'V'), - (0x10809, 'X'), - (0x1080A, 'V'), - (0x10836, 'X'), - (0x10837, 'V'), - (0x10839, 'X'), - (0x1083C, 'V'), - (0x1083D, 'X'), - (0x1083F, 'V'), - (0x10856, 'X'), - (0x10857, 'V'), - (0x1089F, 'X'), - (0x108A7, 'V'), - (0x108B0, 'X'), - (0x108E0, 'V'), - (0x108F3, 'X'), - (0x108F4, 'V'), - (0x108F6, 'X'), - (0x108FB, 'V'), - (0x1091C, 'X'), - (0x1091F, 'V'), - (0x1093A, 'X'), - (0x1093F, 'V'), - (0x10940, 'X'), - (0x10980, 'V'), - (0x109B8, 'X'), - (0x109BC, 'V'), - (0x109D0, 'X'), - (0x109D2, 'V'), - (0x10A04, 'X'), - (0x10A05, 'V'), - (0x10A07, 'X'), - (0x10A0C, 'V'), - (0x10A14, 'X'), - (0x10A15, 'V'), - (0x10A18, 'X'), - (0x10A19, 'V'), - (0x10A36, 'X'), - (0x10A38, 'V'), - (0x10A3B, 'X'), - (0x10A3F, 'V'), - (0x10A49, 'X'), - (0x10A50, 'V'), - (0x10A59, 'X'), - (0x10A60, 'V'), - (0x10AA0, 'X'), - (0x10AC0, 'V'), - ] - -def _seg_56() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x10AE7, 'X'), - (0x10AEB, 'V'), - (0x10AF7, 'X'), - (0x10B00, 'V'), - (0x10B36, 'X'), - (0x10B39, 'V'), - (0x10B56, 'X'), - (0x10B58, 'V'), - (0x10B73, 'X'), - (0x10B78, 'V'), - (0x10B92, 'X'), - (0x10B99, 'V'), - (0x10B9D, 'X'), - (0x10BA9, 'V'), - (0x10BB0, 'X'), - (0x10C00, 'V'), - (0x10C49, 'X'), - (0x10C80, 'M', '𐳀'), - (0x10C81, 'M', '𐳁'), - (0x10C82, 'M', '𐳂'), - (0x10C83, 'M', '𐳃'), - (0x10C84, 'M', '𐳄'), - (0x10C85, 'M', '𐳅'), - (0x10C86, 'M', '𐳆'), - (0x10C87, 'M', '𐳇'), - (0x10C88, 'M', '𐳈'), - (0x10C89, 
'M', '𐳉'), - (0x10C8A, 'M', '𐳊'), - (0x10C8B, 'M', '𐳋'), - (0x10C8C, 'M', '𐳌'), - (0x10C8D, 'M', '𐳍'), - (0x10C8E, 'M', '𐳎'), - (0x10C8F, 'M', '𐳏'), - (0x10C90, 'M', '𐳐'), - (0x10C91, 'M', '𐳑'), - (0x10C92, 'M', '𐳒'), - (0x10C93, 'M', '𐳓'), - (0x10C94, 'M', '𐳔'), - (0x10C95, 'M', '𐳕'), - (0x10C96, 'M', '𐳖'), - (0x10C97, 'M', '𐳗'), - (0x10C98, 'M', '𐳘'), - (0x10C99, 'M', '𐳙'), - (0x10C9A, 'M', '𐳚'), - (0x10C9B, 'M', '𐳛'), - (0x10C9C, 'M', '𐳜'), - (0x10C9D, 'M', '𐳝'), - (0x10C9E, 'M', '𐳞'), - (0x10C9F, 'M', '𐳟'), - (0x10CA0, 'M', '𐳠'), - (0x10CA1, 'M', '𐳡'), - (0x10CA2, 'M', '𐳢'), - (0x10CA3, 'M', '𐳣'), - (0x10CA4, 'M', '𐳤'), - (0x10CA5, 'M', '𐳥'), - (0x10CA6, 'M', '𐳦'), - (0x10CA7, 'M', '𐳧'), - (0x10CA8, 'M', '𐳨'), - (0x10CA9, 'M', '𐳩'), - (0x10CAA, 'M', '𐳪'), - (0x10CAB, 'M', '𐳫'), - (0x10CAC, 'M', '𐳬'), - (0x10CAD, 'M', '𐳭'), - (0x10CAE, 'M', '𐳮'), - (0x10CAF, 'M', '𐳯'), - (0x10CB0, 'M', '𐳰'), - (0x10CB1, 'M', '𐳱'), - (0x10CB2, 'M', '𐳲'), - (0x10CB3, 'X'), - (0x10CC0, 'V'), - (0x10CF3, 'X'), - (0x10CFA, 'V'), - (0x10D28, 'X'), - (0x10D30, 'V'), - (0x10D3A, 'X'), - (0x10E60, 'V'), - (0x10E7F, 'X'), - (0x10E80, 'V'), - (0x10EAA, 'X'), - (0x10EAB, 'V'), - (0x10EAE, 'X'), - (0x10EB0, 'V'), - (0x10EB2, 'X'), - (0x10EFD, 'V'), - (0x10F28, 'X'), - (0x10F30, 'V'), - (0x10F5A, 'X'), - (0x10F70, 'V'), - (0x10F8A, 'X'), - (0x10FB0, 'V'), - (0x10FCC, 'X'), - (0x10FE0, 'V'), - (0x10FF7, 'X'), - (0x11000, 'V'), - (0x1104E, 'X'), - (0x11052, 'V'), - (0x11076, 'X'), - (0x1107F, 'V'), - (0x110BD, 'X'), - (0x110BE, 'V'), - ] - -def _seg_57() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x110C3, 'X'), - (0x110D0, 'V'), - (0x110E9, 'X'), - (0x110F0, 'V'), - (0x110FA, 'X'), - (0x11100, 'V'), - (0x11135, 'X'), - (0x11136, 'V'), - (0x11148, 'X'), - (0x11150, 'V'), - (0x11177, 'X'), - (0x11180, 'V'), - (0x111E0, 'X'), - (0x111E1, 'V'), - (0x111F5, 'X'), - (0x11200, 'V'), - (0x11212, 'X'), - (0x11213, 'V'), - (0x11242, 'X'), - (0x11280, 'V'), - (0x11287, 'X'), - (0x11288, 'V'), - (0x11289, 'X'), - (0x1128A, 'V'), - (0x1128E, 'X'), - (0x1128F, 'V'), - (0x1129E, 'X'), - (0x1129F, 'V'), - (0x112AA, 'X'), - (0x112B0, 'V'), - (0x112EB, 'X'), - (0x112F0, 'V'), - (0x112FA, 'X'), - (0x11300, 'V'), - (0x11304, 'X'), - (0x11305, 'V'), - (0x1130D, 'X'), - (0x1130F, 'V'), - (0x11311, 'X'), - (0x11313, 'V'), - (0x11329, 'X'), - (0x1132A, 'V'), - (0x11331, 'X'), - (0x11332, 'V'), - (0x11334, 'X'), - (0x11335, 'V'), - (0x1133A, 'X'), - (0x1133B, 'V'), - (0x11345, 'X'), - (0x11347, 'V'), - (0x11349, 'X'), - (0x1134B, 'V'), - (0x1134E, 'X'), - (0x11350, 'V'), - (0x11351, 'X'), - (0x11357, 'V'), - (0x11358, 'X'), - (0x1135D, 'V'), - (0x11364, 'X'), - (0x11366, 'V'), - (0x1136D, 'X'), - (0x11370, 'V'), - (0x11375, 'X'), - (0x11400, 'V'), - (0x1145C, 'X'), - (0x1145D, 'V'), - (0x11462, 'X'), - (0x11480, 'V'), - (0x114C8, 'X'), - (0x114D0, 'V'), - (0x114DA, 'X'), - (0x11580, 'V'), - (0x115B6, 'X'), - (0x115B8, 'V'), - (0x115DE, 'X'), - (0x11600, 'V'), - (0x11645, 'X'), - (0x11650, 'V'), - (0x1165A, 'X'), - (0x11660, 'V'), - (0x1166D, 'X'), - (0x11680, 'V'), - (0x116BA, 'X'), - (0x116C0, 'V'), - (0x116CA, 'X'), - (0x11700, 'V'), - (0x1171B, 'X'), - (0x1171D, 'V'), - (0x1172C, 'X'), - (0x11730, 'V'), - (0x11747, 'X'), - (0x11800, 'V'), - (0x1183C, 'X'), - (0x118A0, 'M', '𑣀'), - (0x118A1, 'M', '𑣁'), - (0x118A2, 'M', '𑣂'), - (0x118A3, 'M', '𑣃'), - (0x118A4, 'M', '𑣄'), - (0x118A5, 'M', '𑣅'), - (0x118A6, 'M', '𑣆'), - ] - -def _seg_58() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x118A7, 
'M', '𑣇'), - (0x118A8, 'M', '𑣈'), - (0x118A9, 'M', '𑣉'), - (0x118AA, 'M', '𑣊'), - (0x118AB, 'M', '𑣋'), - (0x118AC, 'M', '𑣌'), - (0x118AD, 'M', '𑣍'), - (0x118AE, 'M', '𑣎'), - (0x118AF, 'M', '𑣏'), - (0x118B0, 'M', '𑣐'), - (0x118B1, 'M', '𑣑'), - (0x118B2, 'M', '𑣒'), - (0x118B3, 'M', '𑣓'), - (0x118B4, 'M', '𑣔'), - (0x118B5, 'M', '𑣕'), - (0x118B6, 'M', '𑣖'), - (0x118B7, 'M', '𑣗'), - (0x118B8, 'M', '𑣘'), - (0x118B9, 'M', '𑣙'), - (0x118BA, 'M', '𑣚'), - (0x118BB, 'M', '𑣛'), - (0x118BC, 'M', '𑣜'), - (0x118BD, 'M', '𑣝'), - (0x118BE, 'M', '𑣞'), - (0x118BF, 'M', '𑣟'), - (0x118C0, 'V'), - (0x118F3, 'X'), - (0x118FF, 'V'), - (0x11907, 'X'), - (0x11909, 'V'), - (0x1190A, 'X'), - (0x1190C, 'V'), - (0x11914, 'X'), - (0x11915, 'V'), - (0x11917, 'X'), - (0x11918, 'V'), - (0x11936, 'X'), - (0x11937, 'V'), - (0x11939, 'X'), - (0x1193B, 'V'), - (0x11947, 'X'), - (0x11950, 'V'), - (0x1195A, 'X'), - (0x119A0, 'V'), - (0x119A8, 'X'), - (0x119AA, 'V'), - (0x119D8, 'X'), - (0x119DA, 'V'), - (0x119E5, 'X'), - (0x11A00, 'V'), - (0x11A48, 'X'), - (0x11A50, 'V'), - (0x11AA3, 'X'), - (0x11AB0, 'V'), - (0x11AF9, 'X'), - (0x11B00, 'V'), - (0x11B0A, 'X'), - (0x11C00, 'V'), - (0x11C09, 'X'), - (0x11C0A, 'V'), - (0x11C37, 'X'), - (0x11C38, 'V'), - (0x11C46, 'X'), - (0x11C50, 'V'), - (0x11C6D, 'X'), - (0x11C70, 'V'), - (0x11C90, 'X'), - (0x11C92, 'V'), - (0x11CA8, 'X'), - (0x11CA9, 'V'), - (0x11CB7, 'X'), - (0x11D00, 'V'), - (0x11D07, 'X'), - (0x11D08, 'V'), - (0x11D0A, 'X'), - (0x11D0B, 'V'), - (0x11D37, 'X'), - (0x11D3A, 'V'), - (0x11D3B, 'X'), - (0x11D3C, 'V'), - (0x11D3E, 'X'), - (0x11D3F, 'V'), - (0x11D48, 'X'), - (0x11D50, 'V'), - (0x11D5A, 'X'), - (0x11D60, 'V'), - (0x11D66, 'X'), - (0x11D67, 'V'), - (0x11D69, 'X'), - (0x11D6A, 'V'), - (0x11D8F, 'X'), - (0x11D90, 'V'), - (0x11D92, 'X'), - (0x11D93, 'V'), - (0x11D99, 'X'), - (0x11DA0, 'V'), - (0x11DAA, 'X'), - (0x11EE0, 'V'), - (0x11EF9, 'X'), - (0x11F00, 'V'), - ] - -def _seg_59() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x11F11, 'X'), - (0x11F12, 'V'), - (0x11F3B, 'X'), - (0x11F3E, 'V'), - (0x11F5A, 'X'), - (0x11FB0, 'V'), - (0x11FB1, 'X'), - (0x11FC0, 'V'), - (0x11FF2, 'X'), - (0x11FFF, 'V'), - (0x1239A, 'X'), - (0x12400, 'V'), - (0x1246F, 'X'), - (0x12470, 'V'), - (0x12475, 'X'), - (0x12480, 'V'), - (0x12544, 'X'), - (0x12F90, 'V'), - (0x12FF3, 'X'), - (0x13000, 'V'), - (0x13430, 'X'), - (0x13440, 'V'), - (0x13456, 'X'), - (0x14400, 'V'), - (0x14647, 'X'), - (0x16800, 'V'), - (0x16A39, 'X'), - (0x16A40, 'V'), - (0x16A5F, 'X'), - (0x16A60, 'V'), - (0x16A6A, 'X'), - (0x16A6E, 'V'), - (0x16ABF, 'X'), - (0x16AC0, 'V'), - (0x16ACA, 'X'), - (0x16AD0, 'V'), - (0x16AEE, 'X'), - (0x16AF0, 'V'), - (0x16AF6, 'X'), - (0x16B00, 'V'), - (0x16B46, 'X'), - (0x16B50, 'V'), - (0x16B5A, 'X'), - (0x16B5B, 'V'), - (0x16B62, 'X'), - (0x16B63, 'V'), - (0x16B78, 'X'), - (0x16B7D, 'V'), - (0x16B90, 'X'), - (0x16E40, 'M', '𖹠'), - (0x16E41, 'M', '𖹡'), - (0x16E42, 'M', '𖹢'), - (0x16E43, 'M', '𖹣'), - (0x16E44, 'M', '𖹤'), - (0x16E45, 'M', '𖹥'), - (0x16E46, 'M', '𖹦'), - (0x16E47, 'M', '𖹧'), - (0x16E48, 'M', '𖹨'), - (0x16E49, 'M', '𖹩'), - (0x16E4A, 'M', '𖹪'), - (0x16E4B, 'M', '𖹫'), - (0x16E4C, 'M', '𖹬'), - (0x16E4D, 'M', '𖹭'), - (0x16E4E, 'M', '𖹮'), - (0x16E4F, 'M', '𖹯'), - (0x16E50, 'M', '𖹰'), - (0x16E51, 'M', '𖹱'), - (0x16E52, 'M', '𖹲'), - (0x16E53, 'M', '𖹳'), - (0x16E54, 'M', '𖹴'), - (0x16E55, 'M', '𖹵'), - (0x16E56, 'M', '𖹶'), - (0x16E57, 'M', '𖹷'), - (0x16E58, 'M', '𖹸'), - (0x16E59, 'M', '𖹹'), - (0x16E5A, 'M', '𖹺'), - (0x16E5B, 'M', '𖹻'), - (0x16E5C, 'M', '𖹼'), - 
(0x16E5D, 'M', '𖹽'), - (0x16E5E, 'M', '𖹾'), - (0x16E5F, 'M', '𖹿'), - (0x16E60, 'V'), - (0x16E9B, 'X'), - (0x16F00, 'V'), - (0x16F4B, 'X'), - (0x16F4F, 'V'), - (0x16F88, 'X'), - (0x16F8F, 'V'), - (0x16FA0, 'X'), - (0x16FE0, 'V'), - (0x16FE5, 'X'), - (0x16FF0, 'V'), - (0x16FF2, 'X'), - (0x17000, 'V'), - (0x187F8, 'X'), - (0x18800, 'V'), - (0x18CD6, 'X'), - (0x18D00, 'V'), - (0x18D09, 'X'), - (0x1AFF0, 'V'), - ] - -def _seg_60() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1AFF4, 'X'), - (0x1AFF5, 'V'), - (0x1AFFC, 'X'), - (0x1AFFD, 'V'), - (0x1AFFF, 'X'), - (0x1B000, 'V'), - (0x1B123, 'X'), - (0x1B132, 'V'), - (0x1B133, 'X'), - (0x1B150, 'V'), - (0x1B153, 'X'), - (0x1B155, 'V'), - (0x1B156, 'X'), - (0x1B164, 'V'), - (0x1B168, 'X'), - (0x1B170, 'V'), - (0x1B2FC, 'X'), - (0x1BC00, 'V'), - (0x1BC6B, 'X'), - (0x1BC70, 'V'), - (0x1BC7D, 'X'), - (0x1BC80, 'V'), - (0x1BC89, 'X'), - (0x1BC90, 'V'), - (0x1BC9A, 'X'), - (0x1BC9C, 'V'), - (0x1BCA0, 'I'), - (0x1BCA4, 'X'), - (0x1CF00, 'V'), - (0x1CF2E, 'X'), - (0x1CF30, 'V'), - (0x1CF47, 'X'), - (0x1CF50, 'V'), - (0x1CFC4, 'X'), - (0x1D000, 'V'), - (0x1D0F6, 'X'), - (0x1D100, 'V'), - (0x1D127, 'X'), - (0x1D129, 'V'), - (0x1D15E, 'M', '𝅗𝅥'), - (0x1D15F, 'M', '𝅘𝅥'), - (0x1D160, 'M', '𝅘𝅥𝅮'), - (0x1D161, 'M', '𝅘𝅥𝅯'), - (0x1D162, 'M', '𝅘𝅥𝅰'), - (0x1D163, 'M', '𝅘𝅥𝅱'), - (0x1D164, 'M', '𝅘𝅥𝅲'), - (0x1D165, 'V'), - (0x1D173, 'X'), - (0x1D17B, 'V'), - (0x1D1BB, 'M', '𝆹𝅥'), - (0x1D1BC, 'M', '𝆺𝅥'), - (0x1D1BD, 'M', '𝆹𝅥𝅮'), - (0x1D1BE, 'M', '𝆺𝅥𝅮'), - (0x1D1BF, 'M', '𝆹𝅥𝅯'), - (0x1D1C0, 'M', '𝆺𝅥𝅯'), - (0x1D1C1, 'V'), - (0x1D1EB, 'X'), - (0x1D200, 'V'), - (0x1D246, 'X'), - (0x1D2C0, 'V'), - (0x1D2D4, 'X'), - (0x1D2E0, 'V'), - (0x1D2F4, 'X'), - (0x1D300, 'V'), - (0x1D357, 'X'), - (0x1D360, 'V'), - (0x1D379, 'X'), - (0x1D400, 'M', 'a'), - (0x1D401, 'M', 'b'), - (0x1D402, 'M', 'c'), - (0x1D403, 'M', 'd'), - (0x1D404, 'M', 'e'), - (0x1D405, 'M', 'f'), - (0x1D406, 'M', 'g'), - (0x1D407, 'M', 'h'), - (0x1D408, 'M', 'i'), - (0x1D409, 'M', 'j'), - (0x1D40A, 'M', 'k'), - (0x1D40B, 'M', 'l'), - (0x1D40C, 'M', 'm'), - (0x1D40D, 'M', 'n'), - (0x1D40E, 'M', 'o'), - (0x1D40F, 'M', 'p'), - (0x1D410, 'M', 'q'), - (0x1D411, 'M', 'r'), - (0x1D412, 'M', 's'), - (0x1D413, 'M', 't'), - (0x1D414, 'M', 'u'), - (0x1D415, 'M', 'v'), - (0x1D416, 'M', 'w'), - (0x1D417, 'M', 'x'), - (0x1D418, 'M', 'y'), - (0x1D419, 'M', 'z'), - (0x1D41A, 'M', 'a'), - (0x1D41B, 'M', 'b'), - (0x1D41C, 'M', 'c'), - (0x1D41D, 'M', 'd'), - (0x1D41E, 'M', 'e'), - (0x1D41F, 'M', 'f'), - (0x1D420, 'M', 'g'), - ] - -def _seg_61() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D421, 'M', 'h'), - (0x1D422, 'M', 'i'), - (0x1D423, 'M', 'j'), - (0x1D424, 'M', 'k'), - (0x1D425, 'M', 'l'), - (0x1D426, 'M', 'm'), - (0x1D427, 'M', 'n'), - (0x1D428, 'M', 'o'), - (0x1D429, 'M', 'p'), - (0x1D42A, 'M', 'q'), - (0x1D42B, 'M', 'r'), - (0x1D42C, 'M', 's'), - (0x1D42D, 'M', 't'), - (0x1D42E, 'M', 'u'), - (0x1D42F, 'M', 'v'), - (0x1D430, 'M', 'w'), - (0x1D431, 'M', 'x'), - (0x1D432, 'M', 'y'), - (0x1D433, 'M', 'z'), - (0x1D434, 'M', 'a'), - (0x1D435, 'M', 'b'), - (0x1D436, 'M', 'c'), - (0x1D437, 'M', 'd'), - (0x1D438, 'M', 'e'), - (0x1D439, 'M', 'f'), - (0x1D43A, 'M', 'g'), - (0x1D43B, 'M', 'h'), - (0x1D43C, 'M', 'i'), - (0x1D43D, 'M', 'j'), - (0x1D43E, 'M', 'k'), - (0x1D43F, 'M', 'l'), - (0x1D440, 'M', 'm'), - (0x1D441, 'M', 'n'), - (0x1D442, 'M', 'o'), - (0x1D443, 'M', 'p'), - (0x1D444, 'M', 'q'), - (0x1D445, 'M', 'r'), - (0x1D446, 'M', 's'), - (0x1D447, 'M', 't'), - (0x1D448, 'M', 'u'), 
- (0x1D449, 'M', 'v'), - (0x1D44A, 'M', 'w'), - (0x1D44B, 'M', 'x'), - (0x1D44C, 'M', 'y'), - (0x1D44D, 'M', 'z'), - (0x1D44E, 'M', 'a'), - (0x1D44F, 'M', 'b'), - (0x1D450, 'M', 'c'), - (0x1D451, 'M', 'd'), - (0x1D452, 'M', 'e'), - (0x1D453, 'M', 'f'), - (0x1D454, 'M', 'g'), - (0x1D455, 'X'), - (0x1D456, 'M', 'i'), - (0x1D457, 'M', 'j'), - (0x1D458, 'M', 'k'), - (0x1D459, 'M', 'l'), - (0x1D45A, 'M', 'm'), - (0x1D45B, 'M', 'n'), - (0x1D45C, 'M', 'o'), - (0x1D45D, 'M', 'p'), - (0x1D45E, 'M', 'q'), - (0x1D45F, 'M', 'r'), - (0x1D460, 'M', 's'), - (0x1D461, 'M', 't'), - (0x1D462, 'M', 'u'), - (0x1D463, 'M', 'v'), - (0x1D464, 'M', 'w'), - (0x1D465, 'M', 'x'), - (0x1D466, 'M', 'y'), - (0x1D467, 'M', 'z'), - (0x1D468, 'M', 'a'), - (0x1D469, 'M', 'b'), - (0x1D46A, 'M', 'c'), - (0x1D46B, 'M', 'd'), - (0x1D46C, 'M', 'e'), - (0x1D46D, 'M', 'f'), - (0x1D46E, 'M', 'g'), - (0x1D46F, 'M', 'h'), - (0x1D470, 'M', 'i'), - (0x1D471, 'M', 'j'), - (0x1D472, 'M', 'k'), - (0x1D473, 'M', 'l'), - (0x1D474, 'M', 'm'), - (0x1D475, 'M', 'n'), - (0x1D476, 'M', 'o'), - (0x1D477, 'M', 'p'), - (0x1D478, 'M', 'q'), - (0x1D479, 'M', 'r'), - (0x1D47A, 'M', 's'), - (0x1D47B, 'M', 't'), - (0x1D47C, 'M', 'u'), - (0x1D47D, 'M', 'v'), - (0x1D47E, 'M', 'w'), - (0x1D47F, 'M', 'x'), - (0x1D480, 'M', 'y'), - (0x1D481, 'M', 'z'), - (0x1D482, 'M', 'a'), - (0x1D483, 'M', 'b'), - (0x1D484, 'M', 'c'), - ] - -def _seg_62() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D485, 'M', 'd'), - (0x1D486, 'M', 'e'), - (0x1D487, 'M', 'f'), - (0x1D488, 'M', 'g'), - (0x1D489, 'M', 'h'), - (0x1D48A, 'M', 'i'), - (0x1D48B, 'M', 'j'), - (0x1D48C, 'M', 'k'), - (0x1D48D, 'M', 'l'), - (0x1D48E, 'M', 'm'), - (0x1D48F, 'M', 'n'), - (0x1D490, 'M', 'o'), - (0x1D491, 'M', 'p'), - (0x1D492, 'M', 'q'), - (0x1D493, 'M', 'r'), - (0x1D494, 'M', 's'), - (0x1D495, 'M', 't'), - (0x1D496, 'M', 'u'), - (0x1D497, 'M', 'v'), - (0x1D498, 'M', 'w'), - (0x1D499, 'M', 'x'), - (0x1D49A, 'M', 'y'), - (0x1D49B, 'M', 'z'), - (0x1D49C, 'M', 'a'), - (0x1D49D, 'X'), - (0x1D49E, 'M', 'c'), - (0x1D49F, 'M', 'd'), - (0x1D4A0, 'X'), - (0x1D4A2, 'M', 'g'), - (0x1D4A3, 'X'), - (0x1D4A5, 'M', 'j'), - (0x1D4A6, 'M', 'k'), - (0x1D4A7, 'X'), - (0x1D4A9, 'M', 'n'), - (0x1D4AA, 'M', 'o'), - (0x1D4AB, 'M', 'p'), - (0x1D4AC, 'M', 'q'), - (0x1D4AD, 'X'), - (0x1D4AE, 'M', 's'), - (0x1D4AF, 'M', 't'), - (0x1D4B0, 'M', 'u'), - (0x1D4B1, 'M', 'v'), - (0x1D4B2, 'M', 'w'), - (0x1D4B3, 'M', 'x'), - (0x1D4B4, 'M', 'y'), - (0x1D4B5, 'M', 'z'), - (0x1D4B6, 'M', 'a'), - (0x1D4B7, 'M', 'b'), - (0x1D4B8, 'M', 'c'), - (0x1D4B9, 'M', 'd'), - (0x1D4BA, 'X'), - (0x1D4BB, 'M', 'f'), - (0x1D4BC, 'X'), - (0x1D4BD, 'M', 'h'), - (0x1D4BE, 'M', 'i'), - (0x1D4BF, 'M', 'j'), - (0x1D4C0, 'M', 'k'), - (0x1D4C1, 'M', 'l'), - (0x1D4C2, 'M', 'm'), - (0x1D4C3, 'M', 'n'), - (0x1D4C4, 'X'), - (0x1D4C5, 'M', 'p'), - (0x1D4C6, 'M', 'q'), - (0x1D4C7, 'M', 'r'), - (0x1D4C8, 'M', 's'), - (0x1D4C9, 'M', 't'), - (0x1D4CA, 'M', 'u'), - (0x1D4CB, 'M', 'v'), - (0x1D4CC, 'M', 'w'), - (0x1D4CD, 'M', 'x'), - (0x1D4CE, 'M', 'y'), - (0x1D4CF, 'M', 'z'), - (0x1D4D0, 'M', 'a'), - (0x1D4D1, 'M', 'b'), - (0x1D4D2, 'M', 'c'), - (0x1D4D3, 'M', 'd'), - (0x1D4D4, 'M', 'e'), - (0x1D4D5, 'M', 'f'), - (0x1D4D6, 'M', 'g'), - (0x1D4D7, 'M', 'h'), - (0x1D4D8, 'M', 'i'), - (0x1D4D9, 'M', 'j'), - (0x1D4DA, 'M', 'k'), - (0x1D4DB, 'M', 'l'), - (0x1D4DC, 'M', 'm'), - (0x1D4DD, 'M', 'n'), - (0x1D4DE, 'M', 'o'), - (0x1D4DF, 'M', 'p'), - (0x1D4E0, 'M', 'q'), - (0x1D4E1, 'M', 'r'), - (0x1D4E2, 'M', 's'), - (0x1D4E3, 'M', 't'), - (0x1D4E4, 'M', 
'u'), - (0x1D4E5, 'M', 'v'), - (0x1D4E6, 'M', 'w'), - (0x1D4E7, 'M', 'x'), - (0x1D4E8, 'M', 'y'), - (0x1D4E9, 'M', 'z'), - (0x1D4EA, 'M', 'a'), - (0x1D4EB, 'M', 'b'), - ] - -def _seg_63() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D4EC, 'M', 'c'), - (0x1D4ED, 'M', 'd'), - (0x1D4EE, 'M', 'e'), - (0x1D4EF, 'M', 'f'), - (0x1D4F0, 'M', 'g'), - (0x1D4F1, 'M', 'h'), - (0x1D4F2, 'M', 'i'), - (0x1D4F3, 'M', 'j'), - (0x1D4F4, 'M', 'k'), - (0x1D4F5, 'M', 'l'), - (0x1D4F6, 'M', 'm'), - (0x1D4F7, 'M', 'n'), - (0x1D4F8, 'M', 'o'), - (0x1D4F9, 'M', 'p'), - (0x1D4FA, 'M', 'q'), - (0x1D4FB, 'M', 'r'), - (0x1D4FC, 'M', 's'), - (0x1D4FD, 'M', 't'), - (0x1D4FE, 'M', 'u'), - (0x1D4FF, 'M', 'v'), - (0x1D500, 'M', 'w'), - (0x1D501, 'M', 'x'), - (0x1D502, 'M', 'y'), - (0x1D503, 'M', 'z'), - (0x1D504, 'M', 'a'), - (0x1D505, 'M', 'b'), - (0x1D506, 'X'), - (0x1D507, 'M', 'd'), - (0x1D508, 'M', 'e'), - (0x1D509, 'M', 'f'), - (0x1D50A, 'M', 'g'), - (0x1D50B, 'X'), - (0x1D50D, 'M', 'j'), - (0x1D50E, 'M', 'k'), - (0x1D50F, 'M', 'l'), - (0x1D510, 'M', 'm'), - (0x1D511, 'M', 'n'), - (0x1D512, 'M', 'o'), - (0x1D513, 'M', 'p'), - (0x1D514, 'M', 'q'), - (0x1D515, 'X'), - (0x1D516, 'M', 's'), - (0x1D517, 'M', 't'), - (0x1D518, 'M', 'u'), - (0x1D519, 'M', 'v'), - (0x1D51A, 'M', 'w'), - (0x1D51B, 'M', 'x'), - (0x1D51C, 'M', 'y'), - (0x1D51D, 'X'), - (0x1D51E, 'M', 'a'), - (0x1D51F, 'M', 'b'), - (0x1D520, 'M', 'c'), - (0x1D521, 'M', 'd'), - (0x1D522, 'M', 'e'), - (0x1D523, 'M', 'f'), - (0x1D524, 'M', 'g'), - (0x1D525, 'M', 'h'), - (0x1D526, 'M', 'i'), - (0x1D527, 'M', 'j'), - (0x1D528, 'M', 'k'), - (0x1D529, 'M', 'l'), - (0x1D52A, 'M', 'm'), - (0x1D52B, 'M', 'n'), - (0x1D52C, 'M', 'o'), - (0x1D52D, 'M', 'p'), - (0x1D52E, 'M', 'q'), - (0x1D52F, 'M', 'r'), - (0x1D530, 'M', 's'), - (0x1D531, 'M', 't'), - (0x1D532, 'M', 'u'), - (0x1D533, 'M', 'v'), - (0x1D534, 'M', 'w'), - (0x1D535, 'M', 'x'), - (0x1D536, 'M', 'y'), - (0x1D537, 'M', 'z'), - (0x1D538, 'M', 'a'), - (0x1D539, 'M', 'b'), - (0x1D53A, 'X'), - (0x1D53B, 'M', 'd'), - (0x1D53C, 'M', 'e'), - (0x1D53D, 'M', 'f'), - (0x1D53E, 'M', 'g'), - (0x1D53F, 'X'), - (0x1D540, 'M', 'i'), - (0x1D541, 'M', 'j'), - (0x1D542, 'M', 'k'), - (0x1D543, 'M', 'l'), - (0x1D544, 'M', 'm'), - (0x1D545, 'X'), - (0x1D546, 'M', 'o'), - (0x1D547, 'X'), - (0x1D54A, 'M', 's'), - (0x1D54B, 'M', 't'), - (0x1D54C, 'M', 'u'), - (0x1D54D, 'M', 'v'), - (0x1D54E, 'M', 'w'), - (0x1D54F, 'M', 'x'), - (0x1D550, 'M', 'y'), - (0x1D551, 'X'), - (0x1D552, 'M', 'a'), - ] - -def _seg_64() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D553, 'M', 'b'), - (0x1D554, 'M', 'c'), - (0x1D555, 'M', 'd'), - (0x1D556, 'M', 'e'), - (0x1D557, 'M', 'f'), - (0x1D558, 'M', 'g'), - (0x1D559, 'M', 'h'), - (0x1D55A, 'M', 'i'), - (0x1D55B, 'M', 'j'), - (0x1D55C, 'M', 'k'), - (0x1D55D, 'M', 'l'), - (0x1D55E, 'M', 'm'), - (0x1D55F, 'M', 'n'), - (0x1D560, 'M', 'o'), - (0x1D561, 'M', 'p'), - (0x1D562, 'M', 'q'), - (0x1D563, 'M', 'r'), - (0x1D564, 'M', 's'), - (0x1D565, 'M', 't'), - (0x1D566, 'M', 'u'), - (0x1D567, 'M', 'v'), - (0x1D568, 'M', 'w'), - (0x1D569, 'M', 'x'), - (0x1D56A, 'M', 'y'), - (0x1D56B, 'M', 'z'), - (0x1D56C, 'M', 'a'), - (0x1D56D, 'M', 'b'), - (0x1D56E, 'M', 'c'), - (0x1D56F, 'M', 'd'), - (0x1D570, 'M', 'e'), - (0x1D571, 'M', 'f'), - (0x1D572, 'M', 'g'), - (0x1D573, 'M', 'h'), - (0x1D574, 'M', 'i'), - (0x1D575, 'M', 'j'), - (0x1D576, 'M', 'k'), - (0x1D577, 'M', 'l'), - (0x1D578, 'M', 'm'), - (0x1D579, 'M', 'n'), - (0x1D57A, 'M', 'o'), - (0x1D57B, 'M', 'p'), - (0x1D57C, 
'M', 'q'), - (0x1D57D, 'M', 'r'), - (0x1D57E, 'M', 's'), - (0x1D57F, 'M', 't'), - (0x1D580, 'M', 'u'), - (0x1D581, 'M', 'v'), - (0x1D582, 'M', 'w'), - (0x1D583, 'M', 'x'), - (0x1D584, 'M', 'y'), - (0x1D585, 'M', 'z'), - (0x1D586, 'M', 'a'), - (0x1D587, 'M', 'b'), - (0x1D588, 'M', 'c'), - (0x1D589, 'M', 'd'), - (0x1D58A, 'M', 'e'), - (0x1D58B, 'M', 'f'), - (0x1D58C, 'M', 'g'), - (0x1D58D, 'M', 'h'), - (0x1D58E, 'M', 'i'), - (0x1D58F, 'M', 'j'), - (0x1D590, 'M', 'k'), - (0x1D591, 'M', 'l'), - (0x1D592, 'M', 'm'), - (0x1D593, 'M', 'n'), - (0x1D594, 'M', 'o'), - (0x1D595, 'M', 'p'), - (0x1D596, 'M', 'q'), - (0x1D597, 'M', 'r'), - (0x1D598, 'M', 's'), - (0x1D599, 'M', 't'), - (0x1D59A, 'M', 'u'), - (0x1D59B, 'M', 'v'), - (0x1D59C, 'M', 'w'), - (0x1D59D, 'M', 'x'), - (0x1D59E, 'M', 'y'), - (0x1D59F, 'M', 'z'), - (0x1D5A0, 'M', 'a'), - (0x1D5A1, 'M', 'b'), - (0x1D5A2, 'M', 'c'), - (0x1D5A3, 'M', 'd'), - (0x1D5A4, 'M', 'e'), - (0x1D5A5, 'M', 'f'), - (0x1D5A6, 'M', 'g'), - (0x1D5A7, 'M', 'h'), - (0x1D5A8, 'M', 'i'), - (0x1D5A9, 'M', 'j'), - (0x1D5AA, 'M', 'k'), - (0x1D5AB, 'M', 'l'), - (0x1D5AC, 'M', 'm'), - (0x1D5AD, 'M', 'n'), - (0x1D5AE, 'M', 'o'), - (0x1D5AF, 'M', 'p'), - (0x1D5B0, 'M', 'q'), - (0x1D5B1, 'M', 'r'), - (0x1D5B2, 'M', 's'), - (0x1D5B3, 'M', 't'), - (0x1D5B4, 'M', 'u'), - (0x1D5B5, 'M', 'v'), - (0x1D5B6, 'M', 'w'), - ] - -def _seg_65() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D5B7, 'M', 'x'), - (0x1D5B8, 'M', 'y'), - (0x1D5B9, 'M', 'z'), - (0x1D5BA, 'M', 'a'), - (0x1D5BB, 'M', 'b'), - (0x1D5BC, 'M', 'c'), - (0x1D5BD, 'M', 'd'), - (0x1D5BE, 'M', 'e'), - (0x1D5BF, 'M', 'f'), - (0x1D5C0, 'M', 'g'), - (0x1D5C1, 'M', 'h'), - (0x1D5C2, 'M', 'i'), - (0x1D5C3, 'M', 'j'), - (0x1D5C4, 'M', 'k'), - (0x1D5C5, 'M', 'l'), - (0x1D5C6, 'M', 'm'), - (0x1D5C7, 'M', 'n'), - (0x1D5C8, 'M', 'o'), - (0x1D5C9, 'M', 'p'), - (0x1D5CA, 'M', 'q'), - (0x1D5CB, 'M', 'r'), - (0x1D5CC, 'M', 's'), - (0x1D5CD, 'M', 't'), - (0x1D5CE, 'M', 'u'), - (0x1D5CF, 'M', 'v'), - (0x1D5D0, 'M', 'w'), - (0x1D5D1, 'M', 'x'), - (0x1D5D2, 'M', 'y'), - (0x1D5D3, 'M', 'z'), - (0x1D5D4, 'M', 'a'), - (0x1D5D5, 'M', 'b'), - (0x1D5D6, 'M', 'c'), - (0x1D5D7, 'M', 'd'), - (0x1D5D8, 'M', 'e'), - (0x1D5D9, 'M', 'f'), - (0x1D5DA, 'M', 'g'), - (0x1D5DB, 'M', 'h'), - (0x1D5DC, 'M', 'i'), - (0x1D5DD, 'M', 'j'), - (0x1D5DE, 'M', 'k'), - (0x1D5DF, 'M', 'l'), - (0x1D5E0, 'M', 'm'), - (0x1D5E1, 'M', 'n'), - (0x1D5E2, 'M', 'o'), - (0x1D5E3, 'M', 'p'), - (0x1D5E4, 'M', 'q'), - (0x1D5E5, 'M', 'r'), - (0x1D5E6, 'M', 's'), - (0x1D5E7, 'M', 't'), - (0x1D5E8, 'M', 'u'), - (0x1D5E9, 'M', 'v'), - (0x1D5EA, 'M', 'w'), - (0x1D5EB, 'M', 'x'), - (0x1D5EC, 'M', 'y'), - (0x1D5ED, 'M', 'z'), - (0x1D5EE, 'M', 'a'), - (0x1D5EF, 'M', 'b'), - (0x1D5F0, 'M', 'c'), - (0x1D5F1, 'M', 'd'), - (0x1D5F2, 'M', 'e'), - (0x1D5F3, 'M', 'f'), - (0x1D5F4, 'M', 'g'), - (0x1D5F5, 'M', 'h'), - (0x1D5F6, 'M', 'i'), - (0x1D5F7, 'M', 'j'), - (0x1D5F8, 'M', 'k'), - (0x1D5F9, 'M', 'l'), - (0x1D5FA, 'M', 'm'), - (0x1D5FB, 'M', 'n'), - (0x1D5FC, 'M', 'o'), - (0x1D5FD, 'M', 'p'), - (0x1D5FE, 'M', 'q'), - (0x1D5FF, 'M', 'r'), - (0x1D600, 'M', 's'), - (0x1D601, 'M', 't'), - (0x1D602, 'M', 'u'), - (0x1D603, 'M', 'v'), - (0x1D604, 'M', 'w'), - (0x1D605, 'M', 'x'), - (0x1D606, 'M', 'y'), - (0x1D607, 'M', 'z'), - (0x1D608, 'M', 'a'), - (0x1D609, 'M', 'b'), - (0x1D60A, 'M', 'c'), - (0x1D60B, 'M', 'd'), - (0x1D60C, 'M', 'e'), - (0x1D60D, 'M', 'f'), - (0x1D60E, 'M', 'g'), - (0x1D60F, 'M', 'h'), - (0x1D610, 'M', 'i'), - (0x1D611, 'M', 'j'), - (0x1D612, 'M', 'k'), - 
(0x1D613, 'M', 'l'), - (0x1D614, 'M', 'm'), - (0x1D615, 'M', 'n'), - (0x1D616, 'M', 'o'), - (0x1D617, 'M', 'p'), - (0x1D618, 'M', 'q'), - (0x1D619, 'M', 'r'), - (0x1D61A, 'M', 's'), - ] - -def _seg_66() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D61B, 'M', 't'), - (0x1D61C, 'M', 'u'), - (0x1D61D, 'M', 'v'), - (0x1D61E, 'M', 'w'), - (0x1D61F, 'M', 'x'), - (0x1D620, 'M', 'y'), - (0x1D621, 'M', 'z'), - (0x1D622, 'M', 'a'), - (0x1D623, 'M', 'b'), - (0x1D624, 'M', 'c'), - (0x1D625, 'M', 'd'), - (0x1D626, 'M', 'e'), - (0x1D627, 'M', 'f'), - (0x1D628, 'M', 'g'), - (0x1D629, 'M', 'h'), - (0x1D62A, 'M', 'i'), - (0x1D62B, 'M', 'j'), - (0x1D62C, 'M', 'k'), - (0x1D62D, 'M', 'l'), - (0x1D62E, 'M', 'm'), - (0x1D62F, 'M', 'n'), - (0x1D630, 'M', 'o'), - (0x1D631, 'M', 'p'), - (0x1D632, 'M', 'q'), - (0x1D633, 'M', 'r'), - (0x1D634, 'M', 's'), - (0x1D635, 'M', 't'), - (0x1D636, 'M', 'u'), - (0x1D637, 'M', 'v'), - (0x1D638, 'M', 'w'), - (0x1D639, 'M', 'x'), - (0x1D63A, 'M', 'y'), - (0x1D63B, 'M', 'z'), - (0x1D63C, 'M', 'a'), - (0x1D63D, 'M', 'b'), - (0x1D63E, 'M', 'c'), - (0x1D63F, 'M', 'd'), - (0x1D640, 'M', 'e'), - (0x1D641, 'M', 'f'), - (0x1D642, 'M', 'g'), - (0x1D643, 'M', 'h'), - (0x1D644, 'M', 'i'), - (0x1D645, 'M', 'j'), - (0x1D646, 'M', 'k'), - (0x1D647, 'M', 'l'), - (0x1D648, 'M', 'm'), - (0x1D649, 'M', 'n'), - (0x1D64A, 'M', 'o'), - (0x1D64B, 'M', 'p'), - (0x1D64C, 'M', 'q'), - (0x1D64D, 'M', 'r'), - (0x1D64E, 'M', 's'), - (0x1D64F, 'M', 't'), - (0x1D650, 'M', 'u'), - (0x1D651, 'M', 'v'), - (0x1D652, 'M', 'w'), - (0x1D653, 'M', 'x'), - (0x1D654, 'M', 'y'), - (0x1D655, 'M', 'z'), - (0x1D656, 'M', 'a'), - (0x1D657, 'M', 'b'), - (0x1D658, 'M', 'c'), - (0x1D659, 'M', 'd'), - (0x1D65A, 'M', 'e'), - (0x1D65B, 'M', 'f'), - (0x1D65C, 'M', 'g'), - (0x1D65D, 'M', 'h'), - (0x1D65E, 'M', 'i'), - (0x1D65F, 'M', 'j'), - (0x1D660, 'M', 'k'), - (0x1D661, 'M', 'l'), - (0x1D662, 'M', 'm'), - (0x1D663, 'M', 'n'), - (0x1D664, 'M', 'o'), - (0x1D665, 'M', 'p'), - (0x1D666, 'M', 'q'), - (0x1D667, 'M', 'r'), - (0x1D668, 'M', 's'), - (0x1D669, 'M', 't'), - (0x1D66A, 'M', 'u'), - (0x1D66B, 'M', 'v'), - (0x1D66C, 'M', 'w'), - (0x1D66D, 'M', 'x'), - (0x1D66E, 'M', 'y'), - (0x1D66F, 'M', 'z'), - (0x1D670, 'M', 'a'), - (0x1D671, 'M', 'b'), - (0x1D672, 'M', 'c'), - (0x1D673, 'M', 'd'), - (0x1D674, 'M', 'e'), - (0x1D675, 'M', 'f'), - (0x1D676, 'M', 'g'), - (0x1D677, 'M', 'h'), - (0x1D678, 'M', 'i'), - (0x1D679, 'M', 'j'), - (0x1D67A, 'M', 'k'), - (0x1D67B, 'M', 'l'), - (0x1D67C, 'M', 'm'), - (0x1D67D, 'M', 'n'), - (0x1D67E, 'M', 'o'), - ] - -def _seg_67() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D67F, 'M', 'p'), - (0x1D680, 'M', 'q'), - (0x1D681, 'M', 'r'), - (0x1D682, 'M', 's'), - (0x1D683, 'M', 't'), - (0x1D684, 'M', 'u'), - (0x1D685, 'M', 'v'), - (0x1D686, 'M', 'w'), - (0x1D687, 'M', 'x'), - (0x1D688, 'M', 'y'), - (0x1D689, 'M', 'z'), - (0x1D68A, 'M', 'a'), - (0x1D68B, 'M', 'b'), - (0x1D68C, 'M', 'c'), - (0x1D68D, 'M', 'd'), - (0x1D68E, 'M', 'e'), - (0x1D68F, 'M', 'f'), - (0x1D690, 'M', 'g'), - (0x1D691, 'M', 'h'), - (0x1D692, 'M', 'i'), - (0x1D693, 'M', 'j'), - (0x1D694, 'M', 'k'), - (0x1D695, 'M', 'l'), - (0x1D696, 'M', 'm'), - (0x1D697, 'M', 'n'), - (0x1D698, 'M', 'o'), - (0x1D699, 'M', 'p'), - (0x1D69A, 'M', 'q'), - (0x1D69B, 'M', 'r'), - (0x1D69C, 'M', 's'), - (0x1D69D, 'M', 't'), - (0x1D69E, 'M', 'u'), - (0x1D69F, 'M', 'v'), - (0x1D6A0, 'M', 'w'), - (0x1D6A1, 'M', 'x'), - (0x1D6A2, 'M', 'y'), - (0x1D6A3, 'M', 'z'), - (0x1D6A4, 'M', 'ı'), - (0x1D6A5, 'M', 'ȷ'), - 
(0x1D6A6, 'X'), - (0x1D6A8, 'M', 'α'), - (0x1D6A9, 'M', 'β'), - (0x1D6AA, 'M', 'γ'), - (0x1D6AB, 'M', 'δ'), - (0x1D6AC, 'M', 'ε'), - (0x1D6AD, 'M', 'ζ'), - (0x1D6AE, 'M', 'η'), - (0x1D6AF, 'M', 'θ'), - (0x1D6B0, 'M', 'ι'), - (0x1D6B1, 'M', 'κ'), - (0x1D6B2, 'M', 'λ'), - (0x1D6B3, 'M', 'μ'), - (0x1D6B4, 'M', 'ν'), - (0x1D6B5, 'M', 'ξ'), - (0x1D6B6, 'M', 'ο'), - (0x1D6B7, 'M', 'π'), - (0x1D6B8, 'M', 'ρ'), - (0x1D6B9, 'M', 'θ'), - (0x1D6BA, 'M', 'σ'), - (0x1D6BB, 'M', 'τ'), - (0x1D6BC, 'M', 'υ'), - (0x1D6BD, 'M', 'φ'), - (0x1D6BE, 'M', 'χ'), - (0x1D6BF, 'M', 'ψ'), - (0x1D6C0, 'M', 'ω'), - (0x1D6C1, 'M', '∇'), - (0x1D6C2, 'M', 'α'), - (0x1D6C3, 'M', 'β'), - (0x1D6C4, 'M', 'γ'), - (0x1D6C5, 'M', 'δ'), - (0x1D6C6, 'M', 'ε'), - (0x1D6C7, 'M', 'ζ'), - (0x1D6C8, 'M', 'η'), - (0x1D6C9, 'M', 'θ'), - (0x1D6CA, 'M', 'ι'), - (0x1D6CB, 'M', 'κ'), - (0x1D6CC, 'M', 'λ'), - (0x1D6CD, 'M', 'μ'), - (0x1D6CE, 'M', 'ν'), - (0x1D6CF, 'M', 'ξ'), - (0x1D6D0, 'M', 'ο'), - (0x1D6D1, 'M', 'π'), - (0x1D6D2, 'M', 'ρ'), - (0x1D6D3, 'M', 'σ'), - (0x1D6D5, 'M', 'τ'), - (0x1D6D6, 'M', 'υ'), - (0x1D6D7, 'M', 'φ'), - (0x1D6D8, 'M', 'χ'), - (0x1D6D9, 'M', 'ψ'), - (0x1D6DA, 'M', 'ω'), - (0x1D6DB, 'M', '∂'), - (0x1D6DC, 'M', 'ε'), - (0x1D6DD, 'M', 'θ'), - (0x1D6DE, 'M', 'κ'), - (0x1D6DF, 'M', 'φ'), - (0x1D6E0, 'M', 'ρ'), - (0x1D6E1, 'M', 'π'), - (0x1D6E2, 'M', 'α'), - (0x1D6E3, 'M', 'β'), - (0x1D6E4, 'M', 'γ'), - ] - -def _seg_68() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D6E5, 'M', 'δ'), - (0x1D6E6, 'M', 'ε'), - (0x1D6E7, 'M', 'ζ'), - (0x1D6E8, 'M', 'η'), - (0x1D6E9, 'M', 'θ'), - (0x1D6EA, 'M', 'ι'), - (0x1D6EB, 'M', 'κ'), - (0x1D6EC, 'M', 'λ'), - (0x1D6ED, 'M', 'μ'), - (0x1D6EE, 'M', 'ν'), - (0x1D6EF, 'M', 'ξ'), - (0x1D6F0, 'M', 'ο'), - (0x1D6F1, 'M', 'π'), - (0x1D6F2, 'M', 'ρ'), - (0x1D6F3, 'M', 'θ'), - (0x1D6F4, 'M', 'σ'), - (0x1D6F5, 'M', 'τ'), - (0x1D6F6, 'M', 'υ'), - (0x1D6F7, 'M', 'φ'), - (0x1D6F8, 'M', 'χ'), - (0x1D6F9, 'M', 'ψ'), - (0x1D6FA, 'M', 'ω'), - (0x1D6FB, 'M', '∇'), - (0x1D6FC, 'M', 'α'), - (0x1D6FD, 'M', 'β'), - (0x1D6FE, 'M', 'γ'), - (0x1D6FF, 'M', 'δ'), - (0x1D700, 'M', 'ε'), - (0x1D701, 'M', 'ζ'), - (0x1D702, 'M', 'η'), - (0x1D703, 'M', 'θ'), - (0x1D704, 'M', 'ι'), - (0x1D705, 'M', 'κ'), - (0x1D706, 'M', 'λ'), - (0x1D707, 'M', 'μ'), - (0x1D708, 'M', 'ν'), - (0x1D709, 'M', 'ξ'), - (0x1D70A, 'M', 'ο'), - (0x1D70B, 'M', 'π'), - (0x1D70C, 'M', 'ρ'), - (0x1D70D, 'M', 'σ'), - (0x1D70F, 'M', 'τ'), - (0x1D710, 'M', 'υ'), - (0x1D711, 'M', 'φ'), - (0x1D712, 'M', 'χ'), - (0x1D713, 'M', 'ψ'), - (0x1D714, 'M', 'ω'), - (0x1D715, 'M', '∂'), - (0x1D716, 'M', 'ε'), - (0x1D717, 'M', 'θ'), - (0x1D718, 'M', 'κ'), - (0x1D719, 'M', 'φ'), - (0x1D71A, 'M', 'ρ'), - (0x1D71B, 'M', 'π'), - (0x1D71C, 'M', 'α'), - (0x1D71D, 'M', 'β'), - (0x1D71E, 'M', 'γ'), - (0x1D71F, 'M', 'δ'), - (0x1D720, 'M', 'ε'), - (0x1D721, 'M', 'ζ'), - (0x1D722, 'M', 'η'), - (0x1D723, 'M', 'θ'), - (0x1D724, 'M', 'ι'), - (0x1D725, 'M', 'κ'), - (0x1D726, 'M', 'λ'), - (0x1D727, 'M', 'μ'), - (0x1D728, 'M', 'ν'), - (0x1D729, 'M', 'ξ'), - (0x1D72A, 'M', 'ο'), - (0x1D72B, 'M', 'π'), - (0x1D72C, 'M', 'ρ'), - (0x1D72D, 'M', 'θ'), - (0x1D72E, 'M', 'σ'), - (0x1D72F, 'M', 'τ'), - (0x1D730, 'M', 'υ'), - (0x1D731, 'M', 'φ'), - (0x1D732, 'M', 'χ'), - (0x1D733, 'M', 'ψ'), - (0x1D734, 'M', 'ω'), - (0x1D735, 'M', '∇'), - (0x1D736, 'M', 'α'), - (0x1D737, 'M', 'β'), - (0x1D738, 'M', 'γ'), - (0x1D739, 'M', 'δ'), - (0x1D73A, 'M', 'ε'), - (0x1D73B, 'M', 'ζ'), - (0x1D73C, 'M', 'η'), - (0x1D73D, 'M', 'θ'), - (0x1D73E, 'M', 'ι'), - (0x1D73F, 'M', 'κ'), - 
(0x1D740, 'M', 'λ'), - (0x1D741, 'M', 'μ'), - (0x1D742, 'M', 'ν'), - (0x1D743, 'M', 'ξ'), - (0x1D744, 'M', 'ο'), - (0x1D745, 'M', 'π'), - (0x1D746, 'M', 'ρ'), - (0x1D747, 'M', 'σ'), - (0x1D749, 'M', 'τ'), - (0x1D74A, 'M', 'υ'), - ] - -def _seg_69() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D74B, 'M', 'φ'), - (0x1D74C, 'M', 'χ'), - (0x1D74D, 'M', 'ψ'), - (0x1D74E, 'M', 'ω'), - (0x1D74F, 'M', '∂'), - (0x1D750, 'M', 'ε'), - (0x1D751, 'M', 'θ'), - (0x1D752, 'M', 'κ'), - (0x1D753, 'M', 'φ'), - (0x1D754, 'M', 'ρ'), - (0x1D755, 'M', 'π'), - (0x1D756, 'M', 'α'), - (0x1D757, 'M', 'β'), - (0x1D758, 'M', 'γ'), - (0x1D759, 'M', 'δ'), - (0x1D75A, 'M', 'ε'), - (0x1D75B, 'M', 'ζ'), - (0x1D75C, 'M', 'η'), - (0x1D75D, 'M', 'θ'), - (0x1D75E, 'M', 'ι'), - (0x1D75F, 'M', 'κ'), - (0x1D760, 'M', 'λ'), - (0x1D761, 'M', 'μ'), - (0x1D762, 'M', 'ν'), - (0x1D763, 'M', 'ξ'), - (0x1D764, 'M', 'ο'), - (0x1D765, 'M', 'π'), - (0x1D766, 'M', 'ρ'), - (0x1D767, 'M', 'θ'), - (0x1D768, 'M', 'σ'), - (0x1D769, 'M', 'τ'), - (0x1D76A, 'M', 'υ'), - (0x1D76B, 'M', 'φ'), - (0x1D76C, 'M', 'χ'), - (0x1D76D, 'M', 'ψ'), - (0x1D76E, 'M', 'ω'), - (0x1D76F, 'M', '∇'), - (0x1D770, 'M', 'α'), - (0x1D771, 'M', 'β'), - (0x1D772, 'M', 'γ'), - (0x1D773, 'M', 'δ'), - (0x1D774, 'M', 'ε'), - (0x1D775, 'M', 'ζ'), - (0x1D776, 'M', 'η'), - (0x1D777, 'M', 'θ'), - (0x1D778, 'M', 'ι'), - (0x1D779, 'M', 'κ'), - (0x1D77A, 'M', 'λ'), - (0x1D77B, 'M', 'μ'), - (0x1D77C, 'M', 'ν'), - (0x1D77D, 'M', 'ξ'), - (0x1D77E, 'M', 'ο'), - (0x1D77F, 'M', 'π'), - (0x1D780, 'M', 'ρ'), - (0x1D781, 'M', 'σ'), - (0x1D783, 'M', 'τ'), - (0x1D784, 'M', 'υ'), - (0x1D785, 'M', 'φ'), - (0x1D786, 'M', 'χ'), - (0x1D787, 'M', 'ψ'), - (0x1D788, 'M', 'ω'), - (0x1D789, 'M', '∂'), - (0x1D78A, 'M', 'ε'), - (0x1D78B, 'M', 'θ'), - (0x1D78C, 'M', 'κ'), - (0x1D78D, 'M', 'φ'), - (0x1D78E, 'M', 'ρ'), - (0x1D78F, 'M', 'π'), - (0x1D790, 'M', 'α'), - (0x1D791, 'M', 'β'), - (0x1D792, 'M', 'γ'), - (0x1D793, 'M', 'δ'), - (0x1D794, 'M', 'ε'), - (0x1D795, 'M', 'ζ'), - (0x1D796, 'M', 'η'), - (0x1D797, 'M', 'θ'), - (0x1D798, 'M', 'ι'), - (0x1D799, 'M', 'κ'), - (0x1D79A, 'M', 'λ'), - (0x1D79B, 'M', 'μ'), - (0x1D79C, 'M', 'ν'), - (0x1D79D, 'M', 'ξ'), - (0x1D79E, 'M', 'ο'), - (0x1D79F, 'M', 'π'), - (0x1D7A0, 'M', 'ρ'), - (0x1D7A1, 'M', 'θ'), - (0x1D7A2, 'M', 'σ'), - (0x1D7A3, 'M', 'τ'), - (0x1D7A4, 'M', 'υ'), - (0x1D7A5, 'M', 'φ'), - (0x1D7A6, 'M', 'χ'), - (0x1D7A7, 'M', 'ψ'), - (0x1D7A8, 'M', 'ω'), - (0x1D7A9, 'M', '∇'), - (0x1D7AA, 'M', 'α'), - (0x1D7AB, 'M', 'β'), - (0x1D7AC, 'M', 'γ'), - (0x1D7AD, 'M', 'δ'), - (0x1D7AE, 'M', 'ε'), - (0x1D7AF, 'M', 'ζ'), - ] - -def _seg_70() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D7B0, 'M', 'η'), - (0x1D7B1, 'M', 'θ'), - (0x1D7B2, 'M', 'ι'), - (0x1D7B3, 'M', 'κ'), - (0x1D7B4, 'M', 'λ'), - (0x1D7B5, 'M', 'μ'), - (0x1D7B6, 'M', 'ν'), - (0x1D7B7, 'M', 'ξ'), - (0x1D7B8, 'M', 'ο'), - (0x1D7B9, 'M', 'π'), - (0x1D7BA, 'M', 'ρ'), - (0x1D7BB, 'M', 'σ'), - (0x1D7BD, 'M', 'τ'), - (0x1D7BE, 'M', 'υ'), - (0x1D7BF, 'M', 'φ'), - (0x1D7C0, 'M', 'χ'), - (0x1D7C1, 'M', 'ψ'), - (0x1D7C2, 'M', 'ω'), - (0x1D7C3, 'M', '∂'), - (0x1D7C4, 'M', 'ε'), - (0x1D7C5, 'M', 'θ'), - (0x1D7C6, 'M', 'κ'), - (0x1D7C7, 'M', 'φ'), - (0x1D7C8, 'M', 'ρ'), - (0x1D7C9, 'M', 'π'), - (0x1D7CA, 'M', 'ϝ'), - (0x1D7CC, 'X'), - (0x1D7CE, 'M', '0'), - (0x1D7CF, 'M', '1'), - (0x1D7D0, 'M', '2'), - (0x1D7D1, 'M', '3'), - (0x1D7D2, 'M', '4'), - (0x1D7D3, 'M', '5'), - (0x1D7D4, 'M', '6'), - (0x1D7D5, 'M', '7'), - (0x1D7D6, 'M', '8'), - (0x1D7D7, 'M', '9'), - 
(0x1D7D8, 'M', '0'), - (0x1D7D9, 'M', '1'), - (0x1D7DA, 'M', '2'), - (0x1D7DB, 'M', '3'), - (0x1D7DC, 'M', '4'), - (0x1D7DD, 'M', '5'), - (0x1D7DE, 'M', '6'), - (0x1D7DF, 'M', '7'), - (0x1D7E0, 'M', '8'), - (0x1D7E1, 'M', '9'), - (0x1D7E2, 'M', '0'), - (0x1D7E3, 'M', '1'), - (0x1D7E4, 'M', '2'), - (0x1D7E5, 'M', '3'), - (0x1D7E6, 'M', '4'), - (0x1D7E7, 'M', '5'), - (0x1D7E8, 'M', '6'), - (0x1D7E9, 'M', '7'), - (0x1D7EA, 'M', '8'), - (0x1D7EB, 'M', '9'), - (0x1D7EC, 'M', '0'), - (0x1D7ED, 'M', '1'), - (0x1D7EE, 'M', '2'), - (0x1D7EF, 'M', '3'), - (0x1D7F0, 'M', '4'), - (0x1D7F1, 'M', '5'), - (0x1D7F2, 'M', '6'), - (0x1D7F3, 'M', '7'), - (0x1D7F4, 'M', '8'), - (0x1D7F5, 'M', '9'), - (0x1D7F6, 'M', '0'), - (0x1D7F7, 'M', '1'), - (0x1D7F8, 'M', '2'), - (0x1D7F9, 'M', '3'), - (0x1D7FA, 'M', '4'), - (0x1D7FB, 'M', '5'), - (0x1D7FC, 'M', '6'), - (0x1D7FD, 'M', '7'), - (0x1D7FE, 'M', '8'), - (0x1D7FF, 'M', '9'), - (0x1D800, 'V'), - (0x1DA8C, 'X'), - (0x1DA9B, 'V'), - (0x1DAA0, 'X'), - (0x1DAA1, 'V'), - (0x1DAB0, 'X'), - (0x1DF00, 'V'), - (0x1DF1F, 'X'), - (0x1DF25, 'V'), - (0x1DF2B, 'X'), - (0x1E000, 'V'), - (0x1E007, 'X'), - (0x1E008, 'V'), - (0x1E019, 'X'), - (0x1E01B, 'V'), - (0x1E022, 'X'), - (0x1E023, 'V'), - (0x1E025, 'X'), - (0x1E026, 'V'), - (0x1E02B, 'X'), - (0x1E030, 'M', 'а'), - (0x1E031, 'M', 'б'), - (0x1E032, 'M', 'в'), - ] - -def _seg_71() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1E033, 'M', 'г'), - (0x1E034, 'M', 'д'), - (0x1E035, 'M', 'е'), - (0x1E036, 'M', 'ж'), - (0x1E037, 'M', 'з'), - (0x1E038, 'M', 'и'), - (0x1E039, 'M', 'к'), - (0x1E03A, 'M', 'л'), - (0x1E03B, 'M', 'м'), - (0x1E03C, 'M', 'о'), - (0x1E03D, 'M', 'п'), - (0x1E03E, 'M', 'р'), - (0x1E03F, 'M', 'с'), - (0x1E040, 'M', 'т'), - (0x1E041, 'M', 'у'), - (0x1E042, 'M', 'ф'), - (0x1E043, 'M', 'х'), - (0x1E044, 'M', 'ц'), - (0x1E045, 'M', 'ч'), - (0x1E046, 'M', 'ш'), - (0x1E047, 'M', 'ы'), - (0x1E048, 'M', 'э'), - (0x1E049, 'M', 'ю'), - (0x1E04A, 'M', 'ꚉ'), - (0x1E04B, 'M', 'ә'), - (0x1E04C, 'M', 'і'), - (0x1E04D, 'M', 'ј'), - (0x1E04E, 'M', 'ө'), - (0x1E04F, 'M', 'ү'), - (0x1E050, 'M', 'ӏ'), - (0x1E051, 'M', 'а'), - (0x1E052, 'M', 'б'), - (0x1E053, 'M', 'в'), - (0x1E054, 'M', 'г'), - (0x1E055, 'M', 'д'), - (0x1E056, 'M', 'е'), - (0x1E057, 'M', 'ж'), - (0x1E058, 'M', 'з'), - (0x1E059, 'M', 'и'), - (0x1E05A, 'M', 'к'), - (0x1E05B, 'M', 'л'), - (0x1E05C, 'M', 'о'), - (0x1E05D, 'M', 'п'), - (0x1E05E, 'M', 'с'), - (0x1E05F, 'M', 'у'), - (0x1E060, 'M', 'ф'), - (0x1E061, 'M', 'х'), - (0x1E062, 'M', 'ц'), - (0x1E063, 'M', 'ч'), - (0x1E064, 'M', 'ш'), - (0x1E065, 'M', 'ъ'), - (0x1E066, 'M', 'ы'), - (0x1E067, 'M', 'ґ'), - (0x1E068, 'M', 'і'), - (0x1E069, 'M', 'ѕ'), - (0x1E06A, 'M', 'џ'), - (0x1E06B, 'M', 'ҫ'), - (0x1E06C, 'M', 'ꙑ'), - (0x1E06D, 'M', 'ұ'), - (0x1E06E, 'X'), - (0x1E08F, 'V'), - (0x1E090, 'X'), - (0x1E100, 'V'), - (0x1E12D, 'X'), - (0x1E130, 'V'), - (0x1E13E, 'X'), - (0x1E140, 'V'), - (0x1E14A, 'X'), - (0x1E14E, 'V'), - (0x1E150, 'X'), - (0x1E290, 'V'), - (0x1E2AF, 'X'), - (0x1E2C0, 'V'), - (0x1E2FA, 'X'), - (0x1E2FF, 'V'), - (0x1E300, 'X'), - (0x1E4D0, 'V'), - (0x1E4FA, 'X'), - (0x1E7E0, 'V'), - (0x1E7E7, 'X'), - (0x1E7E8, 'V'), - (0x1E7EC, 'X'), - (0x1E7ED, 'V'), - (0x1E7EF, 'X'), - (0x1E7F0, 'V'), - (0x1E7FF, 'X'), - (0x1E800, 'V'), - (0x1E8C5, 'X'), - (0x1E8C7, 'V'), - (0x1E8D7, 'X'), - (0x1E900, 'M', '𞤢'), - (0x1E901, 'M', '𞤣'), - (0x1E902, 'M', '𞤤'), - (0x1E903, 'M', '𞤥'), - (0x1E904, 'M', '𞤦'), - (0x1E905, 'M', '𞤧'), - (0x1E906, 'M', '𞤨'), - (0x1E907, 'M', '𞤩'), - (0x1E908, 'M', 
'𞤪'), - (0x1E909, 'M', '𞤫'), - ] - -def _seg_72() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1E90A, 'M', '𞤬'), - (0x1E90B, 'M', '𞤭'), - (0x1E90C, 'M', '𞤮'), - (0x1E90D, 'M', '𞤯'), - (0x1E90E, 'M', '𞤰'), - (0x1E90F, 'M', '𞤱'), - (0x1E910, 'M', '𞤲'), - (0x1E911, 'M', '𞤳'), - (0x1E912, 'M', '𞤴'), - (0x1E913, 'M', '𞤵'), - (0x1E914, 'M', '𞤶'), - (0x1E915, 'M', '𞤷'), - (0x1E916, 'M', '𞤸'), - (0x1E917, 'M', '𞤹'), - (0x1E918, 'M', '𞤺'), - (0x1E919, 'M', '𞤻'), - (0x1E91A, 'M', '𞤼'), - (0x1E91B, 'M', '𞤽'), - (0x1E91C, 'M', '𞤾'), - (0x1E91D, 'M', '𞤿'), - (0x1E91E, 'M', '𞥀'), - (0x1E91F, 'M', '𞥁'), - (0x1E920, 'M', '𞥂'), - (0x1E921, 'M', '𞥃'), - (0x1E922, 'V'), - (0x1E94C, 'X'), - (0x1E950, 'V'), - (0x1E95A, 'X'), - (0x1E95E, 'V'), - (0x1E960, 'X'), - (0x1EC71, 'V'), - (0x1ECB5, 'X'), - (0x1ED01, 'V'), - (0x1ED3E, 'X'), - (0x1EE00, 'M', 'ا'), - (0x1EE01, 'M', 'ب'), - (0x1EE02, 'M', 'ج'), - (0x1EE03, 'M', 'د'), - (0x1EE04, 'X'), - (0x1EE05, 'M', 'و'), - (0x1EE06, 'M', 'ز'), - (0x1EE07, 'M', 'ح'), - (0x1EE08, 'M', 'ط'), - (0x1EE09, 'M', 'ي'), - (0x1EE0A, 'M', 'ك'), - (0x1EE0B, 'M', 'ل'), - (0x1EE0C, 'M', 'م'), - (0x1EE0D, 'M', 'ن'), - (0x1EE0E, 'M', 'س'), - (0x1EE0F, 'M', 'ع'), - (0x1EE10, 'M', 'ف'), - (0x1EE11, 'M', 'ص'), - (0x1EE12, 'M', 'ق'), - (0x1EE13, 'M', 'ر'), - (0x1EE14, 'M', 'ش'), - (0x1EE15, 'M', 'ت'), - (0x1EE16, 'M', 'ث'), - (0x1EE17, 'M', 'خ'), - (0x1EE18, 'M', 'ذ'), - (0x1EE19, 'M', 'ض'), - (0x1EE1A, 'M', 'ظ'), - (0x1EE1B, 'M', 'غ'), - (0x1EE1C, 'M', 'ٮ'), - (0x1EE1D, 'M', 'ں'), - (0x1EE1E, 'M', 'ڡ'), - (0x1EE1F, 'M', 'ٯ'), - (0x1EE20, 'X'), - (0x1EE21, 'M', 'ب'), - (0x1EE22, 'M', 'ج'), - (0x1EE23, 'X'), - (0x1EE24, 'M', 'ه'), - (0x1EE25, 'X'), - (0x1EE27, 'M', 'ح'), - (0x1EE28, 'X'), - (0x1EE29, 'M', 'ي'), - (0x1EE2A, 'M', 'ك'), - (0x1EE2B, 'M', 'ل'), - (0x1EE2C, 'M', 'م'), - (0x1EE2D, 'M', 'ن'), - (0x1EE2E, 'M', 'س'), - (0x1EE2F, 'M', 'ع'), - (0x1EE30, 'M', 'ف'), - (0x1EE31, 'M', 'ص'), - (0x1EE32, 'M', 'ق'), - (0x1EE33, 'X'), - (0x1EE34, 'M', 'ش'), - (0x1EE35, 'M', 'ت'), - (0x1EE36, 'M', 'ث'), - (0x1EE37, 'M', 'خ'), - (0x1EE38, 'X'), - (0x1EE39, 'M', 'ض'), - (0x1EE3A, 'X'), - (0x1EE3B, 'M', 'غ'), - (0x1EE3C, 'X'), - (0x1EE42, 'M', 'ج'), - (0x1EE43, 'X'), - (0x1EE47, 'M', 'ح'), - (0x1EE48, 'X'), - (0x1EE49, 'M', 'ي'), - (0x1EE4A, 'X'), - ] - -def _seg_73() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1EE4B, 'M', 'ل'), - (0x1EE4C, 'X'), - (0x1EE4D, 'M', 'ن'), - (0x1EE4E, 'M', 'س'), - (0x1EE4F, 'M', 'ع'), - (0x1EE50, 'X'), - (0x1EE51, 'M', 'ص'), - (0x1EE52, 'M', 'ق'), - (0x1EE53, 'X'), - (0x1EE54, 'M', 'ش'), - (0x1EE55, 'X'), - (0x1EE57, 'M', 'خ'), - (0x1EE58, 'X'), - (0x1EE59, 'M', 'ض'), - (0x1EE5A, 'X'), - (0x1EE5B, 'M', 'غ'), - (0x1EE5C, 'X'), - (0x1EE5D, 'M', 'ں'), - (0x1EE5E, 'X'), - (0x1EE5F, 'M', 'ٯ'), - (0x1EE60, 'X'), - (0x1EE61, 'M', 'ب'), - (0x1EE62, 'M', 'ج'), - (0x1EE63, 'X'), - (0x1EE64, 'M', 'ه'), - (0x1EE65, 'X'), - (0x1EE67, 'M', 'ح'), - (0x1EE68, 'M', 'ط'), - (0x1EE69, 'M', 'ي'), - (0x1EE6A, 'M', 'ك'), - (0x1EE6B, 'X'), - (0x1EE6C, 'M', 'م'), - (0x1EE6D, 'M', 'ن'), - (0x1EE6E, 'M', 'س'), - (0x1EE6F, 'M', 'ع'), - (0x1EE70, 'M', 'ف'), - (0x1EE71, 'M', 'ص'), - (0x1EE72, 'M', 'ق'), - (0x1EE73, 'X'), - (0x1EE74, 'M', 'ش'), - (0x1EE75, 'M', 'ت'), - (0x1EE76, 'M', 'ث'), - (0x1EE77, 'M', 'خ'), - (0x1EE78, 'X'), - (0x1EE79, 'M', 'ض'), - (0x1EE7A, 'M', 'ظ'), - (0x1EE7B, 'M', 'غ'), - (0x1EE7C, 'M', 'ٮ'), - (0x1EE7D, 'X'), - (0x1EE7E, 'M', 'ڡ'), - (0x1EE7F, 'X'), - (0x1EE80, 'M', 'ا'), - (0x1EE81, 'M', 'ب'), - (0x1EE82, 'M', 'ج'), 
- (0x1EE83, 'M', 'د'), - (0x1EE84, 'M', 'ه'), - (0x1EE85, 'M', 'و'), - (0x1EE86, 'M', 'ز'), - (0x1EE87, 'M', 'ح'), - (0x1EE88, 'M', 'ط'), - (0x1EE89, 'M', 'ي'), - (0x1EE8A, 'X'), - (0x1EE8B, 'M', 'ل'), - (0x1EE8C, 'M', 'م'), - (0x1EE8D, 'M', 'ن'), - (0x1EE8E, 'M', 'س'), - (0x1EE8F, 'M', 'ع'), - (0x1EE90, 'M', 'ف'), - (0x1EE91, 'M', 'ص'), - (0x1EE92, 'M', 'ق'), - (0x1EE93, 'M', 'ر'), - (0x1EE94, 'M', 'ش'), - (0x1EE95, 'M', 'ت'), - (0x1EE96, 'M', 'ث'), - (0x1EE97, 'M', 'خ'), - (0x1EE98, 'M', 'ذ'), - (0x1EE99, 'M', 'ض'), - (0x1EE9A, 'M', 'ظ'), - (0x1EE9B, 'M', 'غ'), - (0x1EE9C, 'X'), - (0x1EEA1, 'M', 'ب'), - (0x1EEA2, 'M', 'ج'), - (0x1EEA3, 'M', 'د'), - (0x1EEA4, 'X'), - (0x1EEA5, 'M', 'و'), - (0x1EEA6, 'M', 'ز'), - (0x1EEA7, 'M', 'ح'), - (0x1EEA8, 'M', 'ط'), - (0x1EEA9, 'M', 'ي'), - (0x1EEAA, 'X'), - (0x1EEAB, 'M', 'ل'), - (0x1EEAC, 'M', 'م'), - (0x1EEAD, 'M', 'ن'), - (0x1EEAE, 'M', 'س'), - (0x1EEAF, 'M', 'ع'), - (0x1EEB0, 'M', 'ف'), - (0x1EEB1, 'M', 'ص'), - (0x1EEB2, 'M', 'ق'), - (0x1EEB3, 'M', 'ر'), - (0x1EEB4, 'M', 'ش'), - ] - -def _seg_74() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1EEB5, 'M', 'ت'), - (0x1EEB6, 'M', 'ث'), - (0x1EEB7, 'M', 'خ'), - (0x1EEB8, 'M', 'ذ'), - (0x1EEB9, 'M', 'ض'), - (0x1EEBA, 'M', 'ظ'), - (0x1EEBB, 'M', 'غ'), - (0x1EEBC, 'X'), - (0x1EEF0, 'V'), - (0x1EEF2, 'X'), - (0x1F000, 'V'), - (0x1F02C, 'X'), - (0x1F030, 'V'), - (0x1F094, 'X'), - (0x1F0A0, 'V'), - (0x1F0AF, 'X'), - (0x1F0B1, 'V'), - (0x1F0C0, 'X'), - (0x1F0C1, 'V'), - (0x1F0D0, 'X'), - (0x1F0D1, 'V'), - (0x1F0F6, 'X'), - (0x1F101, '3', '0,'), - (0x1F102, '3', '1,'), - (0x1F103, '3', '2,'), - (0x1F104, '3', '3,'), - (0x1F105, '3', '4,'), - (0x1F106, '3', '5,'), - (0x1F107, '3', '6,'), - (0x1F108, '3', '7,'), - (0x1F109, '3', '8,'), - (0x1F10A, '3', '9,'), - (0x1F10B, 'V'), - (0x1F110, '3', '(a)'), - (0x1F111, '3', '(b)'), - (0x1F112, '3', '(c)'), - (0x1F113, '3', '(d)'), - (0x1F114, '3', '(e)'), - (0x1F115, '3', '(f)'), - (0x1F116, '3', '(g)'), - (0x1F117, '3', '(h)'), - (0x1F118, '3', '(i)'), - (0x1F119, '3', '(j)'), - (0x1F11A, '3', '(k)'), - (0x1F11B, '3', '(l)'), - (0x1F11C, '3', '(m)'), - (0x1F11D, '3', '(n)'), - (0x1F11E, '3', '(o)'), - (0x1F11F, '3', '(p)'), - (0x1F120, '3', '(q)'), - (0x1F121, '3', '(r)'), - (0x1F122, '3', '(s)'), - (0x1F123, '3', '(t)'), - (0x1F124, '3', '(u)'), - (0x1F125, '3', '(v)'), - (0x1F126, '3', '(w)'), - (0x1F127, '3', '(x)'), - (0x1F128, '3', '(y)'), - (0x1F129, '3', '(z)'), - (0x1F12A, 'M', '〔s〕'), - (0x1F12B, 'M', 'c'), - (0x1F12C, 'M', 'r'), - (0x1F12D, 'M', 'cd'), - (0x1F12E, 'M', 'wz'), - (0x1F12F, 'V'), - (0x1F130, 'M', 'a'), - (0x1F131, 'M', 'b'), - (0x1F132, 'M', 'c'), - (0x1F133, 'M', 'd'), - (0x1F134, 'M', 'e'), - (0x1F135, 'M', 'f'), - (0x1F136, 'M', 'g'), - (0x1F137, 'M', 'h'), - (0x1F138, 'M', 'i'), - (0x1F139, 'M', 'j'), - (0x1F13A, 'M', 'k'), - (0x1F13B, 'M', 'l'), - (0x1F13C, 'M', 'm'), - (0x1F13D, 'M', 'n'), - (0x1F13E, 'M', 'o'), - (0x1F13F, 'M', 'p'), - (0x1F140, 'M', 'q'), - (0x1F141, 'M', 'r'), - (0x1F142, 'M', 's'), - (0x1F143, 'M', 't'), - (0x1F144, 'M', 'u'), - (0x1F145, 'M', 'v'), - (0x1F146, 'M', 'w'), - (0x1F147, 'M', 'x'), - (0x1F148, 'M', 'y'), - (0x1F149, 'M', 'z'), - (0x1F14A, 'M', 'hv'), - (0x1F14B, 'M', 'mv'), - (0x1F14C, 'M', 'sd'), - (0x1F14D, 'M', 'ss'), - (0x1F14E, 'M', 'ppv'), - (0x1F14F, 'M', 'wc'), - (0x1F150, 'V'), - (0x1F16A, 'M', 'mc'), - (0x1F16B, 'M', 'md'), - ] - -def _seg_75() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1F16C, 'M', 'mr'), - (0x1F16D, 'V'), - (0x1F190, 
'M', 'dj'), - (0x1F191, 'V'), - (0x1F1AE, 'X'), - (0x1F1E6, 'V'), - (0x1F200, 'M', 'ほか'), - (0x1F201, 'M', 'ココ'), - (0x1F202, 'M', 'サ'), - (0x1F203, 'X'), - (0x1F210, 'M', '手'), - (0x1F211, 'M', '字'), - (0x1F212, 'M', '双'), - (0x1F213, 'M', 'デ'), - (0x1F214, 'M', '二'), - (0x1F215, 'M', '多'), - (0x1F216, 'M', '解'), - (0x1F217, 'M', '天'), - (0x1F218, 'M', '交'), - (0x1F219, 'M', '映'), - (0x1F21A, 'M', '無'), - (0x1F21B, 'M', '料'), - (0x1F21C, 'M', '前'), - (0x1F21D, 'M', '後'), - (0x1F21E, 'M', '再'), - (0x1F21F, 'M', '新'), - (0x1F220, 'M', '初'), - (0x1F221, 'M', '終'), - (0x1F222, 'M', '生'), - (0x1F223, 'M', '販'), - (0x1F224, 'M', '声'), - (0x1F225, 'M', '吹'), - (0x1F226, 'M', '演'), - (0x1F227, 'M', '投'), - (0x1F228, 'M', '捕'), - (0x1F229, 'M', '一'), - (0x1F22A, 'M', '三'), - (0x1F22B, 'M', '遊'), - (0x1F22C, 'M', '左'), - (0x1F22D, 'M', '中'), - (0x1F22E, 'M', '右'), - (0x1F22F, 'M', '指'), - (0x1F230, 'M', '走'), - (0x1F231, 'M', '打'), - (0x1F232, 'M', '禁'), - (0x1F233, 'M', '空'), - (0x1F234, 'M', '合'), - (0x1F235, 'M', '満'), - (0x1F236, 'M', '有'), - (0x1F237, 'M', '月'), - (0x1F238, 'M', '申'), - (0x1F239, 'M', '割'), - (0x1F23A, 'M', '営'), - (0x1F23B, 'M', '配'), - (0x1F23C, 'X'), - (0x1F240, 'M', '〔本〕'), - (0x1F241, 'M', '〔三〕'), - (0x1F242, 'M', '〔二〕'), - (0x1F243, 'M', '〔安〕'), - (0x1F244, 'M', '〔点〕'), - (0x1F245, 'M', '〔打〕'), - (0x1F246, 'M', '〔盗〕'), - (0x1F247, 'M', '〔勝〕'), - (0x1F248, 'M', '〔敗〕'), - (0x1F249, 'X'), - (0x1F250, 'M', '得'), - (0x1F251, 'M', '可'), - (0x1F252, 'X'), - (0x1F260, 'V'), - (0x1F266, 'X'), - (0x1F300, 'V'), - (0x1F6D8, 'X'), - (0x1F6DC, 'V'), - (0x1F6ED, 'X'), - (0x1F6F0, 'V'), - (0x1F6FD, 'X'), - (0x1F700, 'V'), - (0x1F777, 'X'), - (0x1F77B, 'V'), - (0x1F7DA, 'X'), - (0x1F7E0, 'V'), - (0x1F7EC, 'X'), - (0x1F7F0, 'V'), - (0x1F7F1, 'X'), - (0x1F800, 'V'), - (0x1F80C, 'X'), - (0x1F810, 'V'), - (0x1F848, 'X'), - (0x1F850, 'V'), - (0x1F85A, 'X'), - (0x1F860, 'V'), - (0x1F888, 'X'), - (0x1F890, 'V'), - (0x1F8AE, 'X'), - (0x1F8B0, 'V'), - (0x1F8B2, 'X'), - (0x1F900, 'V'), - (0x1FA54, 'X'), - (0x1FA60, 'V'), - (0x1FA6E, 'X'), - ] - -def _seg_76() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1FA70, 'V'), - (0x1FA7D, 'X'), - (0x1FA80, 'V'), - (0x1FA89, 'X'), - (0x1FA90, 'V'), - (0x1FABE, 'X'), - (0x1FABF, 'V'), - (0x1FAC6, 'X'), - (0x1FACE, 'V'), - (0x1FADC, 'X'), - (0x1FAE0, 'V'), - (0x1FAE9, 'X'), - (0x1FAF0, 'V'), - (0x1FAF9, 'X'), - (0x1FB00, 'V'), - (0x1FB93, 'X'), - (0x1FB94, 'V'), - (0x1FBCB, 'X'), - (0x1FBF0, 'M', '0'), - (0x1FBF1, 'M', '1'), - (0x1FBF2, 'M', '2'), - (0x1FBF3, 'M', '3'), - (0x1FBF4, 'M', '4'), - (0x1FBF5, 'M', '5'), - (0x1FBF6, 'M', '6'), - (0x1FBF7, 'M', '7'), - (0x1FBF8, 'M', '8'), - (0x1FBF9, 'M', '9'), - (0x1FBFA, 'X'), - (0x20000, 'V'), - (0x2A6E0, 'X'), - (0x2A700, 'V'), - (0x2B73A, 'X'), - (0x2B740, 'V'), - (0x2B81E, 'X'), - (0x2B820, 'V'), - (0x2CEA2, 'X'), - (0x2CEB0, 'V'), - (0x2EBE1, 'X'), - (0x2F800, 'M', '丽'), - (0x2F801, 'M', '丸'), - (0x2F802, 'M', '乁'), - (0x2F803, 'M', '𠄢'), - (0x2F804, 'M', '你'), - (0x2F805, 'M', '侮'), - (0x2F806, 'M', '侻'), - (0x2F807, 'M', '倂'), - (0x2F808, 'M', '偺'), - (0x2F809, 'M', '備'), - (0x2F80A, 'M', '僧'), - (0x2F80B, 'M', '像'), - (0x2F80C, 'M', '㒞'), - (0x2F80D, 'M', '𠘺'), - (0x2F80E, 'M', '免'), - (0x2F80F, 'M', '兔'), - (0x2F810, 'M', '兤'), - (0x2F811, 'M', '具'), - (0x2F812, 'M', '𠔜'), - (0x2F813, 'M', '㒹'), - (0x2F814, 'M', '內'), - (0x2F815, 'M', '再'), - (0x2F816, 'M', '𠕋'), - (0x2F817, 'M', '冗'), - (0x2F818, 'M', '冤'), - (0x2F819, 'M', '仌'), - (0x2F81A, 'M', '冬'), - (0x2F81B, 'M', '况'), - 
(0x2F81C, 'M', '𩇟'), - (0x2F81D, 'M', '凵'), - (0x2F81E, 'M', '刃'), - (0x2F81F, 'M', '㓟'), - (0x2F820, 'M', '刻'), - (0x2F821, 'M', '剆'), - (0x2F822, 'M', '割'), - (0x2F823, 'M', '剷'), - (0x2F824, 'M', '㔕'), - (0x2F825, 'M', '勇'), - (0x2F826, 'M', '勉'), - (0x2F827, 'M', '勤'), - (0x2F828, 'M', '勺'), - (0x2F829, 'M', '包'), - (0x2F82A, 'M', '匆'), - (0x2F82B, 'M', '北'), - (0x2F82C, 'M', '卉'), - (0x2F82D, 'M', '卑'), - (0x2F82E, 'M', '博'), - (0x2F82F, 'M', '即'), - (0x2F830, 'M', '卽'), - (0x2F831, 'M', '卿'), - (0x2F834, 'M', '𠨬'), - (0x2F835, 'M', '灰'), - (0x2F836, 'M', '及'), - (0x2F837, 'M', '叟'), - (0x2F838, 'M', '𠭣'), - (0x2F839, 'M', '叫'), - (0x2F83A, 'M', '叱'), - (0x2F83B, 'M', '吆'), - (0x2F83C, 'M', '咞'), - (0x2F83D, 'M', '吸'), - (0x2F83E, 'M', '呈'), - ] - -def _seg_77() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x2F83F, 'M', '周'), - (0x2F840, 'M', '咢'), - (0x2F841, 'M', '哶'), - (0x2F842, 'M', '唐'), - (0x2F843, 'M', '啓'), - (0x2F844, 'M', '啣'), - (0x2F845, 'M', '善'), - (0x2F847, 'M', '喙'), - (0x2F848, 'M', '喫'), - (0x2F849, 'M', '喳'), - (0x2F84A, 'M', '嗂'), - (0x2F84B, 'M', '圖'), - (0x2F84C, 'M', '嘆'), - (0x2F84D, 'M', '圗'), - (0x2F84E, 'M', '噑'), - (0x2F84F, 'M', '噴'), - (0x2F850, 'M', '切'), - (0x2F851, 'M', '壮'), - (0x2F852, 'M', '城'), - (0x2F853, 'M', '埴'), - (0x2F854, 'M', '堍'), - (0x2F855, 'M', '型'), - (0x2F856, 'M', '堲'), - (0x2F857, 'M', '報'), - (0x2F858, 'M', '墬'), - (0x2F859, 'M', '𡓤'), - (0x2F85A, 'M', '売'), - (0x2F85B, 'M', '壷'), - (0x2F85C, 'M', '夆'), - (0x2F85D, 'M', '多'), - (0x2F85E, 'M', '夢'), - (0x2F85F, 'M', '奢'), - (0x2F860, 'M', '𡚨'), - (0x2F861, 'M', '𡛪'), - (0x2F862, 'M', '姬'), - (0x2F863, 'M', '娛'), - (0x2F864, 'M', '娧'), - (0x2F865, 'M', '姘'), - (0x2F866, 'M', '婦'), - (0x2F867, 'M', '㛮'), - (0x2F868, 'X'), - (0x2F869, 'M', '嬈'), - (0x2F86A, 'M', '嬾'), - (0x2F86C, 'M', '𡧈'), - (0x2F86D, 'M', '寃'), - (0x2F86E, 'M', '寘'), - (0x2F86F, 'M', '寧'), - (0x2F870, 'M', '寳'), - (0x2F871, 'M', '𡬘'), - (0x2F872, 'M', '寿'), - (0x2F873, 'M', '将'), - (0x2F874, 'X'), - (0x2F875, 'M', '尢'), - (0x2F876, 'M', '㞁'), - (0x2F877, 'M', '屠'), - (0x2F878, 'M', '屮'), - (0x2F879, 'M', '峀'), - (0x2F87A, 'M', '岍'), - (0x2F87B, 'M', '𡷤'), - (0x2F87C, 'M', '嵃'), - (0x2F87D, 'M', '𡷦'), - (0x2F87E, 'M', '嵮'), - (0x2F87F, 'M', '嵫'), - (0x2F880, 'M', '嵼'), - (0x2F881, 'M', '巡'), - (0x2F882, 'M', '巢'), - (0x2F883, 'M', '㠯'), - (0x2F884, 'M', '巽'), - (0x2F885, 'M', '帨'), - (0x2F886, 'M', '帽'), - (0x2F887, 'M', '幩'), - (0x2F888, 'M', '㡢'), - (0x2F889, 'M', '𢆃'), - (0x2F88A, 'M', '㡼'), - (0x2F88B, 'M', '庰'), - (0x2F88C, 'M', '庳'), - (0x2F88D, 'M', '庶'), - (0x2F88E, 'M', '廊'), - (0x2F88F, 'M', '𪎒'), - (0x2F890, 'M', '廾'), - (0x2F891, 'M', '𢌱'), - (0x2F893, 'M', '舁'), - (0x2F894, 'M', '弢'), - (0x2F896, 'M', '㣇'), - (0x2F897, 'M', '𣊸'), - (0x2F898, 'M', '𦇚'), - (0x2F899, 'M', '形'), - (0x2F89A, 'M', '彫'), - (0x2F89B, 'M', '㣣'), - (0x2F89C, 'M', '徚'), - (0x2F89D, 'M', '忍'), - (0x2F89E, 'M', '志'), - (0x2F89F, 'M', '忹'), - (0x2F8A0, 'M', '悁'), - (0x2F8A1, 'M', '㤺'), - (0x2F8A2, 'M', '㤜'), - (0x2F8A3, 'M', '悔'), - (0x2F8A4, 'M', '𢛔'), - (0x2F8A5, 'M', '惇'), - (0x2F8A6, 'M', '慈'), - ] - -def _seg_78() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x2F8A7, 'M', '慌'), - (0x2F8A8, 'M', '慎'), - (0x2F8A9, 'M', '慌'), - (0x2F8AA, 'M', '慺'), - (0x2F8AB, 'M', '憎'), - (0x2F8AC, 'M', '憲'), - (0x2F8AD, 'M', '憤'), - (0x2F8AE, 'M', '憯'), - (0x2F8AF, 'M', '懞'), - (0x2F8B0, 'M', '懲'), - (0x2F8B1, 'M', '懶'), - (0x2F8B2, 'M', '成'), - (0x2F8B3, 'M', '戛'), - (0x2F8B4, 'M', '扝'), - (0x2F8B5, 
'M', '抱'), - (0x2F8B6, 'M', '拔'), - (0x2F8B7, 'M', '捐'), - (0x2F8B8, 'M', '𢬌'), - (0x2F8B9, 'M', '挽'), - (0x2F8BA, 'M', '拼'), - (0x2F8BB, 'M', '捨'), - (0x2F8BC, 'M', '掃'), - (0x2F8BD, 'M', '揤'), - (0x2F8BE, 'M', '𢯱'), - (0x2F8BF, 'M', '搢'), - (0x2F8C0, 'M', '揅'), - (0x2F8C1, 'M', '掩'), - (0x2F8C2, 'M', '㨮'), - (0x2F8C3, 'M', '摩'), - (0x2F8C4, 'M', '摾'), - (0x2F8C5, 'M', '撝'), - (0x2F8C6, 'M', '摷'), - (0x2F8C7, 'M', '㩬'), - (0x2F8C8, 'M', '敏'), - (0x2F8C9, 'M', '敬'), - (0x2F8CA, 'M', '𣀊'), - (0x2F8CB, 'M', '旣'), - (0x2F8CC, 'M', '書'), - (0x2F8CD, 'M', '晉'), - (0x2F8CE, 'M', '㬙'), - (0x2F8CF, 'M', '暑'), - (0x2F8D0, 'M', '㬈'), - (0x2F8D1, 'M', '㫤'), - (0x2F8D2, 'M', '冒'), - (0x2F8D3, 'M', '冕'), - (0x2F8D4, 'M', '最'), - (0x2F8D5, 'M', '暜'), - (0x2F8D6, 'M', '肭'), - (0x2F8D7, 'M', '䏙'), - (0x2F8D8, 'M', '朗'), - (0x2F8D9, 'M', '望'), - (0x2F8DA, 'M', '朡'), - (0x2F8DB, 'M', '杞'), - (0x2F8DC, 'M', '杓'), - (0x2F8DD, 'M', '𣏃'), - (0x2F8DE, 'M', '㭉'), - (0x2F8DF, 'M', '柺'), - (0x2F8E0, 'M', '枅'), - (0x2F8E1, 'M', '桒'), - (0x2F8E2, 'M', '梅'), - (0x2F8E3, 'M', '𣑭'), - (0x2F8E4, 'M', '梎'), - (0x2F8E5, 'M', '栟'), - (0x2F8E6, 'M', '椔'), - (0x2F8E7, 'M', '㮝'), - (0x2F8E8, 'M', '楂'), - (0x2F8E9, 'M', '榣'), - (0x2F8EA, 'M', '槪'), - (0x2F8EB, 'M', '檨'), - (0x2F8EC, 'M', '𣚣'), - (0x2F8ED, 'M', '櫛'), - (0x2F8EE, 'M', '㰘'), - (0x2F8EF, 'M', '次'), - (0x2F8F0, 'M', '𣢧'), - (0x2F8F1, 'M', '歔'), - (0x2F8F2, 'M', '㱎'), - (0x2F8F3, 'M', '歲'), - (0x2F8F4, 'M', '殟'), - (0x2F8F5, 'M', '殺'), - (0x2F8F6, 'M', '殻'), - (0x2F8F7, 'M', '𣪍'), - (0x2F8F8, 'M', '𡴋'), - (0x2F8F9, 'M', '𣫺'), - (0x2F8FA, 'M', '汎'), - (0x2F8FB, 'M', '𣲼'), - (0x2F8FC, 'M', '沿'), - (0x2F8FD, 'M', '泍'), - (0x2F8FE, 'M', '汧'), - (0x2F8FF, 'M', '洖'), - (0x2F900, 'M', '派'), - (0x2F901, 'M', '海'), - (0x2F902, 'M', '流'), - (0x2F903, 'M', '浩'), - (0x2F904, 'M', '浸'), - (0x2F905, 'M', '涅'), - (0x2F906, 'M', '𣴞'), - (0x2F907, 'M', '洴'), - (0x2F908, 'M', '港'), - (0x2F909, 'M', '湮'), - (0x2F90A, 'M', '㴳'), - ] - -def _seg_79() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x2F90B, 'M', '滋'), - (0x2F90C, 'M', '滇'), - (0x2F90D, 'M', '𣻑'), - (0x2F90E, 'M', '淹'), - (0x2F90F, 'M', '潮'), - (0x2F910, 'M', '𣽞'), - (0x2F911, 'M', '𣾎'), - (0x2F912, 'M', '濆'), - (0x2F913, 'M', '瀹'), - (0x2F914, 'M', '瀞'), - (0x2F915, 'M', '瀛'), - (0x2F916, 'M', '㶖'), - (0x2F917, 'M', '灊'), - (0x2F918, 'M', '災'), - (0x2F919, 'M', '灷'), - (0x2F91A, 'M', '炭'), - (0x2F91B, 'M', '𠔥'), - (0x2F91C, 'M', '煅'), - (0x2F91D, 'M', '𤉣'), - (0x2F91E, 'M', '熜'), - (0x2F91F, 'X'), - (0x2F920, 'M', '爨'), - (0x2F921, 'M', '爵'), - (0x2F922, 'M', '牐'), - (0x2F923, 'M', '𤘈'), - (0x2F924, 'M', '犀'), - (0x2F925, 'M', '犕'), - (0x2F926, 'M', '𤜵'), - (0x2F927, 'M', '𤠔'), - (0x2F928, 'M', '獺'), - (0x2F929, 'M', '王'), - (0x2F92A, 'M', '㺬'), - (0x2F92B, 'M', '玥'), - (0x2F92C, 'M', '㺸'), - (0x2F92E, 'M', '瑇'), - (0x2F92F, 'M', '瑜'), - (0x2F930, 'M', '瑱'), - (0x2F931, 'M', '璅'), - (0x2F932, 'M', '瓊'), - (0x2F933, 'M', '㼛'), - (0x2F934, 'M', '甤'), - (0x2F935, 'M', '𤰶'), - (0x2F936, 'M', '甾'), - (0x2F937, 'M', '𤲒'), - (0x2F938, 'M', '異'), - (0x2F939, 'M', '𢆟'), - (0x2F93A, 'M', '瘐'), - (0x2F93B, 'M', '𤾡'), - (0x2F93C, 'M', '𤾸'), - (0x2F93D, 'M', '𥁄'), - (0x2F93E, 'M', '㿼'), - (0x2F93F, 'M', '䀈'), - (0x2F940, 'M', '直'), - (0x2F941, 'M', '𥃳'), - (0x2F942, 'M', '𥃲'), - (0x2F943, 'M', '𥄙'), - (0x2F944, 'M', '𥄳'), - (0x2F945, 'M', '眞'), - (0x2F946, 'M', '真'), - (0x2F948, 'M', '睊'), - (0x2F949, 'M', '䀹'), - (0x2F94A, 'M', '瞋'), - (0x2F94B, 'M', '䁆'), - (0x2F94C, 'M', '䂖'), - (0x2F94D, 'M', '𥐝'), - (0x2F94E, 
'M', '硎'), - (0x2F94F, 'M', '碌'), - (0x2F950, 'M', '磌'), - (0x2F951, 'M', '䃣'), - (0x2F952, 'M', '𥘦'), - (0x2F953, 'M', '祖'), - (0x2F954, 'M', '𥚚'), - (0x2F955, 'M', '𥛅'), - (0x2F956, 'M', '福'), - (0x2F957, 'M', '秫'), - (0x2F958, 'M', '䄯'), - (0x2F959, 'M', '穀'), - (0x2F95A, 'M', '穊'), - (0x2F95B, 'M', '穏'), - (0x2F95C, 'M', '𥥼'), - (0x2F95D, 'M', '𥪧'), - (0x2F95F, 'X'), - (0x2F960, 'M', '䈂'), - (0x2F961, 'M', '𥮫'), - (0x2F962, 'M', '篆'), - (0x2F963, 'M', '築'), - (0x2F964, 'M', '䈧'), - (0x2F965, 'M', '𥲀'), - (0x2F966, 'M', '糒'), - (0x2F967, 'M', '䊠'), - (0x2F968, 'M', '糨'), - (0x2F969, 'M', '糣'), - (0x2F96A, 'M', '紀'), - (0x2F96B, 'M', '𥾆'), - (0x2F96C, 'M', '絣'), - (0x2F96D, 'M', '䌁'), - (0x2F96E, 'M', '緇'), - (0x2F96F, 'M', '縂'), - (0x2F970, 'M', '繅'), - (0x2F971, 'M', '䌴'), - ] - -def _seg_80() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x2F972, 'M', '𦈨'), - (0x2F973, 'M', '𦉇'), - (0x2F974, 'M', '䍙'), - (0x2F975, 'M', '𦋙'), - (0x2F976, 'M', '罺'), - (0x2F977, 'M', '𦌾'), - (0x2F978, 'M', '羕'), - (0x2F979, 'M', '翺'), - (0x2F97A, 'M', '者'), - (0x2F97B, 'M', '𦓚'), - (0x2F97C, 'M', '𦔣'), - (0x2F97D, 'M', '聠'), - (0x2F97E, 'M', '𦖨'), - (0x2F97F, 'M', '聰'), - (0x2F980, 'M', '𣍟'), - (0x2F981, 'M', '䏕'), - (0x2F982, 'M', '育'), - (0x2F983, 'M', '脃'), - (0x2F984, 'M', '䐋'), - (0x2F985, 'M', '脾'), - (0x2F986, 'M', '媵'), - (0x2F987, 'M', '𦞧'), - (0x2F988, 'M', '𦞵'), - (0x2F989, 'M', '𣎓'), - (0x2F98A, 'M', '𣎜'), - (0x2F98B, 'M', '舁'), - (0x2F98C, 'M', '舄'), - (0x2F98D, 'M', '辞'), - (0x2F98E, 'M', '䑫'), - (0x2F98F, 'M', '芑'), - (0x2F990, 'M', '芋'), - (0x2F991, 'M', '芝'), - (0x2F992, 'M', '劳'), - (0x2F993, 'M', '花'), - (0x2F994, 'M', '芳'), - (0x2F995, 'M', '芽'), - (0x2F996, 'M', '苦'), - (0x2F997, 'M', '𦬼'), - (0x2F998, 'M', '若'), - (0x2F999, 'M', '茝'), - (0x2F99A, 'M', '荣'), - (0x2F99B, 'M', '莭'), - (0x2F99C, 'M', '茣'), - (0x2F99D, 'M', '莽'), - (0x2F99E, 'M', '菧'), - (0x2F99F, 'M', '著'), - (0x2F9A0, 'M', '荓'), - (0x2F9A1, 'M', '菊'), - (0x2F9A2, 'M', '菌'), - (0x2F9A3, 'M', '菜'), - (0x2F9A4, 'M', '𦰶'), - (0x2F9A5, 'M', '𦵫'), - (0x2F9A6, 'M', '𦳕'), - (0x2F9A7, 'M', '䔫'), - (0x2F9A8, 'M', '蓱'), - (0x2F9A9, 'M', '蓳'), - (0x2F9AA, 'M', '蔖'), - (0x2F9AB, 'M', '𧏊'), - (0x2F9AC, 'M', '蕤'), - (0x2F9AD, 'M', '𦼬'), - (0x2F9AE, 'M', '䕝'), - (0x2F9AF, 'M', '䕡'), - (0x2F9B0, 'M', '𦾱'), - (0x2F9B1, 'M', '𧃒'), - (0x2F9B2, 'M', '䕫'), - (0x2F9B3, 'M', '虐'), - (0x2F9B4, 'M', '虜'), - (0x2F9B5, 'M', '虧'), - (0x2F9B6, 'M', '虩'), - (0x2F9B7, 'M', '蚩'), - (0x2F9B8, 'M', '蚈'), - (0x2F9B9, 'M', '蜎'), - (0x2F9BA, 'M', '蛢'), - (0x2F9BB, 'M', '蝹'), - (0x2F9BC, 'M', '蜨'), - (0x2F9BD, 'M', '蝫'), - (0x2F9BE, 'M', '螆'), - (0x2F9BF, 'X'), - (0x2F9C0, 'M', '蟡'), - (0x2F9C1, 'M', '蠁'), - (0x2F9C2, 'M', '䗹'), - (0x2F9C3, 'M', '衠'), - (0x2F9C4, 'M', '衣'), - (0x2F9C5, 'M', '𧙧'), - (0x2F9C6, 'M', '裗'), - (0x2F9C7, 'M', '裞'), - (0x2F9C8, 'M', '䘵'), - (0x2F9C9, 'M', '裺'), - (0x2F9CA, 'M', '㒻'), - (0x2F9CB, 'M', '𧢮'), - (0x2F9CC, 'M', '𧥦'), - (0x2F9CD, 'M', '䚾'), - (0x2F9CE, 'M', '䛇'), - (0x2F9CF, 'M', '誠'), - (0x2F9D0, 'M', '諭'), - (0x2F9D1, 'M', '變'), - (0x2F9D2, 'M', '豕'), - (0x2F9D3, 'M', '𧲨'), - (0x2F9D4, 'M', '貫'), - (0x2F9D5, 'M', '賁'), - ] - -def _seg_81() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x2F9D6, 'M', '贛'), - (0x2F9D7, 'M', '起'), - (0x2F9D8, 'M', '𧼯'), - (0x2F9D9, 'M', '𠠄'), - (0x2F9DA, 'M', '跋'), - (0x2F9DB, 'M', '趼'), - (0x2F9DC, 'M', '跰'), - (0x2F9DD, 'M', '𠣞'), - (0x2F9DE, 'M', '軔'), - (0x2F9DF, 'M', '輸'), - (0x2F9E0, 'M', '𨗒'), - (0x2F9E1, 'M', '𨗭'), - (0x2F9E2, 'M', 
'邔'), - (0x2F9E3, 'M', '郱'), - (0x2F9E4, 'M', '鄑'), - (0x2F9E5, 'M', '𨜮'), - (0x2F9E6, 'M', '鄛'), - (0x2F9E7, 'M', '鈸'), - (0x2F9E8, 'M', '鋗'), - (0x2F9E9, 'M', '鋘'), - (0x2F9EA, 'M', '鉼'), - (0x2F9EB, 'M', '鏹'), - (0x2F9EC, 'M', '鐕'), - (0x2F9ED, 'M', '𨯺'), - (0x2F9EE, 'M', '開'), - (0x2F9EF, 'M', '䦕'), - (0x2F9F0, 'M', '閷'), - (0x2F9F1, 'M', '𨵷'), - (0x2F9F2, 'M', '䧦'), - (0x2F9F3, 'M', '雃'), - (0x2F9F4, 'M', '嶲'), - (0x2F9F5, 'M', '霣'), - (0x2F9F6, 'M', '𩅅'), - (0x2F9F7, 'M', '𩈚'), - (0x2F9F8, 'M', '䩮'), - (0x2F9F9, 'M', '䩶'), - (0x2F9FA, 'M', '韠'), - (0x2F9FB, 'M', '𩐊'), - (0x2F9FC, 'M', '䪲'), - (0x2F9FD, 'M', '𩒖'), - (0x2F9FE, 'M', '頋'), - (0x2FA00, 'M', '頩'), - (0x2FA01, 'M', '𩖶'), - (0x2FA02, 'M', '飢'), - (0x2FA03, 'M', '䬳'), - (0x2FA04, 'M', '餩'), - (0x2FA05, 'M', '馧'), - (0x2FA06, 'M', '駂'), - (0x2FA07, 'M', '駾'), - (0x2FA08, 'M', '䯎'), - (0x2FA09, 'M', '𩬰'), - (0x2FA0A, 'M', '鬒'), - (0x2FA0B, 'M', '鱀'), - (0x2FA0C, 'M', '鳽'), - (0x2FA0D, 'M', '䳎'), - (0x2FA0E, 'M', '䳭'), - (0x2FA0F, 'M', '鵧'), - (0x2FA10, 'M', '𪃎'), - (0x2FA11, 'M', '䳸'), - (0x2FA12, 'M', '𪄅'), - (0x2FA13, 'M', '𪈎'), - (0x2FA14, 'M', '𪊑'), - (0x2FA15, 'M', '麻'), - (0x2FA16, 'M', '䵖'), - (0x2FA17, 'M', '黹'), - (0x2FA18, 'M', '黾'), - (0x2FA19, 'M', '鼅'), - (0x2FA1A, 'M', '鼏'), - (0x2FA1B, 'M', '鼖'), - (0x2FA1C, 'M', '鼻'), - (0x2FA1D, 'M', '𪘀'), - (0x2FA1E, 'X'), - (0x30000, 'V'), - (0x3134B, 'X'), - (0x31350, 'V'), - (0x323B0, 'X'), - (0xE0100, 'I'), - (0xE01F0, 'X'), - ] - -uts46data = tuple( - _seg_0() - + _seg_1() - + _seg_2() - + _seg_3() - + _seg_4() - + _seg_5() - + _seg_6() - + _seg_7() - + _seg_8() - + _seg_9() - + _seg_10() - + _seg_11() - + _seg_12() - + _seg_13() - + _seg_14() - + _seg_15() - + _seg_16() - + _seg_17() - + _seg_18() - + _seg_19() - + _seg_20() - + _seg_21() - + _seg_22() - + _seg_23() - + _seg_24() - + _seg_25() - + _seg_26() - + _seg_27() - + _seg_28() - + _seg_29() - + _seg_30() - + _seg_31() - + _seg_32() - + _seg_33() - + _seg_34() - + _seg_35() - + _seg_36() - + _seg_37() - + _seg_38() - + _seg_39() - + _seg_40() - + _seg_41() - + _seg_42() - + _seg_43() - + _seg_44() - + _seg_45() - + _seg_46() - + _seg_47() - + _seg_48() - + _seg_49() - + _seg_50() - + _seg_51() - + _seg_52() - + _seg_53() - + _seg_54() - + _seg_55() - + _seg_56() - + _seg_57() - + _seg_58() - + _seg_59() - + _seg_60() - + _seg_61() - + _seg_62() - + _seg_63() - + _seg_64() - + _seg_65() - + _seg_66() - + _seg_67() - + _seg_68() - + _seg_69() - + _seg_70() - + _seg_71() - + _seg_72() - + _seg_73() - + _seg_74() - + _seg_75() - + _seg_76() - + _seg_77() - + _seg_78() - + _seg_79() - + _seg_80() - + _seg_81() -) # type: Tuple[Union[Tuple[int, str], Tuple[int, str, str]], ...] diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/packaging/markers.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/packaging/markers.py deleted file mode 100644 index 540e7a4dc79d02a820e291b57c43335d5aa25a41..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/packaging/markers.py +++ /dev/null @@ -1,304 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. 
- -import operator -import os -import platform -import sys -from typing import Any, Callable, Dict, List, Optional, Tuple, Union - -from pip._vendor.pyparsing import ( # noqa: N817 - Forward, - Group, - Literal as L, - ParseException, - ParseResults, - QuotedString, - ZeroOrMore, - stringEnd, - stringStart, -) - -from .specifiers import InvalidSpecifier, Specifier - -__all__ = [ - "InvalidMarker", - "UndefinedComparison", - "UndefinedEnvironmentName", - "Marker", - "default_environment", -] - -Operator = Callable[[str, str], bool] - - -class InvalidMarker(ValueError): - """ - An invalid marker was found, users should refer to PEP 508. - """ - - -class UndefinedComparison(ValueError): - """ - An invalid operation was attempted on a value that doesn't support it. - """ - - -class UndefinedEnvironmentName(ValueError): - """ - A name was attempted to be used that does not exist inside of the - environment. - """ - - -class Node: - def __init__(self, value: Any) -> None: - self.value = value - - def __str__(self) -> str: - return str(self.value) - - def __repr__(self) -> str: - return f"<{self.__class__.__name__}('{self}')>" - - def serialize(self) -> str: - raise NotImplementedError - - -class Variable(Node): - def serialize(self) -> str: - return str(self) - - -class Value(Node): - def serialize(self) -> str: - return f'"{self}"' - - -class Op(Node): - def serialize(self) -> str: - return str(self) - - -VARIABLE = ( - L("implementation_version") - | L("platform_python_implementation") - | L("implementation_name") - | L("python_full_version") - | L("platform_release") - | L("platform_version") - | L("platform_machine") - | L("platform_system") - | L("python_version") - | L("sys_platform") - | L("os_name") - | L("os.name") # PEP-345 - | L("sys.platform") # PEP-345 - | L("platform.version") # PEP-345 - | L("platform.machine") # PEP-345 - | L("platform.python_implementation") # PEP-345 - | L("python_implementation") # undocumented setuptools legacy - | L("extra") # PEP-508 -) -ALIASES = { - "os.name": "os_name", - "sys.platform": "sys_platform", - "platform.version": "platform_version", - "platform.machine": "platform_machine", - "platform.python_implementation": "platform_python_implementation", - "python_implementation": "platform_python_implementation", -} -VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0]))) - -VERSION_CMP = ( - L("===") | L("==") | L(">=") | L("<=") | L("!=") | L("~=") | L(">") | L("<") -) - -MARKER_OP = VERSION_CMP | L("not in") | L("in") -MARKER_OP.setParseAction(lambda s, l, t: Op(t[0])) - -MARKER_VALUE = QuotedString("'") | QuotedString('"') -MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0])) - -BOOLOP = L("and") | L("or") - -MARKER_VAR = VARIABLE | MARKER_VALUE - -MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR) -MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0])) - -LPAREN = L("(").suppress() -RPAREN = L(")").suppress() - -MARKER_EXPR = Forward() -MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN) -MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR) - -MARKER = stringStart + MARKER_EXPR + stringEnd - - -def _coerce_parse_result(results: Union[ParseResults, List[Any]]) -> List[Any]: - if isinstance(results, ParseResults): - return [_coerce_parse_result(i) for i in results] - else: - return results - - -def _format_marker( - marker: Union[List[str], Tuple[Node, ...], str], first: Optional[bool] = True -) -> str: - - assert isinstance(marker, (list, tuple, str)) - - # Sometimes we have a structure like 
[[...]] which is a single item list - # where the single item is itself it's own list. In that case we want skip - # the rest of this function so that we don't get extraneous () on the - # outside. - if ( - isinstance(marker, list) - and len(marker) == 1 - and isinstance(marker[0], (list, tuple)) - ): - return _format_marker(marker[0]) - - if isinstance(marker, list): - inner = (_format_marker(m, first=False) for m in marker) - if first: - return " ".join(inner) - else: - return "(" + " ".join(inner) + ")" - elif isinstance(marker, tuple): - return " ".join([m.serialize() for m in marker]) - else: - return marker - - -_operators: Dict[str, Operator] = { - "in": lambda lhs, rhs: lhs in rhs, - "not in": lambda lhs, rhs: lhs not in rhs, - "<": operator.lt, - "<=": operator.le, - "==": operator.eq, - "!=": operator.ne, - ">=": operator.ge, - ">": operator.gt, -} - - -def _eval_op(lhs: str, op: Op, rhs: str) -> bool: - try: - spec = Specifier("".join([op.serialize(), rhs])) - except InvalidSpecifier: - pass - else: - return spec.contains(lhs) - - oper: Optional[Operator] = _operators.get(op.serialize()) - if oper is None: - raise UndefinedComparison(f"Undefined {op!r} on {lhs!r} and {rhs!r}.") - - return oper(lhs, rhs) - - -class Undefined: - pass - - -_undefined = Undefined() - - -def _get_env(environment: Dict[str, str], name: str) -> str: - value: Union[str, Undefined] = environment.get(name, _undefined) - - if isinstance(value, Undefined): - raise UndefinedEnvironmentName( - f"{name!r} does not exist in evaluation environment." - ) - - return value - - -def _evaluate_markers(markers: List[Any], environment: Dict[str, str]) -> bool: - groups: List[List[bool]] = [[]] - - for marker in markers: - assert isinstance(marker, (list, tuple, str)) - - if isinstance(marker, list): - groups[-1].append(_evaluate_markers(marker, environment)) - elif isinstance(marker, tuple): - lhs, op, rhs = marker - - if isinstance(lhs, Variable): - lhs_value = _get_env(environment, lhs.value) - rhs_value = rhs.value - else: - lhs_value = lhs.value - rhs_value = _get_env(environment, rhs.value) - - groups[-1].append(_eval_op(lhs_value, op, rhs_value)) - else: - assert marker in ["and", "or"] - if marker == "or": - groups.append([]) - - return any(all(item) for item in groups) - - -def format_full_version(info: "sys._version_info") -> str: - version = "{0.major}.{0.minor}.{0.micro}".format(info) - kind = info.releaselevel - if kind != "final": - version += kind[0] + str(info.serial) - return version - - -def default_environment() -> Dict[str, str]: - iver = format_full_version(sys.implementation.version) - implementation_name = sys.implementation.name - return { - "implementation_name": implementation_name, - "implementation_version": iver, - "os_name": os.name, - "platform_machine": platform.machine(), - "platform_release": platform.release(), - "platform_system": platform.system(), - "platform_version": platform.version(), - "python_full_version": platform.python_version(), - "platform_python_implementation": platform.python_implementation(), - "python_version": ".".join(platform.python_version_tuple()[:2]), - "sys_platform": sys.platform, - } - - -class Marker: - def __init__(self, marker: str) -> None: - try: - self._markers = _coerce_parse_result(MARKER.parseString(marker)) - except ParseException as e: - raise InvalidMarker( - f"Invalid marker: {marker!r}, parse error at " - f"{marker[e.loc : e.loc + 8]!r}" - ) - - def __str__(self) -> str: - return _format_marker(self._markers) - - def __repr__(self) -> str: - 
return f"" - - def evaluate(self, environment: Optional[Dict[str, str]] = None) -> bool: - """Evaluate a marker. - - Return the boolean from evaluating the given marker against the - environment. environment is an optional argument to override all or - part of the determined environment. - - The environment is determined from the current Python process. - """ - current_environment = default_environment() - if environment is not None: - current_environment.update(environment) - - return _evaluate_markers(self._markers, current_environment) diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/highlighter.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/highlighter.py deleted file mode 100644 index c2646794a98578bdb735f5047dbc6b1d50b90230..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/highlighter.py +++ /dev/null @@ -1,232 +0,0 @@ -import re -from abc import ABC, abstractmethod -from typing import List, Union - -from .text import Span, Text - - -def _combine_regex(*regexes: str) -> str: - """Combine a number of regexes in to a single regex. - - Returns: - str: New regex with all regexes ORed together. - """ - return "|".join(regexes) - - -class Highlighter(ABC): - """Abstract base class for highlighters.""" - - def __call__(self, text: Union[str, Text]) -> Text: - """Highlight a str or Text instance. - - Args: - text (Union[str, ~Text]): Text to highlight. - - Raises: - TypeError: If not called with text or str. - - Returns: - Text: A test instance with highlighting applied. - """ - if isinstance(text, str): - highlight_text = Text(text) - elif isinstance(text, Text): - highlight_text = text.copy() - else: - raise TypeError(f"str or Text instance required, not {text!r}") - self.highlight(highlight_text) - return highlight_text - - @abstractmethod - def highlight(self, text: Text) -> None: - """Apply highlighting in place to text. - - Args: - text (~Text): A text object highlight. - """ - - -class NullHighlighter(Highlighter): - """A highlighter object that doesn't highlight. - - May be used to disable highlighting entirely. - - """ - - def highlight(self, text: Text) -> None: - """Nothing to do""" - - -class RegexHighlighter(Highlighter): - """Applies highlighting from a list of regular expressions.""" - - highlights: List[str] = [] - base_style: str = "" - - def highlight(self, text: Text) -> None: - """Highlight :class:`rich.text.Text` using regular expressions. - - Args: - text (~Text): Text to highlighted. - - """ - - highlight_regex = text.highlight_regex - for re_highlight in self.highlights: - highlight_regex(re_highlight, style_prefix=self.base_style) - - -class ReprHighlighter(RegexHighlighter): - """Highlights the text typically produced from ``__repr__`` methods.""" - - base_style = "repr." 
- highlights = [ - r"(?P<)(?P[-\w.:|]*)(?P[\w\W]*)(?P>)", - r'(?P[\w_]{1,50})=(?P"?[\w_]+"?)?', - r"(?P[][{}()])", - _combine_regex( - r"(?P[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3})", - r"(?P([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4})", - r"(?P(?:[0-9A-Fa-f]{1,2}-){7}[0-9A-Fa-f]{1,2}|(?:[0-9A-Fa-f]{1,2}:){7}[0-9A-Fa-f]{1,2}|(?:[0-9A-Fa-f]{4}\.){3}[0-9A-Fa-f]{4})", - r"(?P(?:[0-9A-Fa-f]{1,2}-){5}[0-9A-Fa-f]{1,2}|(?:[0-9A-Fa-f]{1,2}:){5}[0-9A-Fa-f]{1,2}|(?:[0-9A-Fa-f]{4}\.){2}[0-9A-Fa-f]{4})", - r"(?P[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12})", - r"(?P[\w.]*?)\(", - r"\b(?PTrue)\b|\b(?PFalse)\b|\b(?PNone)\b", - r"(?P\.\.\.)", - r"(?P(?(?\B(/[-\w._+]+)*\/)(?P[-\w._+]*)?", - r"(?b?'''.*?(?(file|https|http|ws|wss)://[-0-9a-zA-Z$_+!`(),.?/;:&=%#]*)", - ), - ] - - -class JSONHighlighter(RegexHighlighter): - """Highlights JSON""" - - # Captures the start and end of JSON strings, handling escaped quotes - JSON_STR = r"(?b?\".*?(?[\{\[\(\)\]\}])", - r"\b(?Ptrue)\b|\b(?Pfalse)\b|\b(?Pnull)\b", - r"(?P(? None: - super().highlight(text) - - # Additional work to handle highlighting JSON keys - plain = text.plain - append = text.spans.append - whitespace = self.JSON_WHITESPACE - for match in re.finditer(self.JSON_STR, plain): - start, end = match.span() - cursor = end - while cursor < len(plain): - char = plain[cursor] - cursor += 1 - if char == ":": - append(Span(start, end, "json.key")) - elif char in whitespace: - continue - break - - -class ISO8601Highlighter(RegexHighlighter): - """Highlights the ISO8601 date time strings. - Regex reference: https://www.oreilly.com/library/view/regular-expressions-cookbook/9781449327453/ch04s07.html - """ - - base_style = "iso8601." - highlights = [ - # - # Dates - # - # Calendar month (e.g. 2008-08). The hyphen is required - r"^(?P[0-9]{4})-(?P1[0-2]|0[1-9])$", - # Calendar date w/o hyphens (e.g. 20080830) - r"^(?P(?P[0-9]{4})(?P1[0-2]|0[1-9])(?P3[01]|0[1-9]|[12][0-9]))$", - # Ordinal date (e.g. 2008-243). The hyphen is optional - r"^(?P(?P[0-9]{4})-?(?P36[0-6]|3[0-5][0-9]|[12][0-9]{2}|0[1-9][0-9]|00[1-9]))$", - # - # Weeks - # - # Week of the year (e.g., 2008-W35). The hyphen is optional - r"^(?P(?P[0-9]{4})-?W(?P5[0-3]|[1-4][0-9]|0[1-9]))$", - # Week date (e.g., 2008-W35-6). The hyphens are optional - r"^(?P(?P[0-9]{4})-?W(?P5[0-3]|[1-4][0-9]|0[1-9])-?(?P[1-7]))$", - # - # Times - # - # Hours and minutes (e.g., 17:21). The colon is optional - r"^(?P(?P2[0-3]|[01][0-9]):?(?P[0-5][0-9]))$", - # Hours, minutes, and seconds w/o colons (e.g., 172159) - r"^(?P(?P2[0-3]|[01][0-9])(?P[0-5][0-9])(?P[0-5][0-9]))$", - # Time zone designator (e.g., Z, +07 or +07:00). The colons and the minutes are optional - r"^(?P(Z|[+-](?:2[0-3]|[01][0-9])(?::?(?:[0-5][0-9]))?))$", - # Hours, minutes, and seconds with time zone designator (e.g., 17:21:59+07:00). - # All the colons are optional. The minutes in the time zone designator are also optional - r"^(?P(?P2[0-3]|[01][0-9])(?P[0-5][0-9])(?P[0-5][0-9]))(?PZ|[+-](?:2[0-3]|[01][0-9])(?::?(?:[0-5][0-9]))?)$", - # - # Date and Time - # - # Calendar date with hours, minutes, and seconds (e.g., 2008-08-30 17:21:59 or 20080830 172159). - # A space is required between the date and the time. The hyphens and colons are optional. - # This regex matches dates and times that specify some hyphens or colons but omit others. 
- # This does not follow ISO 8601 - r"^(?P(?P[0-9]{4})(?P-)?(?P1[0-2]|0[1-9])(?(hyphen)-)(?P3[01]|0[1-9]|[12][0-9])) (?P(?P2[0-3]|[01][0-9])(?(hyphen):)(?P[0-5][0-9])(?(hyphen):)(?P[0-5][0-9]))$", - # - # XML Schema dates and times - # - # Date, with optional time zone (e.g., 2008-08-30 or 2008-08-30+07:00). - # Hyphens are required. This is the XML Schema 'date' type - r"^(?P(?P-?(?:[1-9][0-9]*)?[0-9]{4})-(?P1[0-2]|0[1-9])-(?P3[01]|0[1-9]|[12][0-9]))(?PZ|[+-](?:2[0-3]|[01][0-9]):[0-5][0-9])?$", - # Time, with optional fractional seconds and time zone (e.g., 01:45:36 or 01:45:36.123+07:00). - # There is no limit on the number of digits for the fractional seconds. This is the XML Schema 'time' type - r"^(?P(?P2[0-3]|[01][0-9]):(?P[0-5][0-9]):(?P[0-5][0-9])(?P\.[0-9]+)?)(?PZ|[+-](?:2[0-3]|[01][0-9]):[0-5][0-9])?$", - # Date and time, with optional fractional seconds and time zone (e.g., 2008-08-30T01:45:36 or 2008-08-30T01:45:36.123Z). - # This is the XML Schema 'dateTime' type - r"^(?P(?P-?(?:[1-9][0-9]*)?[0-9]{4})-(?P1[0-2]|0[1-9])-(?P3[01]|0[1-9]|[12][0-9]))T(?P(?P2[0-3]|[01][0-9]):(?P[0-5][0-9]):(?P[0-5][0-9])(?P\.[0-9]+)?)(?PZ|[+-](?:2[0-3]|[01][0-9]):[0-5][0-9])?$", - ] - - -if __name__ == "__main__": # pragma: no cover - from .console import Console - - console = Console() - console.print("[bold green]hello world![/bold green]") - console.print("'[bold green]hello world![/bold green]'") - - console.print(" /foo") - console.print("/foo/") - console.print("/foo/bar") - console.print("foo/bar/baz") - - console.print("/foo/bar/baz?foo=bar+egg&egg=baz") - console.print("/foo/bar/baz/") - console.print("/foo/bar/baz/egg") - console.print("/foo/bar/baz/egg.py") - console.print("/foo/bar/baz/egg.py word") - console.print(" /foo/bar/baz/egg.py word") - console.print("foo /foo/bar/baz/egg.py word") - console.print("foo /foo/bar/ba._++z/egg+.py word") - console.print("https://example.org?foo=bar#header") - - console.print(1234567.34) - console.print(1 / 2) - console.print(-1 / 123123123123) - - console.print( - "127.0.1.1 bar 192.168.1.4 2001:0db8:85a3:0000:0000:8a2e:0370:7334 foo" - ) - import json - - console.print_json(json.dumps(obj={"name": "apple", "count": 1}), indent=None) diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/actions.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/actions.py deleted file mode 100644 index f72c66e743146c7a5b70a5440e9ab5459f10245b..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/actions.py +++ /dev/null @@ -1,207 +0,0 @@ -# actions.py - -from .exceptions import ParseException -from .util import col - - -class OnlyOnce: - """ - Wrapper for parse actions, to ensure they are only called once. - """ - - def __init__(self, method_call): - from .core import _trim_arity - - self.callable = _trim_arity(method_call) - self.called = False - - def __call__(self, s, l, t): - if not self.called: - results = self.callable(s, l, t) - self.called = True - return results - raise ParseException(s, l, "OnlyOnce obj called multiple times w/out reset") - - def reset(self): - """ - Allow the associated parse action to be called once more. - """ - - self.called = False - - -def match_only_at_col(n): - """ - Helper method for defining parse actions that require matching at - a specific column in the input text. 
- """ - - def verify_col(strg, locn, toks): - if col(locn, strg) != n: - raise ParseException(strg, locn, "matched token not at column {}".format(n)) - - return verify_col - - -def replace_with(repl_str): - """ - Helper method for common parse actions that simply return - a literal value. Especially useful when used with - :class:`transform_string` (). - - Example:: - - num = Word(nums).set_parse_action(lambda toks: int(toks[0])) - na = one_of("N/A NA").set_parse_action(replace_with(math.nan)) - term = na | num - - term[1, ...].parse_string("324 234 N/A 234") # -> [324, 234, nan, 234] - """ - return lambda s, l, t: [repl_str] - - -def remove_quotes(s, l, t): - """ - Helper parse action for removing quotation marks from parsed - quoted strings. - - Example:: - - # by default, quotation marks are included in parsed results - quoted_string.parse_string("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"] - - # use remove_quotes to strip quotation marks from parsed results - quoted_string.set_parse_action(remove_quotes) - quoted_string.parse_string("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"] - """ - return t[0][1:-1] - - -def with_attribute(*args, **attr_dict): - """ - Helper to create a validating parse action to be used with start - tags created with :class:`make_xml_tags` or - :class:`make_html_tags`. Use ``with_attribute`` to qualify - a starting tag with a required attribute value, to avoid false - matches on common tags such as ```` or ``
    ``. - - Call ``with_attribute`` with a series of attribute names and - values. Specify the list of filter attributes names and values as: - - - keyword arguments, as in ``(align="right")``, or - - as an explicit dict with ``**`` operator, when an attribute - name is also a Python reserved word, as in ``**{"class":"Customer", "align":"right"}`` - - a list of name-value tuples, as in ``(("ns1:class", "Customer"), ("ns2:align", "right"))`` - - For attribute names with a namespace prefix, you must use the second - form. Attribute names are matched insensitive to upper/lower case. - - If just testing for ``class`` (with or without a namespace), use - :class:`with_class`. - - To verify that the attribute exists, but without specifying a value, - pass ``with_attribute.ANY_VALUE`` as the value. - - Example:: - - html = ''' -
- <div>
- Some text
- <div type="grid">1 4 0 1 0</div>
- <div type="graph">1,3 2,3 1,1</div>
- <div>this has no type</div>
- </div>
    - - ''' - div,div_end = make_html_tags("div") - - # only match div tag having a type attribute with value "grid" - div_grid = div().set_parse_action(with_attribute(type="grid")) - grid_expr = div_grid + SkipTo(div | div_end)("body") - for grid_header in grid_expr.search_string(html): - print(grid_header.body) - - # construct a match with any div tag having a type attribute, regardless of the value - div_any_type = div().set_parse_action(with_attribute(type=with_attribute.ANY_VALUE)) - div_expr = div_any_type + SkipTo(div | div_end)("body") - for div_header in div_expr.search_string(html): - print(div_header.body) - - prints:: - - 1 4 0 1 0 - - 1 4 0 1 0 - 1,3 2,3 1,1 - """ - if args: - attrs = args[:] - else: - attrs = attr_dict.items() - attrs = [(k, v) for k, v in attrs] - - def pa(s, l, tokens): - for attrName, attrValue in attrs: - if attrName not in tokens: - raise ParseException(s, l, "no matching attribute " + attrName) - if attrValue != with_attribute.ANY_VALUE and tokens[attrName] != attrValue: - raise ParseException( - s, - l, - "attribute {!r} has value {!r}, must be {!r}".format( - attrName, tokens[attrName], attrValue - ), - ) - - return pa - - -with_attribute.ANY_VALUE = object() - - -def with_class(classname, namespace=""): - """ - Simplified version of :class:`with_attribute` when - matching on a div class - made difficult because ``class`` is - a reserved word in Python. - - Example:: - - html = ''' -
- <div>
- Some text
- <div class="grid">1 4 0 1 0</div>
- <div class="graph">1,3 2,3 1,1</div>
- <div>this &lt;div&gt; has no class</div>
- </div>
    - - ''' - div,div_end = make_html_tags("div") - div_grid = div().set_parse_action(with_class("grid")) - - grid_expr = div_grid + SkipTo(div | div_end)("body") - for grid_header in grid_expr.search_string(html): - print(grid_header.body) - - div_any_type = div().set_parse_action(with_class(withAttribute.ANY_VALUE)) - div_expr = div_any_type + SkipTo(div | div_end)("body") - for div_header in div_expr.search_string(html): - print(div_header.body) - - prints:: - - 1 4 0 1 0 - - 1 4 0 1 0 - 1,3 2,3 1,1 - """ - classattr = "{}:class".format(namespace) if namespace else "class" - return with_attribute(**{classattr: classname}) - - -# pre-PEP8 compatibility symbols -replaceWith = replace_with -removeQuotes = remove_quotes -withAttribute = with_attribute -withClass = with_class -matchOnlyAtCol = match_only_at_col diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/modeling/test_mmdet.py b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/modeling/test_mmdet.py deleted file mode 100644 index a743b0b67d5ab664257040621d28c1b1b4451709..0000000000000000000000000000000000000000 --- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/modeling/test_mmdet.py +++ /dev/null @@ -1,186 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import unittest - -from detectron2.layers import ShapeSpec -from detectron2.modeling.mmdet_wrapper import MMDetBackbone, MMDetDetector - -try: - import mmdet.models # noqa - - HAS_MMDET = True -except ImportError: - HAS_MMDET = False - - -@unittest.skipIf(not HAS_MMDET, "mmdet not available") -class TestMMDetWrapper(unittest.TestCase): - def test_backbone(self): - MMDetBackbone( - backbone=dict( - type="DetectoRS_ResNet", - conv_cfg=dict(type="ConvAWS"), - sac=dict(type="SAC", use_deform=True), - stage_with_sac=(False, True, True, True), - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type="BN", requires_grad=True), - norm_eval=True, - style="pytorch", - ), - neck=dict( - type="FPN", - in_channels=[256, 512, 1024, 2048], - out_channels=256, - num_outs=5, - ), - # skip pretrained model for tests - # pretrained_backbone="torchvision://resnet50", - output_shapes=[ShapeSpec(channels=256, stride=s) for s in [4, 8, 16, 32, 64]], - output_names=["p2", "p3", "p4", "p5", "p6"], - ) - - def test_detector(self): - # a basic R50 Mask R-CNN - MMDetDetector( - detector=dict( - type="MaskRCNN", - backbone=dict( - type="ResNet", - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type="BN", requires_grad=True), - norm_eval=True, - style="pytorch", - # skip pretrained model for tests - # init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')) - ), - neck=dict( - type="FPN", in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5 - ), - rpn_head=dict( - type="RPNHead", - in_channels=256, - feat_channels=256, - anchor_generator=dict( - type="AnchorGenerator", - scales=[8], - ratios=[0.5, 1.0, 2.0], - strides=[4, 8, 16, 32, 64], - ), - bbox_coder=dict( - type="DeltaXYWHBBoxCoder", - target_means=[0.0, 0.0, 0.0, 0.0], - target_stds=[1.0, 1.0, 1.0, 1.0], - ), - loss_cls=dict(type="CrossEntropyLoss", use_sigmoid=True, loss_weight=1.0), - loss_bbox=dict(type="L1Loss", loss_weight=1.0), - ), - roi_head=dict( - type="StandardRoIHead", - bbox_roi_extractor=dict( - type="SingleRoIExtractor", - roi_layer=dict(type="RoIAlign", output_size=7, sampling_ratio=0), - out_channels=256, - featmap_strides=[4, 8, 16, 
32], - ), - bbox_head=dict( - type="Shared2FCBBoxHead", - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type="DeltaXYWHBBoxCoder", - target_means=[0.0, 0.0, 0.0, 0.0], - target_stds=[0.1, 0.1, 0.2, 0.2], - ), - reg_class_agnostic=False, - loss_cls=dict(type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type="L1Loss", loss_weight=1.0), - ), - mask_roi_extractor=dict( - type="SingleRoIExtractor", - roi_layer=dict(type="RoIAlign", output_size=14, sampling_ratio=0), - out_channels=256, - featmap_strides=[4, 8, 16, 32], - ), - mask_head=dict( - type="FCNMaskHead", - num_convs=4, - in_channels=256, - conv_out_channels=256, - num_classes=80, - loss_mask=dict(type="CrossEntropyLoss", use_mask=True, loss_weight=1.0), - ), - ), - # model training and testing settings - train_cfg=dict( - rpn=dict( - assigner=dict( - type="MaxIoUAssigner", - pos_iou_thr=0.7, - neg_iou_thr=0.3, - min_pos_iou=0.3, - match_low_quality=True, - ignore_iof_thr=-1, - ), - sampler=dict( - type="RandomSampler", - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False, - ), - allowed_border=-1, - pos_weight=-1, - debug=False, - ), - rpn_proposal=dict( - nms_pre=2000, - max_per_img=1000, - nms=dict(type="nms", iou_threshold=0.7), - min_bbox_size=0, - ), - rcnn=dict( - assigner=dict( - type="MaxIoUAssigner", - pos_iou_thr=0.5, - neg_iou_thr=0.5, - min_pos_iou=0.5, - match_low_quality=True, - ignore_iof_thr=-1, - ), - sampler=dict( - type="RandomSampler", - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True, - ), - mask_size=28, - pos_weight=-1, - debug=False, - ), - ), - test_cfg=dict( - rpn=dict( - nms_pre=1000, - max_per_img=1000, - nms=dict(type="nms", iou_threshold=0.7), - min_bbox_size=0, - ), - rcnn=dict( - score_thr=0.05, - nms=dict(type="nms", iou_threshold=0.5), - max_per_img=100, - mask_thr_binary=0.5, - ), - ), - ), - pixel_mean=[1, 2, 3], - pixel_std=[1, 2, 3], - ) diff --git a/spaces/Benson/text-generation/Examples/Cmo Hacer Un Android Sin Verificacin.md b/spaces/Benson/text-generation/Examples/Cmo Hacer Un Android Sin Verificacin.md deleted file mode 100644 index 613086a6182a5d6a502316e4d88c238abd397be8..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Cmo Hacer Un Android Sin Verificacin.md +++ /dev/null @@ -1,37 +0,0 @@ -
    -

    Forza Horizon 5 APK Descargar para Android sin verificación

    -

    Si eres un fan de los juegos de carreras, es posible que hayas oído hablar de Forza Horizon 5, la última entrega de la popular serie Forza. Este juego te permite explorar los vibrantes y diversos paisajes del mundo abierto de México con cientos de los mejores coches del mundo. Pero lo que si desea jugar este juego en su dispositivo Android sin pasar por el proceso de verificación? En este artículo, le mostraremos cómo descargar Forza Horizon 5 APK para Android sin verificación, y cuáles son los pros y los contras de hacerlo.

    -

    ¿Qué es Forza Horizon 5?

    -

    Una breve introducción al juego y sus características

    -

    Forza Horizon 5 es un juego de carreras desarrollado por Playground Games y publicado por Xbox Game Studios. Es la quinta entrada principal en la sub-serie Forza Horizon, que es un spin-off de la serie Forza Motorsport. El juego fue lanzado el 8 de noviembre de 2021, para Windows, Xbox One, Xbox Series X/S y Xbox Cloud Gaming.

    -

    Cómo hacer un Android sin verificación


    DOWNLOAD ►►► https://bltlly.com/2v6KZU



    -

    El juego presenta un mundo abierto dinámico y en constante evolución ambientado en México, donde puedes dirigir expediciones impresionantes a través de varios terrenos, como desiertos, selvas, ciudades, ruinas, playas, cañones y volcanes. También puedes participar en cientos de desafíos que te recompensan por participar en las actividades que te gustan, como las carreras, la deriva, el retraso en el crecimiento, la exploración y más. También puedes personalizar tu propio personaje, colección de coches, lista de reproducción de música y sitio del festival.

    -

    El juego también es compatible con los modos multijugador en línea, donde puede formar equipo con otros jugadores y entrar en el Horizon árcade para una serie de desafíos divertidos y exagerados. También puedes crear tus propios eventos y compartirlos con la comunidad. Además, el juego ofrece dos expansiones que añaden nuevos coches, pistas y modos de juego: Hot Wheels y Rally.

    -

    Los requisitos y disponibilidad del juego en diferentes plataformas

    - -
      -
    • OS: Windows 10 versión 15063.0 o superior
    • -
    • Procesador: Intel i3-4170 @ 3.7Ghz o Intel i5-750 @ 2.67Ghz
    • -
    • Memoria: 8 GB RAM
    • -
    • Gráficos: NVidia GTX 650 Ti o AMD R7 250x
    • -
    • DirectX: Versión 12
    • -
    • Almacenamiento: 80 GB de espacio disponible
    • -
    -

    Para jugar a Forza Horizon 5 en Xbox One o Xbox Series X/S, necesitas una suscripción Xbox Live Gold o una suscripción Xbox Game Pass Ultimate. También puedes jugar en tu dispositivo Android a través de Xbox Cloud Gaming, que requiere un controlador compatible y una conexión a Internet estable.

    -

    Puedes comprar Forza Horizon 5 de varias fuentes, como Steam, Xbox o Uptodown. Sin embargo, si desea descargar un archivo APK para Forza Horizon 5 para su versión actualizada y compatible con su dispositivo y Android. Un archivo APK es una versión no oficial y no verificada de una aplicación Android que se descarga desde un sitio web de terceros o de origen. Un archivo APK puede ser inseguro, desactualizado o incompatible con su dispositivo o versión de Android.

    -

    ¿Es legal descargar un archivo APK para Forza Horizon 5?

    -

    Depende de las leyes y regulaciones de su país o región. En algunos lugares, puede ser legal descargar un archivo APK para Forza Horizon 5 siempre y cuando usted es dueño de una copia legítima del juego en otra plataforma. En otros lugares, puede ser ilegal descargar un archivo APK para Forza Horizon 5, ya que puede violar los derechos de propiedad intelectual del desarrollador o editor del juego. Por lo tanto, debe comprobar el estado legal de la descarga de un archivo APK para Forza Horizon 5 en su ubicación antes de hacerlo.

    -

    ¿Cómo puedo actualizar el archivo APK para Forza Horizon 5?

    - -

    ¿Cómo puedo desinstalar el archivo APK para Forza Horizon 5?

    -

    Para desinstalar el archivo APK para Forza Horizon 5 desde su dispositivo, debe seguir estos pasos:

    -

    -
      -
    1. Ir a Configuración > Aplicaciones y encontrar Forza Horizon 5 en la lista de aplicaciones instaladas.
    2. -
    3. Toque en Forza Horizon 5 y seleccione Desinstalar.
    4. -
    5. Confirme su acción y espere a que se complete el proceso de desinstalación.
    6. -
    -

    ¿Dónde puedo encontrar más información sobre Forza Horizon 5?

    -

    Si quieres saber más sobre Forza Horizon 5, puedes visitar el sitio web oficial del juego, donde puedes encontrar noticias, trailers, capturas de pantalla, características y más. También puedes seguir las cuentas oficiales de redes sociales del juego, donde puedes obtener actualizaciones, consejos e interactuar con otros fans. También puedes ver vídeos de gameplay y reseñas en YouTube o Twitch, donde puedes ver cómo se ve el juego y cómo se juega en diferentes plataformas.

    -
    -
    \ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Descargar Chicos Stumble 2023 Apk.md b/spaces/Benson/text-generation/Examples/Descargar Chicos Stumble 2023 Apk.md deleted file mode 100644 index fd79958eb5f31a83a09f23b5e128182bdca73378..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Descargar Chicos Stumble 2023 Apk.md +++ /dev/null @@ -1,75 +0,0 @@ - -

    Descargar Stumble Guys 2023 APK: Cómo unirse a la fiesta en su dispositivo Android

    -

    ¿Te encanta jugar juegos de fiesta con tus amigos en línea? ¿Te gusta tropezar con diferentes niveles de caos y diversión? ¿Quieres experimentar el último juego knockout en tu dispositivo Android? Si respondiste sí a cualquiera de estas preguntas, entonces usted debe descargar Stumble Guys 2023 APK ahora mismo!

    -

    descargar chicos stumble 2023 apk


    DOWNLOAD ->>->>->> https://bltlly.com/2v6MIK



    -

    ¿Qué es Stumble Guys?

    -

    Stumble Guys es un juego masivo de eliminación de fiesta multijugador con hasta 32 jugadores en línea. Puedes unirte ronda tras ronda de caos creciente para tropezar a través de diferentes niveles hasta que un vencedor sea coronado. También puedes invitar a tus amigos y competir contra millones de jugadores de todo el mundo.

    -

    Un partido multijugador masivo juego knockout

    -

    Stumble Guys no es tu típico juego de battle royale. Es más como un juego de fiesta donde tienes que correr, correr, deslizarte y esquivar a tus oponentes y obstáculos que se aproximan. Tienes que sobrevivir tanto como puedas y ser el último en pie. También puedes formar equipo con tus amigos y jugar en diferentes modos como 4v4, capturar la bandera o rey de la colina.

    -

    Un diseño colorido y loco

    -

    Stumble Guys tiene un diseño colorido y loco que te hará sonreír y reír. El juego tiene un estilo de dibujos animados que es brillante y alegre. Los niveles son variados y creativos, desde islas tropicales hasta montañas nevadas. Los personajes son lindos y divertidos, con diferentes trajes y accesorios. También puedes personalizar tu propio personaje con diferentes pieles, sombreros, gafas, zapatos y más.

    -

    -

    Un juego cómicamente físico

    - -

    Muchas opciones de personalización

    -

    Stumble Guys tiene muchas opciones de personalización que te harán destacar entre la multitud. Puede elegir entre cientos de pieles, sombreros, gafas, zapatos y otros artículos para crear su propio carácter único. También puede recoger tarjetas y pegatinas para desbloquear más artículos y recompensas. También puede consultar la tienda web para ofertas exclusivas y ofertas que solo están disponibles en el sitio web oficial.

    -

    ¿Por qué descargar Stumble Guys 2023 APK?

    -

    Si ya eres un fan de Stumble Guys, es posible que se pregunte por qué debe descargar Stumble Guys 2023 APK en lugar de solo actualizar el juego de la Google Play Store. Bueno, hay algunas buenas razones por las que deberías hacer eso.

    -

    La última versión del juego

    -

    Stumble Guys 2023 APK es la última versión del juego que ha sido lanzado en junio de 2023. Tiene todas las nuevas características y mejoras que se han añadido al juego desde su lanzamiento en agosto de 2020. También tiene todas las correcciones de errores y optimizaciones que se han hecho para garantizar un juego suave y estable.

    -

    Las nuevas características y mejoras

    -

    Stumble Guys 2023 APK tiene algunas nuevas características y mejoras que harán que su experiencia de juego aún mejor. Algunos de ellos son:

    -
• Un nuevo nivel llamado Stumble City que se inspira en el entorno urbano. Tienes que navegar por calles concurridas, rascacielos, subterráneos y parques mientras evitas autos, trenes, palomas y otros peligros.
• Un nuevo modo llamado Stumble Royale que es un giro en el género clásico battle royale. Tienes que sobrevivir tanto como puedas en un mapa que se encoge mientras recoges armas, municiones y paquetes de salud. También puedes usar vehículos, trampas y explosivos para eliminar a tus enemigos.
• Un nuevo sistema llamado Stumble Rewards que te da más incentivos para jugar y ganar. Puedes ganar monedas, gemas, tarjetas, pegatinas y otros objetos completando misiones diarias, desafíos semanales y eventos de temporada. También puede obtener recompensas de bonificación al ver anuncios, invitar a amigos o unirse al club web.

    Ofertas y ofertas web exclusivas

    -

    Stumble Guys 2023 APK también tiene algunas ofertas web exclusivas y ofertas que solo se puede obtener mediante la descarga del juego desde el sitio web oficial. Algunos de ellos son:

    -
      -
    • Un Stumble Pass gratuito que te da acceso a contenido y funciones premium por un tiempo limitado. Puedes desbloquear más niveles, modos, pieles, sombreros, gafas, zapatos y otros artículos jugando el juego y ganando estrellas. También puede actualizar al Stumble Pass Plus para obtener más beneficios y recompensas.
    • Un descuento del 50% en el Stumble Bundle que le da una gran cantidad de monedas, gemas, tarjetas, pegatinas y otros artículos a un precio bajo. Puedes utilizarlos para comprar más pieles, sombreros, gafas, zapatos y otros artículos en la tienda web o en la tienda del juego. También puedes usarlos para desbloquear más niveles, modos, potenciadores y gadgets.
    • Una oferta especial en el Stumble Club que le da una membresía de por vida para un pago único. Puede disfrutar de acceso ilimitado a todo el contenido premium y características del juego sin ningún tipo de anuncios o interrupciones. También puede obtener actualizaciones exclusivas, noticias, consejos, trucos y secretos de los desarrolladores y la comunidad.
    -

    Cómo descargar Stumble Guys 2023 APK?

    -

    Si usted está convencido de que Stumble Guys 2023 APK es la mejor versión del juego para usted, es posible que se pregunte cómo descargarlo en su dispositivo Android. Bueno, no es muy difícil si sigues estos sencillos pasos:

    -

    Paso 1: Habilitar fuentes desconocidas en el dispositivo

    - -
      -
1. Vaya a la configuración de su dispositivo y toque en Seguridad o Privacidad.
2. Encuentre la opción que dice "Fuentes desconocidas" o "Instalar aplicaciones desconocidas" y actívela.
3. Confirme su elección tocando OK o Permitir.
    -

    Paso 2: Encontrar una fuente confiable para el archivo APK

    -

    El siguiente paso es encontrar una fuente confiable para el archivo APK de Stumble Guys 2023. Hay muchos sitios web que afirman ofrecer archivos APK para su descarga gratuita, pero no todos ellos son confiables o seguros. Algunos de ellos pueden contener malware o virus que pueden dañar tu dispositivo o robar tus datos. Para evitar esto:

    -
• Vaya al sitio web oficial de Stumble Guys en https://stumbleguys.com y busque el botón de descarga. Esta es la fuente más segura y confiable para el archivo APK de Stumble Guys 2023.
• Alternativamente, puede utilizar un sitio web de terceros de confianza que ofrezca archivos APK para descargar. Algunos de los populares son APKPure, APKMirror y APKMonk. Asegúrese de comprobar las calificaciones, reseñas y comentarios de los usuarios antes de descargar cualquier archivo APK de estos sitios web.
• Evite cualquier sitio web que le pida que llene encuestas, ingrese su información personal o descargue aplicaciones o software adicionales antes de darle el archivo APK. Estos son generalmente estafas o intentos de phishing que pueden comprometer su seguridad y privacidad. (Después de esta lista se incluye un pequeño ejemplo para verificar la suma de comprobación del archivo descargado.)
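As an editorial aside to the tips above: if the site you download from publishes a SHA-256 checksum for the APK, you can verify the file before opening it. The sketch below is only an illustration in Python; the file name and the expected digest are placeholders, not real values for this game.

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Compute the SHA-256 digest of a file, reading it in chunks."""
    digest = hashlib.sha256()
    with open(path, "rb") as fh:
        for chunk in iter(lambda: fh.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Placeholder values: use the real file name and the checksum published
# by the site you downloaded from.
apk_path = "stumble_guys_2023.apk"
expected = "<checksum published by the download site>"

print("checksum OK" if sha256_of(apk_path) == expected else "WARNING: checksum mismatch")
```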
-

Paso 3: Descargar e instalar el archivo APK

-

Una vez que haya encontrado una fuente confiable para el archivo APK de Stumble Guys 2023, puede proceder a descargarlo e instalarlo en su dispositivo. Para hacer esto:

-
    -
1. Toque en el botón o enlace de descarga y espere a que el archivo APK se descargue en su dispositivo. Puede comprobar el progreso de la descarga en la barra de notificaciones o en el navegador.
2. Una vez que la descarga se haya completado, toque en el archivo APK o ábralo con su administrador de archivos. Puede ver un mensaje de advertencia que dice que este tipo de archivo puede dañar su dispositivo. Ignórelo y toque Instalar de todos modos o Confiar. (Después de estos pasos se incluye un pequeño ejemplo para comprobar que el archivo descargado no está corrupto.)
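Before tapping Install, it can be worth confirming that the download is not truncated. An APK is just a ZIP archive, so a quick sanity check is possible with Python's standard library; the file name below is a placeholder and this is only an optional, illustrative step, not part of the original guide.

```python
import zipfile

apk_path = "stumble_guys_2023.apk"  # placeholder file name

if not zipfile.is_zipfile(apk_path):
    print("Not a valid APK/ZIP file; the download is probably incomplete.")
else:
    with zipfile.ZipFile(apk_path) as apk:
        bad_entry = apk.testzip()  # returns the first corrupt member name, or None
        print("Archive looks intact" if bad_entry is None else f"Corrupt entry: {bad_entry}")
```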
-

Paso 4: Iniciar el juego y disfrutar de

-

Felicidades! Usted ha descargado con éxito e instalado Stumble Guys 2023 APK en su dispositivo Android. Ahora puede lanzar el juego y disfrutar de todas las nuevas características y mejoras que tiene para ofrecer. También puede unirse al club web y obtener actualizaciones exclusivas, noticias, consejos, trucos y secretos de los desarrolladores y la comunidad.

-

Conclusión

-

Stumble Guys es uno de los juegos de fiesta más divertidos y adictivos que puedes jugar en tu dispositivo Android. Se trata de un partido masivo multijugador knockout juego con hasta 32 jugadores en línea. Puedes unirte ronda tras ronda de caos creciente para tropezar a través de diferentes niveles hasta que un vencedor sea coronado. También puedes invitar a tus amigos y competir contra millones de jugadores de todo el mundo.

-

Si desea experimentar el último juego knockout en su dispositivo Android, usted debe descargar Stumble Guys 2023 APK ahora mismo. Es la última versión del juego que tiene todas las nuevas características y mejoras que se han añadido al juego desde su lanzamiento en agosto de 2020. También tiene algunas ofertas web exclusivas y ofertas que solo se pueden obtener mediante la descarga del juego desde el sitio web oficial.

-

Para descargar Stumble Guys 2023 APK, solo tienes que seguir estos sencillos pasos: habilitar fuentes desconocidas en su dispositivo, encontrar una fuente confiable para el archivo APK, descargar e instalar el archivo APK, y lanzar el juego y disfrutar. No es muy difícil si sigues estos pasos cuidadosamente.

-

Entonces, ¿qué estás esperando? Descargar Stumble Guys 2023 APK hoy y unirse a la fiesta en su dispositivo Android!

-

Preguntas frecuentes

-

Aquí están algunas de las preguntas más frecuentes sobre Stumble Guys 2023 APK:

-

Q: ¿Es Stumble Guys 2023 APK seguro para descargar?

- -

Q: ¿Es Stumble Guys 2023 APK libre para jugar?

-

A: Sí, Stumble Guys 2023 APK es gratis para jugar. Puedes descargarlo e instalarlo en tu dispositivo sin pagar nada. Sin embargo, hay algunas compras en la aplicación y anuncios que pueden mejorar su experiencia de juego o apoyar a los desarrolladores.

-

Q: ¿Cuáles son los requisitos para Stumble Guys 2023 APK?

-

A: Para jugar Stumble Guys 2023 APK en su dispositivo, es necesario tener una versión de Android de 5.0 o superior y un mínimo de 100 MB de espacio de almacenamiento libre. También es necesario tener una conexión a Internet estable ya que el juego es solo en línea.

Q: ¿Cómo puedo actualizar Stumble Guys 2023 APK? -

A: Para actualizar Stumble Guys 2023 APK, puede consultar el sitio web oficial o el sitio web de terceros donde descargó el archivo APK para cualquier versión nueva o actualizaciones. También puede habilitar la función de actualización automática en su dispositivo para recibir notificaciones de las actualizaciones automáticamente. Sin embargo, es posible que tenga que desinstalar y reinstalar el archivo APK cada vez que haya una actualización importante.

-

Q: ¿Cómo puedo contactar a los desarrolladores de Stumble Guys 2023 APK?

-

A: Si usted tiene alguna pregunta, comentarios, sugerencias, o problemas con respecto Stumble Guys 2023 APK, puede ponerse en contacto con los desarrolladores del juego enviando un correo electrónico a support@stumbleguys.com o visitando sus páginas de redes sociales en Facebook, Twitter, Instagram o YouTube. También puedes unirte a su servidor de Discord o a la comunidad de Reddit para chatear con otros jugadores y obtener más información y consejos sobre el juego.

-
-
\ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Descargar Colegial Pelea Sin Sensor Apk.md b/spaces/Benson/text-generation/Examples/Descargar Colegial Pelea Sin Sensor Apk.md deleted file mode 100644 index 5e9dae7273f25b820a0e6f6e664075b69553acef..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Descargar Colegial Pelea Sin Sensor Apk.md +++ /dev/null @@ -1,74 +0,0 @@ - -

Descargar Colegio pelea sin sensor Apk: Una guía para los usuarios de Android

-

Si estás buscando un juego divertido y realista que te permita experimentar la vida de un estudiante universitario, entonces deberías probar College Brawl. Este juego es una simulación de la vida en el campus, donde puedes hacer amigos, enemigos, romance y drama. También puedes personalizar tu personaje, elegir tu especialidad, unirte a clubes y participar en varias actividades. Sin embargo, hay un inconveniente: tienes que lidiar con los problemas y conflictos que surgen en tu universidad. Tienes que luchar, negociar o cooperar con otros estudiantes para sobrevivir y tener éxito en tu vida académica y social.

-

College Brawl es un juego que no es para los débiles de corazón. Contiene temas maduros, violencia, blasfemia y desnudez. Si no se siente cómodo con estos elementos, entonces es posible que desee saltarse este juego. Sin embargo, si usted está buscando una versión más realista y sin censura de la vida universitaria, entonces es posible que desee descargar College Brawl No Sensor Apk. Esta es una versión modificada del juego original que elimina la censura y añade más características y contenido. Puedes disfrutar del juego sin restricciones ni limitaciones.

-

descargar colegial pelea sin sensor apk


DOWNLOAD ✔ https://bltlly.com/2v6MG6



-

En este artículo, le mostraremos cómo descargar College Brawl No Sensor Apk de una fuente de confianza, cómo instalarlo en su dispositivo Android, y cómo jugar y disfrutar de sus características. Sigue estos pasos cuidadosamente y podrás experimentar el mejor juego de simulación de vida universitaria.

-

Cómo descargar College Brawl No Sensor Apk de una fuente de confianza

- -

Uno de los mejores sitios web que recomendamos para descargar College Brawl No Sensor Apk es [Bungdus.com]( 1 ). Este sitio web es conocido por proporcionar juegos y aplicaciones de alta calidad para usuarios de Android. Tiene una gran colección de juegos y aplicaciones que son probados y verificados por su equipo de expertos. Puede descargar College Brawl No Sensor Apk desde este sitio web sin ningún tipo de preocupaciones o molestias.

-

Para descargar College Brawl No Sensor Apk de [Bungdus.com]( 1 ), siga estos pasos:

-
    -
1. Abra su navegador web y vaya a [Bungdus.com]( 1 ).
2. En la página de inicio, escriba "College Brawl" en el cuadro de búsqueda y pulse enter.
3. De los resultados de la búsqueda, haga clic en el enlace que dice "Descargar College Brawl Mod Apk Nosensor Terbaru 2023".
4. En la siguiente página, desplácese hacia abajo hasta que vea un botón verde que dice "Descargar ahora". Haga clic en él.
5. Se abrirá una nueva pestaña con un temporizador de cuenta atrás. Espere unos segundos hasta que el temporizador llegue a cero.
6. Haga clic en el botón que dice "Descargar archivo" para comenzar a descargar el archivo apk.
7. Guarde el archivo apk en su ubicación preferida en su dispositivo. (Después de esta lista se muestra también cómo hacer esta descarga desde un script.)
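The same download can also be scripted, which some readers find handy for keeping a backup copy. This is only a sketch and not part of the original guide: the URL is a placeholder for whatever direct link the "Descargar archivo" button points to, and it may not work if the site only serves the file after its countdown page.

```python
import urllib.request

# Placeholder URL: replace it with the direct link behind the
# "Descargar archivo" button on the download page.
url = "https://example.com/college_brawl_no_sensor.apk"
destination = "college_brawl_no_sensor.apk"

# urlretrieve streams the response to disk and returns the local path and headers.
path, headers = urllib.request.urlretrieve(url, destination)
print(f"Saved {path} ({headers.get('Content-Length', 'unknown')} bytes)")
```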
-

Cómo instalar College Brawl No Sensor Apk en su dispositivo Android

-

Después de haber descargado College Brawl No Sensor Apk de [Bungdus.com], es necesario instalarlo en su dispositivo Android. Sin embargo, antes de poder hacer eso, debe habilitar la instalación de aplicaciones de fuentes desconocidas en su dispositivo. Esto se debe a que College Brawl No Sensor Apk no está disponible en el Google Play Store y se considera una aplicación de terceros. Por lo tanto, debe dar permiso a su dispositivo para instalarlo.

-

Para habilitar la instalación de aplicaciones de fuentes desconocidas en tu dispositivo Android, sigue estos pasos:

-
    -
1. Vaya a la aplicación Configuración en su dispositivo y toque en Seguridad o Privacidad.
2. Encuentre la opción que dice "Fuentes desconocidas" o "Instalar aplicaciones desconocidas" y actívela.
-

Ahora, usted está listo para instalar College Brawl No Sensor Apk en su dispositivo. Para hacer eso, siga estos pasos:

-
    -
1. Busque el archivo apk que descargó de [Bungdus.com] y toque en él.
2. Aparecerá un mensaje de confirmación. Toque en Instalar para iniciar el proceso de instalación.
3. Espere unos minutos hasta que se complete la instalación.
4. Toque en Abrir para iniciar el juego o Listo para salir del instalador. (Después de esta lista se muestra una alternativa para instalar el archivo desde una computadora con adb.)
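As an optional alternative to the on-device steps above, the APK can be sideloaded from a computer with Android's adb tool. This is not part of the original guide: it assumes adb is installed, USB debugging is enabled on the phone, and the file name below is a placeholder.

```python
import subprocess

apk_path = "college_brawl_no_sensor.apk"  # placeholder file name

# "adb install -r" installs the APK, replacing any existing copy of the app.
result = subprocess.run(
    ["adb", "install", "-r", apk_path],
    capture_output=True,
    text=True,
)
print(result.stdout or result.stderr)
```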
-

Cómo jugar Colegio pelea sin sensor Apk y disfrutar de sus características

-

Felicidades! Usted ha instalado con éxito College Brawl No Sensor Apk en su dispositivo Android. Ahora, puedes jugar el juego y disfrutar de sus características. Aquí hay algunos consejos y trucos para ayudarte a empezar:

-

-
    -
  • Cuando inicies el juego por primera vez, se te pedirá que crees tu personaje. Puedes elegir tu género, nombre, apariencia y personalidad. También puedes personalizar tu ropa, accesorios y peinado.
  • Después de crear tu personaje, serás llevado al menú principal. Aquí, usted puede optar por iniciar un nuevo juego, cargar un juego guardado, o acceder a la configuración. También puedes ver tus estadísticas, inventario, logros y amigos.
  • Si empiezas un nuevo juego, se te pedirá que elijas tu especialidad. Puedes elegir entre diferentes campos de estudio, como artes, ciencias, negocios, ingeniería o derecho. Tu especialidad afectará tus clases, actividades y oportunidades profesionales.
  • También se le pedirá que elija su dormitorio. Puede elegir entre diferentes tipos de dormitorios, como mixto, de un solo sexo, de lujo o barato. Su dormitorio afectará su comodidad, privacidad y vida social.
  • Una vez que haya elegido su especialidad y dormitorio, comenzará su vida universitaria. Tendrá que equilibrar su vida académica, social y personal. Tendrás que asistir a clases, hacer tareas, tomar exámenes, unirte a clubes, hacer amigos, citas, fiestas, peleas y más.
  • Puede explorar el campus e interactuar con varios personajes y objetos. También puede usar su teléfono para acceder a varias aplicaciones y características. Puede llamar o enviar mensajes de texto a otros personajes, revisar su correo electrónico o cuentas de redes sociales, jugar juegos o ver videos en línea.
-

Conclusión: Resumir los principales puntos y beneficios de la descarga de College Brawl No Sensor Apk

-

En conclusión, Colegio pelea sin sensor Apk es un juego que le permite experimentar la vida de un estudiante universitario de una manera realista y sin censura. Puedes crear tu propio personaje y personalizarlo según tus preferencias. Puede elegir su especialidad y dormitorio y dar forma a su vida académica y social. Puedes participar en varias actividades y eventos y tomar decisiones que afectarán tu futuro y tus relaciones. También puedes disfrutar del juego sin censura ni limitaciones.

-

Si desea descargar College Brawl No Sensor Apk gratis de una fuente de confianza, entonces usted debe visitar [Bungdus.com]. Este sitio web ofrece juegos y aplicaciones modded de alta calidad para usuarios de Android. Puede descargar College Brawl No Sensor Apk desde este sitio web sin ningún tipo de preocupaciones o molestias.

-

Esperamos que este artículo le ha ayudado a aprender cómo descargar College Brawl No Sensor Apk de [Bungdus.com], cómo instalarlo en su dispositivo Android, y cómo jugar y disfrutar de sus características. Si tiene alguna pregunta o comentario sobre este artículo o el juego o el sitio web, no dude en dejar un comentario a continuación. Nos encantaría saber de ti y ayudarte. ¡Gracias por leer y tener un gran día!

-

Preguntas frecuentes

-

Aquí están algunas de las preguntas más frecuentes sobre College Brawl No Sensor Apk:

-

¿Cuál es la diferencia entre College Brawl y College Brawl No Sensor?

- -

College Brawl No Sensor es una versión modificada del juego que elimina la censura y añade más características y contenido. Es una versión más realista y sin censura de la vida universitaria. Puedes disfrutar del juego sin restricciones ni limitaciones. Por ejemplo, tiene escenas claras y detalladas, contenido completo y sin cortar, y funciones desbloqueadas.

-

¿Es seguro y legal descargar College Brawl No Sensor Apk?

-

College Brawl No Sensor Apk es seguro y legal para descargar, siempre y cuando se descarga desde una fuente de confianza como [Bungdus.com]. Este sitio web ofrece juegos y aplicaciones modded de alta calidad para usuarios de Android. Cuenta con un equipo de expertos que prueban y verifican los archivos apk antes de subirlos al sitio web. Puede descargar College Brawl No Sensor Apk desde este sitio web sin ningún tipo de preocupaciones o molestias.

-

Sin embargo, usted debe tener en cuenta que la descarga de College Brawl No Sensor Apk podría violar los términos y condiciones del desarrollador de juegos original. Por lo tanto, debe descargarlo y usarlo bajo su propio riesgo y discreción. No nos hacemos responsables de las consecuencias o daños que puedan producirse al descargar o usar College Brawl No Sensor Apk.

-

¿Cuáles son los requisitos mínimos para ejecutar College Brawl No Sensor Apk en su dispositivo Android?

-

Para ejecutar College Brawl No Sensor Apk en su dispositivo Android, es necesario tener los siguientes requisitos mínimos:

-
    -
  • Un dispositivo Android con la versión 4.4 o superior.
  • Una conexión a Internet estable.
  • Al menos 1 GB de espacio de almacenamiento libre.
  • Al menos 2 GB de RAM.
-

Si su dispositivo cumple con estos requisitos, debería poder ejecutar College Brawl No Sensor Apk sin ningún problema.
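If you prefer to confirm the Android-version requirement from a computer instead of digging through the phone's settings, the device can be queried over adb. This is only a hedged sketch (it assumes adb is available and a device is connected); Android 4.4 corresponds to API level 19, which is the number used in the comparison.

```python
import subprocess

# "ro.build.version.sdk" is the device's API level; Android 4.4 is API 19.
out = subprocess.run(
    ["adb", "shell", "getprop", "ro.build.version.sdk"],
    capture_output=True,
    text=True,
    check=True,
)
api_level = int(out.stdout.strip())
print("Meets the Android 4.4+ requirement" if api_level >= 19 else "Android version is too old")
```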

-

¿Cómo puedo actualizar College Brawl No Sensor Apk a la última versión?

- -

Alternativamente, también puede buscar actualizaciones dentro del juego. Puede ir al menú de configuración y tocar el botón de actualización. Si hay una nueva actualización disponible, puedes descargarla directamente del juego e instalarla en tu dispositivo.

-

¿Cómo puedo contactar con el desarrollador de College Brawl No Sensor Apk para obtener información o apoyo?

-

Si desea ponerse en contacto con el desarrollador de College Brawl No Sensor Apk para obtener información o apoyo, puede hacerlo enviando un correo electrónico a [collegebrawlnosensor@gmail.com]. También puede visitar su sitio web oficial en [collegebrawlnosensor.com] o sus cuentas de redes sociales en Facebook, Twitter, Instagram o YouTube. También puedes dejar un comentario en [Bungdus.com] o en este artículo y trataremos de reenviarlo a ellos.

-
-
\ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Descargar Dungeon Quest Mod Apk.md b/spaces/Benson/text-generation/Examples/Descargar Dungeon Quest Mod Apk.md deleted file mode 100644 index 355c633be1c1c78a3a0778035303154f313cd9f1..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Descargar Dungeon Quest Mod Apk.md +++ /dev/null @@ -1,64 +0,0 @@ -
-

Descargar Dungeon Quest Mod Apk y disfrutar de una aventura de RPG lleno de botín

-

Si estás buscando un juego de rol divertido y adictivo que puedas jugar sin conexión, entonces deberías probar Dungeon Quest. Este juego te llevará en un viaje épico para encontrar el mejor botín y derrotar a todos los enemigos en su camino. Y si usted quiere hacer su aventura aún más emocionante, se puede descargar Dungeon Quest mod apk y disfrutar de recursos ilimitados, compras gratis, y más. En este artículo, te diremos qué es Dungeon Quest, por qué deberías descargar su mod apk, cómo instalarlo y algunos consejos y trucos para jugarlo.

-

¿Qué es Dungeon Quest?

-

Dungeon Quest es un juego de rol de acción sin conexión que fue desarrollado por Shiny Box Games. Está disponible para dispositivos Android, iOS y Apple TV. Estas son algunas de las características de este juego:

-

Descargar Dungeon Quest mod apk


DOWNLOAD ✪ https://bltlly.com/2v6JdX



-

Un juego de rol de acción sin conexión para todos

-

Puedes jugar a Dungeon Quest todo el tiempo que quieras sin contenido ni muros de pago. Usted no necesita una conexión a Internet para disfrutar de este juego, por lo que puede jugar en cualquier momento y en cualquier lugar. También puedes personalizar la apariencia, el equipo, las habilidades y los talentos de tu personaje para adaptarlos a tu estilo de juego.

-

Un juego con botín aleatorio, mazmorras generadas y jefes legendarios

-

En Dungeon Quest, nunca lucharás en la misma mazmorra dos veces. El juego tiene pisos generados aleatoriamente ilimitados que desafiarán tus habilidades y estrategia. También encontrarás increíbles botines aleatorios que puedes equipar y usar en combate. Y al final de cada acto, te enfrentarás a uno de los cuatro jefes legendarios que pondrán a prueba tu fuerza.

-

Un juego con tres clases, sistema de elaboración, sistema de habilidades y sistema de mascotas

- -

¿Por qué descargar Dungeon Quest mod apk?

-

Dungeon Quest ya es un juego divertido y divertido, pero si quieres hacerlo aún mejor, puedes descargar su apk mod. Con este apk mod, puede obtener acceso a algunas características increíbles que harán que su juego más fácil y más emocionante. Estos son algunos de los beneficios de descargar Dungeon Quest mod apk:

-

Consigue oro y cristales ilimitados para mejorar tu equipo y habilidades

-

El oro y los cristales son las principales monedas en Dungeon Quest. Los necesitas para comprar objetos, mejorar tu equipo, desbloquear habilidades y mucho más. Con Dungeon Quest mod apk, puede obtener oro ilimitado y cristales que se puede utilizar tanto como quieras. Usted no tiene que preocuparse por quedarse sin recursos o moler por ellos.

-

Obtén compras gratuitas y acceso a artículos y características premium

-

Dungeon Quest tiene algunos elementos y características que requieren dinero real o compras en el juego. Por ejemplo, puedes comprar mascotas premium, disfraces, espacios de inventario y más. Con Dungeon Quest mod apk, usted puede obtener compras gratuitas y el acceso a todos los artículos premium y características sin gastar dinero. Puedes disfrutar de la experiencia completa del juego sin limitaciones.

-

Obtén resistencia y salud ilimitadas para sobrevivir más tiempo en batallas

-

La resistencia y la salud son vitales para tu supervivencia en Dungeon Quest. Necesitas resistencia para usar tus habilidades y habilidades, y necesitas salud para soportar el daño de los enemigos. Con Dungeon Quest mod apk, puede obtener la resistencia y la salud ilimitada que nunca se agotará. Puedes usar tus habilidades tanto como quieras y recibir tanto daño como puedas sin morir.

-

Cómo descargar e instalar Dungeon Quest mod apk?

-

Descargar e instalar Dungeon Quest mod apk es muy fácil y simple. Solo tienes que seguir estos pasos:

-

Descargar el archivo apk mod de una fuente de confianza

- -

Descargar Dungeon Quest mod apk aquí

-

-

Habilitar fuentes desconocidas en la configuración del dispositivo

-

Lo siguiente que debe hacer es permitir que su dispositivo instale aplicaciones de fuentes desconocidas. Esto es porque Dungeon Quest mod apk no es de la tienda oficial de Google Play o App Store. Para hacer esto, vaya a la configuración del dispositivo, luego a la seguridad y habilite fuentes desconocidas. Esto le permitirá instalar aplicaciones desde fuentes externas.

-

Instalar el archivo apk mod y lanzar el juego

-

Lo último que tienes que hacer es instalar el archivo apk mod que has descargado. Busque el archivo en el almacenamiento del dispositivo y, a continuación, toque en él para iniciar el proceso de instalación. Siga las instrucciones de la pantalla y espere a que termine la instalación. Una vez que se hace, se puede iniciar el juego y disfrutar de Dungeon Quest mod apk.
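Before tapping the file, it can save a failed install to confirm that the download actually finished and that there is room left for it. This is a generic, illustrative check rather than anything from the original article: the file name is a placeholder and the 200 MB margin is an arbitrary assumption, not a published requirement for Dungeon Quest.

```python
import os
import shutil

apk_path = "dungeon_quest_mod.apk"    # placeholder file name
min_free_bytes = 200 * 1024 * 1024    # arbitrary safety margin, not a figure from the article

if not os.path.isfile(apk_path):
    print("APK not found; the download may not have finished.")
else:
    size_mb = os.path.getsize(apk_path) / (1024 * 1024)
    free = shutil.disk_usage(os.path.dirname(os.path.abspath(apk_path))).free
    print(f"APK size: {size_mb:.1f} MB, free space: {free / (1024 * 1024):.1f} MB")
    if free < min_free_bytes:
        print("Warning: very little free space left for the installation.")
```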

-

Consejos y trucos para jugar Dungeon Quest

-

Dungeon Quest es un juego divertido y adictivo, pero también puede ser desafiante y complejo. Para ayudarte a empezar y mejorar tu juego, aquí hay algunos consejos y trucos que puedes usar:

-

Prioriza la misión principal y completa misiones diarias para recompensas

-

La misión principal es la mejor manera de progresar en Dungeon Quest. Te guiará a través de los diferentes actos, mazmorras y jefes del juego. También te recompensará con oro, cristales, equipo y más. Siempre debes seguir la misión principal y completarla lo antes posible.

-

Las misiones diarias son otra gran manera de ganar recompensas en Dungeon Quest. Son tareas sencillas que puedes hacer todos los días, como matar a un cierto número de enemigos, usar cierta habilidad o encontrar un determinado objeto. Te recompensarán con oro, cristales, piedras mitológicas y más. Siempre debes revisar tus misiones diarias y completarlas antes de que expiren.

-

Enfócate en una clase y estudia a cada héroe para la mejor formación

- -

También debes estudiar a cada héroe que pertenece a tu clase. Cada héroe tiene un rol y una habilidad diferentes que pueden afectar tu juego. Por ejemplo, algunos héroes son buenos para hacer daño, mientras que otros son buenos para curar o pulir. Deberías aprender cómo funciona cada héroe y cómo utilizarlo eficazmente en combate.

-

Únete a un gremio y usa mercenarios y mascotas para ayudarte en el combate

-

Dungeon Quest no es un juego en solitario. Puedes unirte a un gremio e interactuar con otros jugadores que comparten tu pasión por el juego. Puedes chatear con ellos, comerciar con ellos o ayudarlos en sus misiones. También puedes participar en eventos y competiciones de gremios para obtener más recompensas y diversión.

-

También puedes usar mercenarios y mascotas para ayudarte en el combate. Los mercenarios son otros héroes que puedes contratar por una tarifa para unirte a tu aventura. Lucharán junto a ti y utilizarán sus habilidades para ayudarte a derrotar a los enemigos. Las mascotas son criaturas lindas que puedes adoptar o comprar para seguirte. También lucharán contigo y te proporcionarán bonificaciones o efectos pasivos.

-

Progresa en la torre infinita y ponte a prueba con diferentes dificultades

-

Dungeon Quest tiene un modo de torre infinita que te permite subir a una torre sin fin de pisos generados aleatoriamente. Cada piso tiene diferentes enemigos, trampas, rompecabezas y recompensas. Cuanto más alto vayas, más difícil será, pero mejor será el botín. También puede elegir diferentes dificultades para desafiarse y ganar más recompensas. El modo torre infinita es una gran manera de poner a prueba tus habilidades y divertirse en Dungeon Quest.

-

Conclusión

- -

Preguntas frecuentes

-

Aquí están algunas de las preguntas más frecuentes sobre Dungeon Quest y su mod apk:

-

Es Dungeon Quest mod apk seguro de usar?

-

Sí, Dungeon Quest mod apk es seguro de usar siempre y cuando se descarga desde una fuente de confianza. Sin embargo, siempre debe tener cuidado al instalar aplicaciones de fuentes desconocidas, ya que pueden contener virus o malware que pueden dañar su dispositivo. También debe hacer una copia de seguridad de sus datos antes de instalar el apk mod, en caso de que algo salga mal.

-

¿Me prohibirán por usar Dungeon Quest mod apk?

-

No, usted no será prohibido para el uso de Dungeon Quest mod apk. Esto se debe a que Dungeon Quest es un juego fuera de línea que no requiere una conexión a Internet o una cuenta para jugar. Por lo tanto, no hay manera para los desarrolladores o los servidores de juegos para detectar o prohibir el uso de la apk mod. Puedes jugar sin preocupaciones.

-

¿Puedo jugar Dungeon Quest con mis amigos?

-

Sí, puedes jugar a Dungeon Quest con tus amigos. Aunque Dungeon Quest es un juego offline, tiene un modo multijugador que te permite jugar con otros jugadores online. Puedes unirte o crear una habitación e invitar a tus amigos a unirse a ti. También puedes chatear con ellos y cooperar con ellos en combate. Jugar a Dungeon Quest con tus amigos es muy divertido y gratificante.

-

¿Cómo puedo actualizar Dungeon Quest mod apk?

-

Para actualizar Dungeon Quest mod apk, es necesario descargar la última versión del archivo apk mod de la misma fuente que lo descargó de antes. A continuación, es necesario desinstalar la versión anterior de la apk mod e instalar el nuevo. También debe comprobar si la nueva versión del apk mod es compatible con su dispositivo y la versión del juego.

-

¿Cuáles son algunos otros juegos como Dungeon Quest?

-

Si te gusta Dungeon Quest, también te pueden gustar otros juegos similares. Algunos de estos juegos son:

-
    - -
  • Eternium: Un juego clásico de hack-and-slash RPG con gráficos impresionantes, misiones impulsadas por historias y sistema de elaboración.
  • Nonstop Knight: Un juego de rol casual con mazmorras sin fin, botín y mejoras.

-
-
\ No newline at end of file diff --git a/spaces/BetterAPI/BetterChat_new/src/lib/server/abortedGenerations.ts b/spaces/BetterAPI/BetterChat_new/src/lib/server/abortedGenerations.ts deleted file mode 100644 index 575cf637bfef812c40905e35570ba3ca1a31b241..0000000000000000000000000000000000000000 --- a/spaces/BetterAPI/BetterChat_new/src/lib/server/abortedGenerations.ts +++ /dev/null @@ -1,29 +0,0 @@ -// Shouldn't be needed if we dove into sveltekit internals, see https://github.com/huggingface/chat-ui/pull/88#issuecomment-1523173850 - -import { setTimeout } from "node:timers/promises"; -import { collections } from "./database"; - -let closed = false; -process.on("SIGINT", () => { - closed = true; -}); - -export let abortedGenerations: Map = new Map(); - -async function maintainAbortedGenerations() { - while (!closed) { - await setTimeout(1000); - - try { - const aborts = await collections.abortedGenerations.find({}).sort({ createdAt: 1 }).toArray(); - - abortedGenerations = new Map( - aborts.map(({ conversationId, createdAt }) => [conversationId.toString(), createdAt]) - ); - } catch (err) { - console.error(err); - } - } -} - -maintainAbortedGenerations(); diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/distlib/locators.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/distlib/locators.py deleted file mode 100644 index 966ebc0e37d6104a8e0e1fefe9dc526f39409ce2..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/distlib/locators.py +++ /dev/null @@ -1,1300 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright (C) 2012-2015 Vinay Sajip. -# Licensed to the Python Software Foundation under a contributor agreement. -# See LICENSE.txt and CONTRIBUTORS.txt. -# - -import gzip -from io import BytesIO -import json -import logging -import os -import posixpath -import re -try: - import threading -except ImportError: # pragma: no cover - import dummy_threading as threading -import zlib - -from . import DistlibException -from .compat import (urljoin, urlparse, urlunparse, url2pathname, pathname2url, - queue, quote, unescape, build_opener, - HTTPRedirectHandler as BaseRedirectHandler, text_type, - Request, HTTPError, URLError) -from .database import Distribution, DistributionPath, make_dist -from .metadata import Metadata, MetadataInvalidError -from .util import (cached_property, ensure_slash, split_filename, get_project_data, - parse_requirement, parse_name_and_version, ServerProxy, - normalize_name) -from .version import get_scheme, UnsupportedVersionError -from .wheel import Wheel, is_compatible - -logger = logging.getLogger(__name__) - -HASHER_HASH = re.compile(r'^(\w+)=([a-f0-9]+)') -CHARSET = re.compile(r';\s*charset\s*=\s*(.*)\s*$', re.I) -HTML_CONTENT_TYPE = re.compile('text/html|application/x(ht)?ml') -DEFAULT_INDEX = 'https://pypi.org/pypi' - -def get_all_distribution_names(url=None): - """ - Return all distribution names known by an index. - :param url: The URL of the index. - :return: A list of all known distribution names. - """ - if url is None: - url = DEFAULT_INDEX - client = ServerProxy(url, timeout=3.0) - try: - return client.list_packages() - finally: - client('close')() - -class RedirectHandler(BaseRedirectHandler): - """ - A class to work around a bug in some Python 3.2.x releases. - """ - # There's a bug in the base version for some 3.2.x - # (e.g. 3.2.2 on Ubuntu Oneiric). If a Location header - # returns e.g. 
/abc, it bails because it says the scheme '' - # is bogus, when actually it should use the request's - # URL for the scheme. See Python issue #13696. - def http_error_302(self, req, fp, code, msg, headers): - # Some servers (incorrectly) return multiple Location headers - # (so probably same goes for URI). Use first header. - newurl = None - for key in ('location', 'uri'): - if key in headers: - newurl = headers[key] - break - if newurl is None: # pragma: no cover - return - urlparts = urlparse(newurl) - if urlparts.scheme == '': - newurl = urljoin(req.get_full_url(), newurl) - if hasattr(headers, 'replace_header'): - headers.replace_header(key, newurl) - else: - headers[key] = newurl - return BaseRedirectHandler.http_error_302(self, req, fp, code, msg, - headers) - - http_error_301 = http_error_303 = http_error_307 = http_error_302 - -class Locator(object): - """ - A base class for locators - things that locate distributions. - """ - source_extensions = ('.tar.gz', '.tar.bz2', '.tar', '.zip', '.tgz', '.tbz') - binary_extensions = ('.egg', '.exe', '.whl') - excluded_extensions = ('.pdf',) - - # A list of tags indicating which wheels you want to match. The default - # value of None matches against the tags compatible with the running - # Python. If you want to match other values, set wheel_tags on a locator - # instance to a list of tuples (pyver, abi, arch) which you want to match. - wheel_tags = None - - downloadable_extensions = source_extensions + ('.whl',) - - def __init__(self, scheme='default'): - """ - Initialise an instance. - :param scheme: Because locators look for most recent versions, they - need to know the version scheme to use. This specifies - the current PEP-recommended scheme - use ``'legacy'`` - if you need to support existing distributions on PyPI. - """ - self._cache = {} - self.scheme = scheme - # Because of bugs in some of the handlers on some of the platforms, - # we use our own opener rather than just using urlopen. - self.opener = build_opener(RedirectHandler()) - # If get_project() is called from locate(), the matcher instance - # is set from the requirement passed to locate(). See issue #18 for - # why this can be useful to know. - self.matcher = None - self.errors = queue.Queue() - - def get_errors(self): - """ - Return any errors which have occurred. - """ - result = [] - while not self.errors.empty(): # pragma: no cover - try: - e = self.errors.get(False) - result.append(e) - except self.errors.Empty: - continue - self.errors.task_done() - return result - - def clear_errors(self): - """ - Clear any errors which may have been logged. - """ - # Just get the errors and throw them away - self.get_errors() - - def clear_cache(self): - self._cache.clear() - - def _get_scheme(self): - return self._scheme - - def _set_scheme(self, value): - self._scheme = value - - scheme = property(_get_scheme, _set_scheme) - - def _get_project(self, name): - """ - For a given project, get a dictionary mapping available versions to Distribution - instances. - - This should be implemented in subclasses. - - If called from a locate() request, self.matcher will be set to a - matcher for the requirement to satisfy, otherwise it will be None. - """ - raise NotImplementedError('Please implement in the subclass') - - def get_distribution_names(self): - """ - Return all the distribution names known to this locator. 
- """ - raise NotImplementedError('Please implement in the subclass') - - def get_project(self, name): - """ - For a given project, get a dictionary mapping available versions to Distribution - instances. - - This calls _get_project to do all the work, and just implements a caching layer on top. - """ - if self._cache is None: # pragma: no cover - result = self._get_project(name) - elif name in self._cache: - result = self._cache[name] - else: - self.clear_errors() - result = self._get_project(name) - self._cache[name] = result - return result - - def score_url(self, url): - """ - Give an url a score which can be used to choose preferred URLs - for a given project release. - """ - t = urlparse(url) - basename = posixpath.basename(t.path) - compatible = True - is_wheel = basename.endswith('.whl') - is_downloadable = basename.endswith(self.downloadable_extensions) - if is_wheel: - compatible = is_compatible(Wheel(basename), self.wheel_tags) - return (t.scheme == 'https', 'pypi.org' in t.netloc, - is_downloadable, is_wheel, compatible, basename) - - def prefer_url(self, url1, url2): - """ - Choose one of two URLs where both are candidates for distribution - archives for the same version of a distribution (for example, - .tar.gz vs. zip). - - The current implementation favours https:// URLs over http://, archives - from PyPI over those from other locations, wheel compatibility (if a - wheel) and then the archive name. - """ - result = url2 - if url1: - s1 = self.score_url(url1) - s2 = self.score_url(url2) - if s1 > s2: - result = url1 - if result != url2: - logger.debug('Not replacing %r with %r', url1, url2) - else: - logger.debug('Replacing %r with %r', url1, url2) - return result - - def split_filename(self, filename, project_name): - """ - Attempt to split a filename in project name, version and Python version. - """ - return split_filename(filename, project_name) - - def convert_url_to_download_info(self, url, project_name): - """ - See if a URL is a candidate for a download URL for a project (the URL - has typically been scraped from an HTML page). - - If it is, a dictionary is returned with keys "name", "version", - "filename" and "url"; otherwise, None is returned. 
- """ - def same_project(name1, name2): - return normalize_name(name1) == normalize_name(name2) - - result = None - scheme, netloc, path, params, query, frag = urlparse(url) - if frag.lower().startswith('egg='): # pragma: no cover - logger.debug('%s: version hint in fragment: %r', - project_name, frag) - m = HASHER_HASH.match(frag) - if m: - algo, digest = m.groups() - else: - algo, digest = None, None - origpath = path - if path and path[-1] == '/': # pragma: no cover - path = path[:-1] - if path.endswith('.whl'): - try: - wheel = Wheel(path) - if not is_compatible(wheel, self.wheel_tags): - logger.debug('Wheel not compatible: %s', path) - else: - if project_name is None: - include = True - else: - include = same_project(wheel.name, project_name) - if include: - result = { - 'name': wheel.name, - 'version': wheel.version, - 'filename': wheel.filename, - 'url': urlunparse((scheme, netloc, origpath, - params, query, '')), - 'python-version': ', '.join( - ['.'.join(list(v[2:])) for v in wheel.pyver]), - } - except Exception as e: # pragma: no cover - logger.warning('invalid path for wheel: %s', path) - elif not path.endswith(self.downloadable_extensions): # pragma: no cover - logger.debug('Not downloadable: %s', path) - else: # downloadable extension - path = filename = posixpath.basename(path) - for ext in self.downloadable_extensions: - if path.endswith(ext): - path = path[:-len(ext)] - t = self.split_filename(path, project_name) - if not t: # pragma: no cover - logger.debug('No match for project/version: %s', path) - else: - name, version, pyver = t - if not project_name or same_project(project_name, name): - result = { - 'name': name, - 'version': version, - 'filename': filename, - 'url': urlunparse((scheme, netloc, origpath, - params, query, '')), - #'packagetype': 'sdist', - } - if pyver: # pragma: no cover - result['python-version'] = pyver - break - if result and algo: - result['%s_digest' % algo] = digest - return result - - def _get_digest(self, info): - """ - Get a digest from a dictionary by looking at a "digests" dictionary - or keys of the form 'algo_digest'. - - Returns a 2-tuple (algo, digest) if found, else None. Currently - looks only for SHA256, then MD5. - """ - result = None - if 'digests' in info: - digests = info['digests'] - for algo in ('sha256', 'md5'): - if algo in digests: - result = (algo, digests[algo]) - break - if not result: - for algo in ('sha256', 'md5'): - key = '%s_digest' % algo - if key in info: - result = (algo, info[key]) - break - return result - - def _update_version_data(self, result, info): - """ - Update a result dictionary (the final result from _get_project) with a - dictionary for a specific version, which typically holds information - gleaned from a filename or URL for an archive for the distribution. - """ - name = info.pop('name') - version = info.pop('version') - if version in result: - dist = result[version] - md = dist.metadata - else: - dist = make_dist(name, version, scheme=self.scheme) - md = dist.metadata - dist.digest = digest = self._get_digest(info) - url = info['url'] - result['digests'][url] = digest - if md.source_url != info['url']: - md.source_url = self.prefer_url(md.source_url, url) - result['urls'].setdefault(version, set()).add(url) - dist.locator = self - result[version] = dist - - def locate(self, requirement, prereleases=False): - """ - Find the most recent distribution which matches the given - requirement. 
- - :param requirement: A requirement of the form 'foo (1.0)' or perhaps - 'foo (>= 1.0, < 2.0, != 1.3)' - :param prereleases: If ``True``, allow pre-release versions - to be located. Otherwise, pre-release versions - are not returned. - :return: A :class:`Distribution` instance, or ``None`` if no such - distribution could be located. - """ - result = None - r = parse_requirement(requirement) - if r is None: # pragma: no cover - raise DistlibException('Not a valid requirement: %r' % requirement) - scheme = get_scheme(self.scheme) - self.matcher = matcher = scheme.matcher(r.requirement) - logger.debug('matcher: %s (%s)', matcher, type(matcher).__name__) - versions = self.get_project(r.name) - if len(versions) > 2: # urls and digests keys are present - # sometimes, versions are invalid - slist = [] - vcls = matcher.version_class - for k in versions: - if k in ('urls', 'digests'): - continue - try: - if not matcher.match(k): - pass # logger.debug('%s did not match %r', matcher, k) - else: - if prereleases or not vcls(k).is_prerelease: - slist.append(k) - # else: - # logger.debug('skipping pre-release ' - # 'version %s of %s', k, matcher.name) - except Exception: # pragma: no cover - logger.warning('error matching %s with %r', matcher, k) - pass # slist.append(k) - if len(slist) > 1: - slist = sorted(slist, key=scheme.key) - if slist: - logger.debug('sorted list: %s', slist) - version = slist[-1] - result = versions[version] - if result: - if r.extras: - result.extras = r.extras - result.download_urls = versions.get('urls', {}).get(version, set()) - d = {} - sd = versions.get('digests', {}) - for url in result.download_urls: - if url in sd: # pragma: no cover - d[url] = sd[url] - result.digests = d - self.matcher = None - return result - - -class PyPIRPCLocator(Locator): - """ - This locator uses XML-RPC to locate distributions. It therefore - cannot be used with simple mirrors (that only mirror file content). - """ - def __init__(self, url, **kwargs): - """ - Initialise an instance. - - :param url: The URL to use for XML-RPC. - :param kwargs: Passed to the superclass constructor. - """ - super(PyPIRPCLocator, self).__init__(**kwargs) - self.base_url = url - self.client = ServerProxy(url, timeout=3.0) - - def get_distribution_names(self): - """ - Return all the distribution names known to this locator. - """ - return set(self.client.list_packages()) - - def _get_project(self, name): - result = {'urls': {}, 'digests': {}} - versions = self.client.package_releases(name, True) - for v in versions: - urls = self.client.release_urls(name, v) - data = self.client.release_data(name, v) - metadata = Metadata(scheme=self.scheme) - metadata.name = data['name'] - metadata.version = data['version'] - metadata.license = data.get('license') - metadata.keywords = data.get('keywords', []) - metadata.summary = data.get('summary') - dist = Distribution(metadata) - if urls: - info = urls[0] - metadata.source_url = info['url'] - dist.digest = self._get_digest(info) - dist.locator = self - result[v] = dist - for info in urls: - url = info['url'] - digest = self._get_digest(info) - result['urls'].setdefault(v, set()).add(url) - result['digests'][url] = digest - return result - -class PyPIJSONLocator(Locator): - """ - This locator uses PyPI's JSON interface. It's very limited in functionality - and probably not worth using. 
- """ - def __init__(self, url, **kwargs): - super(PyPIJSONLocator, self).__init__(**kwargs) - self.base_url = ensure_slash(url) - - def get_distribution_names(self): - """ - Return all the distribution names known to this locator. - """ - raise NotImplementedError('Not available from this locator') - - def _get_project(self, name): - result = {'urls': {}, 'digests': {}} - url = urljoin(self.base_url, '%s/json' % quote(name)) - try: - resp = self.opener.open(url) - data = resp.read().decode() # for now - d = json.loads(data) - md = Metadata(scheme=self.scheme) - data = d['info'] - md.name = data['name'] - md.version = data['version'] - md.license = data.get('license') - md.keywords = data.get('keywords', []) - md.summary = data.get('summary') - dist = Distribution(md) - dist.locator = self - urls = d['urls'] - result[md.version] = dist - for info in d['urls']: - url = info['url'] - dist.download_urls.add(url) - dist.digests[url] = self._get_digest(info) - result['urls'].setdefault(md.version, set()).add(url) - result['digests'][url] = self._get_digest(info) - # Now get other releases - for version, infos in d['releases'].items(): - if version == md.version: - continue # already done - omd = Metadata(scheme=self.scheme) - omd.name = md.name - omd.version = version - odist = Distribution(omd) - odist.locator = self - result[version] = odist - for info in infos: - url = info['url'] - odist.download_urls.add(url) - odist.digests[url] = self._get_digest(info) - result['urls'].setdefault(version, set()).add(url) - result['digests'][url] = self._get_digest(info) -# for info in urls: -# md.source_url = info['url'] -# dist.digest = self._get_digest(info) -# dist.locator = self -# for info in urls: -# url = info['url'] -# result['urls'].setdefault(md.version, set()).add(url) -# result['digests'][url] = self._get_digest(info) - except Exception as e: - self.errors.put(text_type(e)) - logger.exception('JSON fetch failed: %s', e) - return result - - -class Page(object): - """ - This class represents a scraped HTML page. - """ - # The following slightly hairy-looking regex just looks for the contents of - # an anchor link, which has an attribute "href" either immediately preceded - # or immediately followed by a "rel" attribute. The attribute values can be - # declared with double quotes, single quotes or no quotes - which leads to - # the length of the expression. - _href = re.compile(""" -(rel\\s*=\\s*(?:"(?P[^"]*)"|'(?P[^']*)'|(?P[^>\\s\n]*))\\s+)? -href\\s*=\\s*(?:"(?P[^"]*)"|'(?P[^']*)'|(?P[^>\\s\n]*)) -(\\s+rel\\s*=\\s*(?:"(?P[^"]*)"|'(?P[^']*)'|(?P[^>\\s\n]*)))? -""", re.I | re.S | re.X) - _base = re.compile(r"""]+)""", re.I | re.S) - - def __init__(self, data, url): - """ - Initialise an instance with the Unicode page contents and the URL they - came from. - """ - self.data = data - self.base_url = self.url = url - m = self._base.search(self.data) - if m: - self.base_url = m.group(1) - - _clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I) - - @cached_property - def links(self): - """ - Return the URLs of all the links on a page together with information - about their "rel" attribute, for determining which ones to treat as - downloads and which ones to queue for further scraping. - """ - def clean(url): - "Tidy up an URL." 
- scheme, netloc, path, params, query, frag = urlparse(url) - return urlunparse((scheme, netloc, quote(path), - params, query, frag)) - - result = set() - for match in self._href.finditer(self.data): - d = match.groupdict('') - rel = (d['rel1'] or d['rel2'] or d['rel3'] or - d['rel4'] or d['rel5'] or d['rel6']) - url = d['url1'] or d['url2'] or d['url3'] - url = urljoin(self.base_url, url) - url = unescape(url) - url = self._clean_re.sub(lambda m: '%%%2x' % ord(m.group(0)), url) - result.add((url, rel)) - # We sort the result, hoping to bring the most recent versions - # to the front - result = sorted(result, key=lambda t: t[0], reverse=True) - return result - - -class SimpleScrapingLocator(Locator): - """ - A locator which scrapes HTML pages to locate downloads for a distribution. - This runs multiple threads to do the I/O; performance is at least as good - as pip's PackageFinder, which works in an analogous fashion. - """ - - # These are used to deal with various Content-Encoding schemes. - decoders = { - 'deflate': zlib.decompress, - 'gzip': lambda b: gzip.GzipFile(fileobj=BytesIO(b)).read(), - 'none': lambda b: b, - } - - def __init__(self, url, timeout=None, num_workers=10, **kwargs): - """ - Initialise an instance. - :param url: The root URL to use for scraping. - :param timeout: The timeout, in seconds, to be applied to requests. - This defaults to ``None`` (no timeout specified). - :param num_workers: The number of worker threads you want to do I/O, - This defaults to 10. - :param kwargs: Passed to the superclass. - """ - super(SimpleScrapingLocator, self).__init__(**kwargs) - self.base_url = ensure_slash(url) - self.timeout = timeout - self._page_cache = {} - self._seen = set() - self._to_fetch = queue.Queue() - self._bad_hosts = set() - self.skip_externals = False - self.num_workers = num_workers - self._lock = threading.RLock() - # See issue #45: we need to be resilient when the locator is used - # in a thread, e.g. with concurrent.futures. We can't use self._lock - # as it is for coordinating our internal threads - the ones created - # in _prepare_threads. - self._gplock = threading.RLock() - self.platform_check = False # See issue #112 - - def _prepare_threads(self): - """ - Threads are created only when get_project is called, and terminate - before it returns. They are there primarily to parallelise I/O (i.e. - fetching web pages). - """ - self._threads = [] - for i in range(self.num_workers): - t = threading.Thread(target=self._fetch) - t.daemon = True - t.start() - self._threads.append(t) - - def _wait_threads(self): - """ - Tell all the threads to terminate (by sending a sentinel value) and - wait for them to do so. - """ - # Note that you need two loops, since you can't say which - # thread will get each sentinel - for t in self._threads: - self._to_fetch.put(None) # sentinel - for t in self._threads: - t.join() - self._threads = [] - - def _get_project(self, name): - result = {'urls': {}, 'digests': {}} - with self._gplock: - self.result = result - self.project_name = name - url = urljoin(self.base_url, '%s/' % quote(name)) - self._seen.clear() - self._page_cache.clear() - self._prepare_threads() - try: - logger.debug('Queueing %s', url) - self._to_fetch.put(url) - self._to_fetch.join() - finally: - self._wait_threads() - del self.result - return result - - platform_dependent = re.compile(r'\b(linux_(i\d86|x86_64|arm\w+)|' - r'win(32|_amd64)|macosx_?\d+)\b', re.I) - - def _is_platform_dependent(self, url): - """ - Does an URL refer to a platform-specific download? 
- """ - return self.platform_dependent.search(url) - - def _process_download(self, url): - """ - See if an URL is a suitable download for a project. - - If it is, register information in the result dictionary (for - _get_project) about the specific version it's for. - - Note that the return value isn't actually used other than as a boolean - value. - """ - if self.platform_check and self._is_platform_dependent(url): - info = None - else: - info = self.convert_url_to_download_info(url, self.project_name) - logger.debug('process_download: %s -> %s', url, info) - if info: - with self._lock: # needed because self.result is shared - self._update_version_data(self.result, info) - return info - - def _should_queue(self, link, referrer, rel): - """ - Determine whether a link URL from a referring page and with a - particular "rel" attribute should be queued for scraping. - """ - scheme, netloc, path, _, _, _ = urlparse(link) - if path.endswith(self.source_extensions + self.binary_extensions + - self.excluded_extensions): - result = False - elif self.skip_externals and not link.startswith(self.base_url): - result = False - elif not referrer.startswith(self.base_url): - result = False - elif rel not in ('homepage', 'download'): - result = False - elif scheme not in ('http', 'https', 'ftp'): - result = False - elif self._is_platform_dependent(link): - result = False - else: - host = netloc.split(':', 1)[0] - if host.lower() == 'localhost': - result = False - else: - result = True - logger.debug('should_queue: %s (%s) from %s -> %s', link, rel, - referrer, result) - return result - - def _fetch(self): - """ - Get a URL to fetch from the work queue, get the HTML page, examine its - links for download candidates and candidates for further scraping. - - This is a handy method to run in a thread. - """ - while True: - url = self._to_fetch.get() - try: - if url: - page = self.get_page(url) - if page is None: # e.g. after an error - continue - for link, rel in page.links: - if link not in self._seen: - try: - self._seen.add(link) - if (not self._process_download(link) and - self._should_queue(link, url, rel)): - logger.debug('Queueing %s from %s', link, url) - self._to_fetch.put(link) - except MetadataInvalidError: # e.g. invalid versions - pass - except Exception as e: # pragma: no cover - self.errors.put(text_type(e)) - finally: - # always do this, to avoid hangs :-) - self._to_fetch.task_done() - if not url: - #logger.debug('Sentinel seen, quitting.') - break - - def get_page(self, url): - """ - Get the HTML for an URL, possibly from an in-memory cache. - - XXX TODO Note: this cache is never actually cleared. It's assumed that - the data won't get stale over the lifetime of a locator instance (not - necessarily true for the default_locator). 
- """ - # http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api - scheme, netloc, path, _, _, _ = urlparse(url) - if scheme == 'file' and os.path.isdir(url2pathname(path)): - url = urljoin(ensure_slash(url), 'index.html') - - if url in self._page_cache: - result = self._page_cache[url] - logger.debug('Returning %s from cache: %s', url, result) - else: - host = netloc.split(':', 1)[0] - result = None - if host in self._bad_hosts: - logger.debug('Skipping %s due to bad host %s', url, host) - else: - req = Request(url, headers={'Accept-encoding': 'identity'}) - try: - logger.debug('Fetching %s', url) - resp = self.opener.open(req, timeout=self.timeout) - logger.debug('Fetched %s', url) - headers = resp.info() - content_type = headers.get('Content-Type', '') - if HTML_CONTENT_TYPE.match(content_type): - final_url = resp.geturl() - data = resp.read() - encoding = headers.get('Content-Encoding') - if encoding: - decoder = self.decoders[encoding] # fail if not found - data = decoder(data) - encoding = 'utf-8' - m = CHARSET.search(content_type) - if m: - encoding = m.group(1) - try: - data = data.decode(encoding) - except UnicodeError: # pragma: no cover - data = data.decode('latin-1') # fallback - result = Page(data, final_url) - self._page_cache[final_url] = result - except HTTPError as e: - if e.code != 404: - logger.exception('Fetch failed: %s: %s', url, e) - except URLError as e: # pragma: no cover - logger.exception('Fetch failed: %s: %s', url, e) - with self._lock: - self._bad_hosts.add(host) - except Exception as e: # pragma: no cover - logger.exception('Fetch failed: %s: %s', url, e) - finally: - self._page_cache[url] = result # even if None (failure) - return result - - _distname_re = re.compile(']*>([^<]+)<') - - def get_distribution_names(self): - """ - Return all the distribution names known to this locator. - """ - result = set() - page = self.get_page(self.base_url) - if not page: - raise DistlibException('Unable to get %s' % self.base_url) - for match in self._distname_re.finditer(page.data): - result.add(match.group(1)) - return result - -class DirectoryLocator(Locator): - """ - This class locates distributions in a directory tree. - """ - - def __init__(self, path, **kwargs): - """ - Initialise an instance. - :param path: The root of the directory tree to search. - :param kwargs: Passed to the superclass constructor, - except for: - * recursive - if True (the default), subdirectories are - recursed into. If False, only the top-level directory - is searched, - """ - self.recursive = kwargs.pop('recursive', True) - super(DirectoryLocator, self).__init__(**kwargs) - path = os.path.abspath(path) - if not os.path.isdir(path): # pragma: no cover - raise DistlibException('Not a directory: %r' % path) - self.base_dir = path - - def should_include(self, filename, parent): - """ - Should a filename be considered as a candidate for a distribution - archive? As well as the filename, the directory which contains it - is provided, though not used by the current implementation. 
- """ - return filename.endswith(self.downloadable_extensions) - - def _get_project(self, name): - result = {'urls': {}, 'digests': {}} - for root, dirs, files in os.walk(self.base_dir): - for fn in files: - if self.should_include(fn, root): - fn = os.path.join(root, fn) - url = urlunparse(('file', '', - pathname2url(os.path.abspath(fn)), - '', '', '')) - info = self.convert_url_to_download_info(url, name) - if info: - self._update_version_data(result, info) - if not self.recursive: - break - return result - - def get_distribution_names(self): - """ - Return all the distribution names known to this locator. - """ - result = set() - for root, dirs, files in os.walk(self.base_dir): - for fn in files: - if self.should_include(fn, root): - fn = os.path.join(root, fn) - url = urlunparse(('file', '', - pathname2url(os.path.abspath(fn)), - '', '', '')) - info = self.convert_url_to_download_info(url, None) - if info: - result.add(info['name']) - if not self.recursive: - break - return result - -class JSONLocator(Locator): - """ - This locator uses special extended metadata (not available on PyPI) and is - the basis of performant dependency resolution in distlib. Other locators - require archive downloads before dependencies can be determined! As you - might imagine, that can be slow. - """ - def get_distribution_names(self): - """ - Return all the distribution names known to this locator. - """ - raise NotImplementedError('Not available from this locator') - - def _get_project(self, name): - result = {'urls': {}, 'digests': {}} - data = get_project_data(name) - if data: - for info in data.get('files', []): - if info['ptype'] != 'sdist' or info['pyversion'] != 'source': - continue - # We don't store summary in project metadata as it makes - # the data bigger for no benefit during dependency - # resolution - dist = make_dist(data['name'], info['version'], - summary=data.get('summary', - 'Placeholder for summary'), - scheme=self.scheme) - md = dist.metadata - md.source_url = info['url'] - # TODO SHA256 digest - if 'digest' in info and info['digest']: - dist.digest = ('md5', info['digest']) - md.dependencies = info.get('requirements', {}) - dist.exports = info.get('exports', {}) - result[dist.version] = dist - result['urls'].setdefault(dist.version, set()).add(info['url']) - return result - -class DistPathLocator(Locator): - """ - This locator finds installed distributions in a path. It can be useful for - adding to an :class:`AggregatingLocator`. - """ - def __init__(self, distpath, **kwargs): - """ - Initialise an instance. - - :param distpath: A :class:`DistributionPath` instance to search. - """ - super(DistPathLocator, self).__init__(**kwargs) - assert isinstance(distpath, DistributionPath) - self.distpath = distpath - - def _get_project(self, name): - dist = self.distpath.get_distribution(name) - if dist is None: - result = {'urls': {}, 'digests': {}} - else: - result = { - dist.version: dist, - 'urls': {dist.version: set([dist.source_url])}, - 'digests': {dist.version: set([None])} - } - return result - - -class AggregatingLocator(Locator): - """ - This class allows you to chain and/or merge a list of locators. - """ - def __init__(self, *locators, **kwargs): - """ - Initialise an instance. - - :param locators: The list of locators to search. - :param kwargs: Passed to the superclass constructor, - except for: - * merge - if False (the default), the first successful - search from any of the locators is returned. If True, - the results from all locators are merged (this can be - slow). 
- """ - self.merge = kwargs.pop('merge', False) - self.locators = locators - super(AggregatingLocator, self).__init__(**kwargs) - - def clear_cache(self): - super(AggregatingLocator, self).clear_cache() - for locator in self.locators: - locator.clear_cache() - - def _set_scheme(self, value): - self._scheme = value - for locator in self.locators: - locator.scheme = value - - scheme = property(Locator.scheme.fget, _set_scheme) - - def _get_project(self, name): - result = {} - for locator in self.locators: - d = locator.get_project(name) - if d: - if self.merge: - files = result.get('urls', {}) - digests = result.get('digests', {}) - # next line could overwrite result['urls'], result['digests'] - result.update(d) - df = result.get('urls') - if files and df: - for k, v in files.items(): - if k in df: - df[k] |= v - else: - df[k] = v - dd = result.get('digests') - if digests and dd: - dd.update(digests) - else: - # See issue #18. If any dists are found and we're looking - # for specific constraints, we only return something if - # a match is found. For example, if a DirectoryLocator - # returns just foo (1.0) while we're looking for - # foo (>= 2.0), we'll pretend there was nothing there so - # that subsequent locators can be queried. Otherwise we - # would just return foo (1.0) which would then lead to a - # failure to find foo (>= 2.0), because other locators - # weren't searched. Note that this only matters when - # merge=False. - if self.matcher is None: - found = True - else: - found = False - for k in d: - if self.matcher.match(k): - found = True - break - if found: - result = d - break - return result - - def get_distribution_names(self): - """ - Return all the distribution names known to this locator. - """ - result = set() - for locator in self.locators: - try: - result |= locator.get_distribution_names() - except NotImplementedError: - pass - return result - - -# We use a legacy scheme simply because most of the dists on PyPI use legacy -# versions which don't conform to PEP 440. -default_locator = AggregatingLocator( - # JSONLocator(), # don't use as PEP 426 is withdrawn - SimpleScrapingLocator('https://pypi.org/simple/', - timeout=3.0), - scheme='legacy') - -locate = default_locator.locate - - -class DependencyFinder(object): - """ - Locate dependencies for distributions. - """ - - def __init__(self, locator=None): - """ - Initialise an instance, using the specified locator - to locate distributions. - """ - self.locator = locator or default_locator - self.scheme = get_scheme(self.locator.scheme) - - def add_distribution(self, dist): - """ - Add a distribution to the finder. This will update internal information - about who provides what. - :param dist: The distribution to add. - """ - logger.debug('adding distribution %s', dist) - name = dist.key - self.dists_by_name[name] = dist - self.dists[(name, dist.version)] = dist - for p in dist.provides: - name, version = parse_name_and_version(p) - logger.debug('Add to provided: %s, %s, %s', name, version, dist) - self.provided.setdefault(name, set()).add((version, dist)) - - def remove_distribution(self, dist): - """ - Remove a distribution from the finder. This will update internal - information about who provides what. - :param dist: The distribution to remove. 
- """ - logger.debug('removing distribution %s', dist) - name = dist.key - del self.dists_by_name[name] - del self.dists[(name, dist.version)] - for p in dist.provides: - name, version = parse_name_and_version(p) - logger.debug('Remove from provided: %s, %s, %s', name, version, dist) - s = self.provided[name] - s.remove((version, dist)) - if not s: - del self.provided[name] - - def get_matcher(self, reqt): - """ - Get a version matcher for a requirement. - :param reqt: The requirement - :type reqt: str - :return: A version matcher (an instance of - :class:`distlib.version.Matcher`). - """ - try: - matcher = self.scheme.matcher(reqt) - except UnsupportedVersionError: # pragma: no cover - # XXX compat-mode if cannot read the version - name = reqt.split()[0] - matcher = self.scheme.matcher(name) - return matcher - - def find_providers(self, reqt): - """ - Find the distributions which can fulfill a requirement. - - :param reqt: The requirement. - :type reqt: str - :return: A set of distribution which can fulfill the requirement. - """ - matcher = self.get_matcher(reqt) - name = matcher.key # case-insensitive - result = set() - provided = self.provided - if name in provided: - for version, provider in provided[name]: - try: - match = matcher.match(version) - except UnsupportedVersionError: - match = False - - if match: - result.add(provider) - break - return result - - def try_to_replace(self, provider, other, problems): - """ - Attempt to replace one provider with another. This is typically used - when resolving dependencies from multiple sources, e.g. A requires - (B >= 1.0) while C requires (B >= 1.1). - - For successful replacement, ``provider`` must meet all the requirements - which ``other`` fulfills. - - :param provider: The provider we are trying to replace with. - :param other: The provider we're trying to replace. - :param problems: If False is returned, this will contain what - problems prevented replacement. This is currently - a tuple of the literal string 'cantreplace', - ``provider``, ``other`` and the set of requirements - that ``provider`` couldn't fulfill. - :return: True if we can replace ``other`` with ``provider``, else - False. - """ - rlist = self.reqts[other] - unmatched = set() - for s in rlist: - matcher = self.get_matcher(s) - if not matcher.match(provider.version): - unmatched.add(s) - if unmatched: - # can't replace other with provider - problems.add(('cantreplace', provider, other, - frozenset(unmatched))) - result = False - else: - # can replace other with provider - self.remove_distribution(other) - del self.reqts[other] - for s in rlist: - self.reqts.setdefault(provider, set()).add(s) - self.add_distribution(provider) - result = True - return result - - def find(self, requirement, meta_extras=None, prereleases=False): - """ - Find a distribution and all distributions it depends on. - - :param requirement: The requirement specifying the distribution to - find, or a Distribution instance. - :param meta_extras: A list of meta extras such as :test:, :build: and - so on. - :param prereleases: If ``True``, allow pre-release versions to be - returned - otherwise, don't return prereleases - unless they're all that's available. - - Return a set of :class:`Distribution` instances and a set of - problems. 
- - The distributions returned should be such that they have the - :attr:`required` attribute set to ``True`` if they were - from the ``requirement`` passed to ``find()``, and they have the - :attr:`build_time_dependency` attribute set to ``True`` unless they - are post-installation dependencies of the ``requirement``. - - The problems should be a tuple consisting of the string - ``'unsatisfied'`` and the requirement which couldn't be satisfied - by any distribution known to the locator. - """ - - self.provided = {} - self.dists = {} - self.dists_by_name = {} - self.reqts = {} - - meta_extras = set(meta_extras or []) - if ':*:' in meta_extras: - meta_extras.remove(':*:') - # :meta: and :run: are implicitly included - meta_extras |= set([':test:', ':build:', ':dev:']) - - if isinstance(requirement, Distribution): - dist = odist = requirement - logger.debug('passed %s as requirement', odist) - else: - dist = odist = self.locator.locate(requirement, - prereleases=prereleases) - if dist is None: - raise DistlibException('Unable to locate %r' % requirement) - logger.debug('located %s', odist) - dist.requested = True - problems = set() - todo = set([dist]) - install_dists = set([odist]) - while todo: - dist = todo.pop() - name = dist.key # case-insensitive - if name not in self.dists_by_name: - self.add_distribution(dist) - else: - #import pdb; pdb.set_trace() - other = self.dists_by_name[name] - if other != dist: - self.try_to_replace(dist, other, problems) - - ireqts = dist.run_requires | dist.meta_requires - sreqts = dist.build_requires - ereqts = set() - if meta_extras and dist in install_dists: - for key in ('test', 'build', 'dev'): - e = ':%s:' % key - if e in meta_extras: - ereqts |= getattr(dist, '%s_requires' % key) - all_reqts = ireqts | sreqts | ereqts - for r in all_reqts: - providers = self.find_providers(r) - if not providers: - logger.debug('No providers found for %r', r) - provider = self.locator.locate(r, prereleases=prereleases) - # If no provider is found and we didn't consider - # prereleases, consider them now. 
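# Illustrative sketch (not part of the deleted file): driving the resolution
# loop above through the public API.  find() returns the set of located
# distributions plus a set of problems such as ('unsatisfied', requirement);
# the requirement string is a placeholder, and resolution may need network
# access and archive downloads, so this can be slow.
from distlib.locators import DependencyFinder, default_locator

finder = DependencyFinder(default_locator)
dists, problems = finder.find('requests (>= 2.0)')
for d in sorted(dists, key=lambda d: d.key):
    kind = 'build-time only' if d.build_time_dependency else 'run-time'
    print(d.name_and_version, kind)
for problem in problems:
    print('problem:', problem)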
- if provider is None and not prereleases: - provider = self.locator.locate(r, prereleases=True) - if provider is None: - logger.debug('Cannot satisfy %r', r) - problems.add(('unsatisfied', r)) - else: - n, v = provider.key, provider.version - if (n, v) not in self.dists: - todo.add(provider) - providers.add(provider) - if r in ireqts and dist in install_dists: - install_dists.add(provider) - logger.debug('Adding %s to install_dists', - provider.name_and_version) - for p in providers: - name = p.key - if name not in self.dists_by_name: - self.reqts.setdefault(p, set()).add(r) - else: - other = self.dists_by_name[name] - if other != p: - # see if other can be replaced by p - self.try_to_replace(p, other, problems) - - dists = set(self.dists.values()) - for dist in dists: - dist.build_time_dependency = dist not in install_dists - if dist.build_time_dependency: - logger.debug('%s is a build-time dependency only.', - dist.name_and_version) - logger.debug('find done for %s', odist) - return dists, problems diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/util/proxy.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/util/proxy.py deleted file mode 100644 index 2199cc7b7f004009493d032720c36d6568f9d89e..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/util/proxy.py +++ /dev/null @@ -1,57 +0,0 @@ -from .ssl_ import create_urllib3_context, resolve_cert_reqs, resolve_ssl_version - - -def connection_requires_http_tunnel( - proxy_url=None, proxy_config=None, destination_scheme=None -): - """ - Returns True if the connection requires an HTTP CONNECT through the proxy. - - :param URL proxy_url: - URL of the proxy. - :param ProxyConfig proxy_config: - Proxy configuration from poolmanager.py - :param str destination_scheme: - The scheme of the destination. (i.e https, http, etc) - """ - # If we're not using a proxy, no way to use a tunnel. - if proxy_url is None: - return False - - # HTTP destinations never require tunneling, we always forward. - if destination_scheme == "http": - return False - - # Support for forwarding with HTTPS proxies and HTTPS destinations. - if ( - proxy_url.scheme == "https" - and proxy_config - and proxy_config.use_forwarding_for_https - ): - return False - - # Otherwise always use a tunnel. - return True - - -def create_proxy_ssl_context( - ssl_version, cert_reqs, ca_certs=None, ca_cert_dir=None, ca_cert_data=None -): - """ - Generates a default proxy ssl context if one hasn't been provided by the - user. 
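# Illustrative sketch (not part of the deleted file): how the tunnelling rules
# above look from the user side of urllib3.  The proxy and target URLs are
# placeholders.
import urllib3

proxy = urllib3.ProxyManager('http://proxy.example.com:3128')
proxy.request('GET', 'http://example.org/')    # plain HTTP: forwarded, no tunnel
proxy.request('GET', 'https://example.org/')   # HTTPS: HTTP CONNECT tunnel first
# An HTTPS proxy created with use_forwarding_for_https=True would forward even
# the HTTPS request instead of tunnelling, matching the forwarding rule above.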
- """ - ssl_context = create_urllib3_context( - ssl_version=resolve_ssl_version(ssl_version), - cert_reqs=resolve_cert_reqs(cert_reqs), - ) - - if ( - not ca_certs - and not ca_cert_dir - and not ca_cert_data - and hasattr(ssl_context, "load_default_certs") - ): - ssl_context.load_default_certs() - - return ssl_context diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/connectionpool.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/connectionpool.py deleted file mode 100644 index c23d736b186f50eb723eebbd6dfce281d91c2353..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/connectionpool.py +++ /dev/null @@ -1,1110 +0,0 @@ -from __future__ import absolute_import - -import errno -import logging -import re -import socket -import sys -import warnings -from socket import error as SocketError -from socket import timeout as SocketTimeout - -from .connection import ( - BaseSSLError, - BrokenPipeError, - DummyConnection, - HTTPConnection, - HTTPException, - HTTPSConnection, - VerifiedHTTPSConnection, - port_by_scheme, -) -from .exceptions import ( - ClosedPoolError, - EmptyPoolError, - HeaderParsingError, - HostChangedError, - InsecureRequestWarning, - LocationValueError, - MaxRetryError, - NewConnectionError, - ProtocolError, - ProxyError, - ReadTimeoutError, - SSLError, - TimeoutError, -) -from .packages import six -from .packages.six.moves import queue -from .request import RequestMethods -from .response import HTTPResponse -from .util.connection import is_connection_dropped -from .util.proxy import connection_requires_http_tunnel -from .util.queue import LifoQueue -from .util.request import set_file_position -from .util.response import assert_header_parsing -from .util.retry import Retry -from .util.ssl_match_hostname import CertificateError -from .util.timeout import Timeout -from .util.url import Url, _encode_target -from .util.url import _normalize_host as normalize_host -from .util.url import get_host, parse_url - -xrange = six.moves.xrange - -log = logging.getLogger(__name__) - -_Default = object() - - -# Pool objects -class ConnectionPool(object): - """ - Base class for all connection pools, such as - :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`. - - .. note:: - ConnectionPool.urlopen() does not normalize or percent-encode target URIs - which is useful if your target server doesn't support percent-encoded - target URIs. - """ - - scheme = None - QueueCls = LifoQueue - - def __init__(self, host, port=None): - if not host: - raise LocationValueError("No host specified.") - - self.host = _normalize_host(host, scheme=self.scheme) - self._proxy_host = host.lower() - self.port = port - - def __str__(self): - return "%s(host=%r, port=%r)" % (type(self).__name__, self.host, self.port) - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self.close() - # Return False to re-raise any potential exceptions - return False - - def close(self): - """ - Close all pooled connections and disable the pool. - """ - pass - - -# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252 -_blocking_errnos = {errno.EAGAIN, errno.EWOULDBLOCK} - - -class HTTPConnectionPool(ConnectionPool, RequestMethods): - """ - Thread-safe connection pool for one host. - - :param host: - Host used for this HTTP Connection (e.g. "localhost"), passed into - :class:`http.client.HTTPConnection`. 
- - :param port: - Port used for this HTTP Connection (None is equivalent to 80), passed - into :class:`http.client.HTTPConnection`. - - :param strict: - Causes BadStatusLine to be raised if the status line can't be parsed - as a valid HTTP/1.0 or 1.1 status line, passed into - :class:`http.client.HTTPConnection`. - - .. note:: - Only works in Python 2. This parameter is ignored in Python 3. - - :param timeout: - Socket timeout in seconds for each individual connection. This can - be a float or integer, which sets the timeout for the HTTP request, - or an instance of :class:`urllib3.util.Timeout` which gives you more - fine-grained control over request timeouts. After the constructor has - been parsed, this is always a `urllib3.util.Timeout` object. - - :param maxsize: - Number of connections to save that can be reused. More than 1 is useful - in multithreaded situations. If ``block`` is set to False, more - connections will be created but they will not be saved once they've - been used. - - :param block: - If set to True, no more than ``maxsize`` connections will be used at - a time. When no free connections are available, the call will block - until a connection has been released. This is a useful side effect for - particular multithreaded situations where one does not want to use more - than maxsize connections per host to prevent flooding. - - :param headers: - Headers to include with all requests, unless other headers are given - explicitly. - - :param retries: - Retry configuration to use by default with requests in this pool. - - :param _proxy: - Parsed proxy URL, should not be used directly, instead, see - :class:`urllib3.ProxyManager` - - :param _proxy_headers: - A dictionary with proxy headers, should not be used directly, - instead, see :class:`urllib3.ProxyManager` - - :param \\**conn_kw: - Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`, - :class:`urllib3.connection.HTTPSConnection` instances. - """ - - scheme = "http" - ConnectionCls = HTTPConnection - ResponseCls = HTTPResponse - - def __init__( - self, - host, - port=None, - strict=False, - timeout=Timeout.DEFAULT_TIMEOUT, - maxsize=1, - block=False, - headers=None, - retries=None, - _proxy=None, - _proxy_headers=None, - _proxy_config=None, - **conn_kw - ): - ConnectionPool.__init__(self, host, port) - RequestMethods.__init__(self, headers) - - self.strict = strict - - if not isinstance(timeout, Timeout): - timeout = Timeout.from_float(timeout) - - if retries is None: - retries = Retry.DEFAULT - - self.timeout = timeout - self.retries = retries - - self.pool = self.QueueCls(maxsize) - self.block = block - - self.proxy = _proxy - self.proxy_headers = _proxy_headers or {} - self.proxy_config = _proxy_config - - # Fill the queue up so that doing get() on it will block properly - for _ in xrange(maxsize): - self.pool.put(None) - - # These are mostly for testing and debugging purposes. - self.num_connections = 0 - self.num_requests = 0 - self.conn_kw = conn_kw - - if self.proxy: - # Enable Nagle's algorithm for proxies, to avoid packet fragmentation. - # We cannot know if the user has added default socket options, so we cannot replace the - # list. - self.conn_kw.setdefault("socket_options", []) - - self.conn_kw["proxy"] = self.proxy - self.conn_kw["proxy_config"] = self.proxy_config - - def _new_conn(self): - """ - Return a fresh :class:`HTTPConnection`. 
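# Illustrative sketch (not part of the deleted file): a pool that keeps at most
# two reusable connections to a single host and blocks instead of opening extra
# sockets when both are busy.  Host, path and header values are placeholders.
from urllib3 import HTTPConnectionPool, Timeout

pool = HTTPConnectionPool('httpbin.org', port=80, maxsize=2, block=True,
                          timeout=Timeout(connect=2.0, read=5.0),
                          headers={'User-Agent': 'example/1.0'})
r = pool.request('GET', '/get')
print(r.status, len(r.data))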
- """ - self.num_connections += 1 - log.debug( - "Starting new HTTP connection (%d): %s:%s", - self.num_connections, - self.host, - self.port or "80", - ) - - conn = self.ConnectionCls( - host=self.host, - port=self.port, - timeout=self.timeout.connect_timeout, - strict=self.strict, - **self.conn_kw - ) - return conn - - def _get_conn(self, timeout=None): - """ - Get a connection. Will return a pooled connection if one is available. - - If no connections are available and :prop:`.block` is ``False``, then a - fresh connection is returned. - - :param timeout: - Seconds to wait before giving up and raising - :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and - :prop:`.block` is ``True``. - """ - conn = None - try: - conn = self.pool.get(block=self.block, timeout=timeout) - - except AttributeError: # self.pool is None - raise ClosedPoolError(self, "Pool is closed.") - - except queue.Empty: - if self.block: - raise EmptyPoolError( - self, - "Pool reached maximum size and no more connections are allowed.", - ) - pass # Oh well, we'll create a new connection then - - # If this is a persistent connection, check if it got disconnected - if conn and is_connection_dropped(conn): - log.debug("Resetting dropped connection: %s", self.host) - conn.close() - if getattr(conn, "auto_open", 1) == 0: - # This is a proxied connection that has been mutated by - # http.client._tunnel() and cannot be reused (since it would - # attempt to bypass the proxy) - conn = None - - return conn or self._new_conn() - - def _put_conn(self, conn): - """ - Put a connection back into the pool. - - :param conn: - Connection object for the current host and port as returned by - :meth:`._new_conn` or :meth:`._get_conn`. - - If the pool is already full, the connection is closed and discarded - because we exceeded maxsize. If connections are discarded frequently, - then maxsize should be increased. - - If the pool is closed, then the connection will be closed and discarded. - """ - try: - self.pool.put(conn, block=False) - return # Everything is dandy, done. - except AttributeError: - # self.pool is None. - pass - except queue.Full: - # This should never happen if self.block == True - log.warning( - "Connection pool is full, discarding connection: %s. Connection pool size: %s", - self.host, - self.pool.qsize(), - ) - # Connection never got put back into the pool, close it. - if conn: - conn.close() - - def _validate_conn(self, conn): - """ - Called right before a request is made, after the socket is created. - """ - pass - - def _prepare_proxy(self, conn): - # Nothing to do for HTTP connections. - pass - - def _get_timeout(self, timeout): - """Helper that always returns a :class:`urllib3.util.Timeout`""" - if timeout is _Default: - return self.timeout.clone() - - if isinstance(timeout, Timeout): - return timeout.clone() - else: - # User passed us an int/float. This is for backwards compatibility, - # can be removed later - return Timeout.from_float(timeout) - - def _raise_timeout(self, err, url, timeout_value): - """Is the error actually a timeout? Will raise a ReadTimeout or pass""" - - if isinstance(err, SocketTimeout): - raise ReadTimeoutError( - self, url, "Read timed out. (read timeout=%s)" % timeout_value - ) - - # See the above comment about EAGAIN in Python 3. In Python 2 we have - # to specifically catch it and throw the timeout error - if hasattr(err, "errno") and err.errno in _blocking_errnos: - raise ReadTimeoutError( - self, url, "Read timed out. 
(read timeout=%s)" % timeout_value - ) - - # Catch possible read timeouts thrown as SSL errors. If not the - # case, rethrow the original. We need to do this because of: - # http://bugs.python.org/issue10272 - if "timed out" in str(err) or "did not complete (read)" in str( - err - ): # Python < 2.7.4 - raise ReadTimeoutError( - self, url, "Read timed out. (read timeout=%s)" % timeout_value - ) - - def _make_request( - self, conn, method, url, timeout=_Default, chunked=False, **httplib_request_kw - ): - """ - Perform a request on a given urllib connection object taken from our - pool. - - :param conn: - a connection from one of our connection pools - - :param timeout: - Socket timeout in seconds for the request. This can be a - float or integer, which will set the same timeout value for - the socket connect and the socket read, or an instance of - :class:`urllib3.util.Timeout`, which gives you more fine-grained - control over your timeouts. - """ - self.num_requests += 1 - - timeout_obj = self._get_timeout(timeout) - timeout_obj.start_connect() - conn.timeout = Timeout.resolve_default_timeout(timeout_obj.connect_timeout) - - # Trigger any extra validation we need to do. - try: - self._validate_conn(conn) - except (SocketTimeout, BaseSSLError) as e: - # Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout. - self._raise_timeout(err=e, url=url, timeout_value=conn.timeout) - raise - - # conn.request() calls http.client.*.request, not the method in - # urllib3.request. It also calls makefile (recv) on the socket. - try: - if chunked: - conn.request_chunked(method, url, **httplib_request_kw) - else: - conn.request(method, url, **httplib_request_kw) - - # We are swallowing BrokenPipeError (errno.EPIPE) since the server is - # legitimately able to close the connection after sending a valid response. - # With this behaviour, the received response is still readable. - except BrokenPipeError: - # Python 3 - pass - except IOError as e: - # Python 2 and macOS/Linux - # EPIPE and ESHUTDOWN are BrokenPipeError on Python 2, and EPROTOTYPE is needed on macOS - # https://erickt.github.io/blog/2014/11/19/adventures-in-debugging-a-potential-osx-kernel-bug/ - if e.errno not in { - errno.EPIPE, - errno.ESHUTDOWN, - errno.EPROTOTYPE, - }: - raise - - # Reset the timeout for the recv() on the socket - read_timeout = timeout_obj.read_timeout - - # App Engine doesn't have a sock attr - if getattr(conn, "sock", None): - # In Python 3 socket.py will catch EAGAIN and return None when you - # try and read into the file pointer created by http.client, which - # instead raises a BadStatusLine exception. Instead of catching - # the exception and assuming all BadStatusLine exceptions are read - # timeouts, check for a zero timeout before making the request. - if read_timeout == 0: - raise ReadTimeoutError( - self, url, "Read timed out. (read timeout=%s)" % read_timeout - ) - if read_timeout is Timeout.DEFAULT_TIMEOUT: - conn.sock.settimeout(socket.getdefaulttimeout()) - else: # None or a value - conn.sock.settimeout(read_timeout) - - # Receive the response from the server - try: - try: - # Python 2.7, use buffering of HTTP responses - httplib_response = conn.getresponse(buffering=True) - except TypeError: - # Python 3 - try: - httplib_response = conn.getresponse() - except BaseException as e: - # Remove the TypeError from the exception chain in - # Python 3 (including for exceptions like SystemExit). - # Otherwise it looks like a bug in the code. 
- six.raise_from(e, None) - except (SocketTimeout, BaseSSLError, SocketError) as e: - self._raise_timeout(err=e, url=url, timeout_value=read_timeout) - raise - - # AppEngine doesn't have a version attr. - http_version = getattr(conn, "_http_vsn_str", "HTTP/?") - log.debug( - '%s://%s:%s "%s %s %s" %s %s', - self.scheme, - self.host, - self.port, - method, - url, - http_version, - httplib_response.status, - httplib_response.length, - ) - - try: - assert_header_parsing(httplib_response.msg) - except (HeaderParsingError, TypeError) as hpe: # Platform-specific: Python 3 - log.warning( - "Failed to parse headers (url=%s): %s", - self._absolute_url(url), - hpe, - exc_info=True, - ) - - return httplib_response - - def _absolute_url(self, path): - return Url(scheme=self.scheme, host=self.host, port=self.port, path=path).url - - def close(self): - """ - Close all pooled connections and disable the pool. - """ - if self.pool is None: - return - # Disable access to the pool - old_pool, self.pool = self.pool, None - - try: - while True: - conn = old_pool.get(block=False) - if conn: - conn.close() - - except queue.Empty: - pass # Done. - - def is_same_host(self, url): - """ - Check if the given ``url`` is a member of the same host as this - connection pool. - """ - if url.startswith("/"): - return True - - # TODO: Add optional support for socket.gethostbyname checking. - scheme, host, port = get_host(url) - if host is not None: - host = _normalize_host(host, scheme=scheme) - - # Use explicit default port for comparison when none is given - if self.port and not port: - port = port_by_scheme.get(scheme) - elif not self.port and port == port_by_scheme.get(scheme): - port = None - - return (scheme, host, port) == (self.scheme, self.host, self.port) - - def urlopen( - self, - method, - url, - body=None, - headers=None, - retries=None, - redirect=True, - assert_same_host=True, - timeout=_Default, - pool_timeout=None, - release_conn=None, - chunked=False, - body_pos=None, - **response_kw - ): - """ - Get a connection from the pool and perform an HTTP request. This is the - lowest level call for making a request, so you'll need to specify all - the raw details. - - .. note:: - - More commonly, it's appropriate to use a convenience method provided - by :class:`.RequestMethods`, such as :meth:`request`. - - .. note:: - - `release_conn` will only behave as expected if - `preload_content=False` because we want to make - `preload_content=False` the default behaviour someday soon without - breaking backwards compatibility. - - :param method: - HTTP request method (such as GET, POST, PUT, etc.) - - :param url: - The URL to perform the request on. - - :param body: - Data to send in the request body, either :class:`str`, :class:`bytes`, - an iterable of :class:`str`/:class:`bytes`, or a file-like object. - - :param headers: - Dictionary of custom headers to send, such as User-Agent, - If-None-Match, etc. If None, pool headers are used. If provided, - these headers completely replace any pool-specific headers. - - :param retries: - Configure the number of retries to allow before raising a - :class:`~urllib3.exceptions.MaxRetryError` exception. - - Pass ``None`` to retry until you receive a response. Pass a - :class:`~urllib3.util.retry.Retry` object for fine-grained control - over different types of retries. - Pass an integer number to retry connection errors that many times, - but no other types of errors. Pass zero to never retry. - - If ``False``, then retries are disabled and any exception is raised - immediately. 
Also, instead of raising a MaxRetryError on redirects, - the redirect response will be returned. - - :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int. - - :param redirect: - If True, automatically handle redirects (status codes 301, 302, - 303, 307, 308). Each redirect counts as a retry. Disabling retries - will disable redirect, too. - - :param assert_same_host: - If ``True``, will make sure that the host of the pool requests is - consistent else will raise HostChangedError. When ``False``, you can - use the pool on an HTTP proxy and request foreign hosts. - - :param timeout: - If specified, overrides the default timeout for this one - request. It may be a float (in seconds) or an instance of - :class:`urllib3.util.Timeout`. - - :param pool_timeout: - If set and the pool is set to block=True, then this method will - block for ``pool_timeout`` seconds and raise EmptyPoolError if no - connection is available within the time period. - - :param release_conn: - If False, then the urlopen call will not release the connection - back into the pool once a response is received (but will release if - you read the entire contents of the response such as when - `preload_content=True`). This is useful if you're not preloading - the response's content immediately. You will need to call - ``r.release_conn()`` on the response ``r`` to return the connection - back into the pool. If None, it takes the value of - ``response_kw.get('preload_content', True)``. - - :param chunked: - If True, urllib3 will send the body using chunked transfer - encoding. Otherwise, urllib3 will send the body using the standard - content-length form. Defaults to False. - - :param int body_pos: - Position to seek to in file-like body in the event of a retry or - redirect. Typically this won't need to be set because urllib3 will - auto-populate the value when needed. - - :param \\**response_kw: - Additional parameters are passed to - :meth:`urllib3.response.HTTPResponse.from_httplib` - """ - - parsed_url = parse_url(url) - destination_scheme = parsed_url.scheme - - if headers is None: - headers = self.headers - - if not isinstance(retries, Retry): - retries = Retry.from_int(retries, redirect=redirect, default=self.retries) - - if release_conn is None: - release_conn = response_kw.get("preload_content", True) - - # Check host - if assert_same_host and not self.is_same_host(url): - raise HostChangedError(self, url, retries) - - # Ensure that the URL we're connecting to is properly encoded - if url.startswith("/"): - url = six.ensure_str(_encode_target(url)) - else: - url = six.ensure_str(parsed_url.url) - - conn = None - - # Track whether `conn` needs to be released before - # returning/raising/recursing. Update this variable if necessary, and - # leave `release_conn` constant throughout the function. That way, if - # the function recurses, the original value of `release_conn` will be - # passed down into the recursive call, and its value will be respected. - # - # See issue #651 [1] for details. - # - # [1] - release_this_conn = release_conn - - http_tunnel_required = connection_requires_http_tunnel( - self.proxy, self.proxy_config, destination_scheme - ) - - # Merge the proxy headers. Only done when not using HTTP CONNECT. We - # have to copy the headers dict so we can safely change it without those - # changes being reflected in anyone else's copy. 
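# Illustrative sketch (not part of the deleted file): the low-level urlopen()
# call with an explicit Retry policy and a streamed (non-preloaded) body.
# `pool` is assumed to be an HTTPConnectionPool as constructed earlier.
from urllib3.util.retry import Retry

retry = Retry(total=3, backoff_factor=0.5, status_forcelist=[502, 503])
r = pool.urlopen('GET', '/get', retries=retry, redirect=True,
                 preload_content=False)
try:
    for chunk in r.stream(1024):
        pass                       # consume the body incrementally
finally:
    r.release_conn()               # hand the connection back to the pool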
- if not http_tunnel_required: - headers = headers.copy() - headers.update(self.proxy_headers) - - # Must keep the exception bound to a separate variable or else Python 3 - # complains about UnboundLocalError. - err = None - - # Keep track of whether we cleanly exited the except block. This - # ensures we do proper cleanup in finally. - clean_exit = False - - # Rewind body position, if needed. Record current position - # for future rewinds in the event of a redirect/retry. - body_pos = set_file_position(body, body_pos) - - try: - # Request a connection from the queue. - timeout_obj = self._get_timeout(timeout) - conn = self._get_conn(timeout=pool_timeout) - - conn.timeout = timeout_obj.connect_timeout - - is_new_proxy_conn = self.proxy is not None and not getattr( - conn, "sock", None - ) - if is_new_proxy_conn and http_tunnel_required: - self._prepare_proxy(conn) - - # Make the request on the httplib connection object. - httplib_response = self._make_request( - conn, - method, - url, - timeout=timeout_obj, - body=body, - headers=headers, - chunked=chunked, - ) - - # If we're going to release the connection in ``finally:``, then - # the response doesn't need to know about the connection. Otherwise - # it will also try to release it and we'll have a double-release - # mess. - response_conn = conn if not release_conn else None - - # Pass method to Response for length checking - response_kw["request_method"] = method - - # Import httplib's response into our own wrapper object - response = self.ResponseCls.from_httplib( - httplib_response, - pool=self, - connection=response_conn, - retries=retries, - **response_kw - ) - - # Everything went great! - clean_exit = True - - except EmptyPoolError: - # Didn't get a connection from the pool, no need to clean up - clean_exit = True - release_this_conn = False - raise - - except ( - TimeoutError, - HTTPException, - SocketError, - ProtocolError, - BaseSSLError, - SSLError, - CertificateError, - ) as e: - # Discard the connection for these exceptions. It will be - # replaced during the next _get_conn() call. - clean_exit = False - - def _is_ssl_error_message_from_http_proxy(ssl_error): - # We're trying to detect the message 'WRONG_VERSION_NUMBER' but - # SSLErrors are kinda all over the place when it comes to the message, - # so we try to cover our bases here! - message = " ".join(re.split("[^a-z]", str(ssl_error).lower())) - return ( - "wrong version number" in message or "unknown protocol" in message - ) - - # Try to detect a common user error with proxies which is to - # set an HTTP proxy to be HTTPS when it should be 'http://' - # (ie {'http': 'http://proxy', 'https': 'https://proxy'}) - # Instead we add a nice error message and point to a URL. - if ( - isinstance(e, BaseSSLError) - and self.proxy - and _is_ssl_error_message_from_http_proxy(e) - and conn.proxy - and conn.proxy.scheme == "https" - ): - e = ProxyError( - "Your proxy appears to only use HTTP and not HTTPS, " - "try changing your proxy URL to be HTTP. 
See: " - "https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html" - "#https-proxy-error-http-proxy", - SSLError(e), - ) - elif isinstance(e, (BaseSSLError, CertificateError)): - e = SSLError(e) - elif isinstance(e, (SocketError, NewConnectionError)) and self.proxy: - e = ProxyError("Cannot connect to proxy.", e) - elif isinstance(e, (SocketError, HTTPException)): - e = ProtocolError("Connection aborted.", e) - - retries = retries.increment( - method, url, error=e, _pool=self, _stacktrace=sys.exc_info()[2] - ) - retries.sleep() - - # Keep track of the error for the retry warning. - err = e - - finally: - if not clean_exit: - # We hit some kind of exception, handled or otherwise. We need - # to throw the connection away unless explicitly told not to. - # Close the connection, set the variable to None, and make sure - # we put the None back in the pool to avoid leaking it. - conn = conn and conn.close() - release_this_conn = True - - if release_this_conn: - # Put the connection back to be reused. If the connection is - # expired then it will be None, which will get replaced with a - # fresh connection during _get_conn. - self._put_conn(conn) - - if not conn: - # Try again - log.warning( - "Retrying (%r) after connection broken by '%r': %s", retries, err, url - ) - return self.urlopen( - method, - url, - body, - headers, - retries, - redirect, - assert_same_host, - timeout=timeout, - pool_timeout=pool_timeout, - release_conn=release_conn, - chunked=chunked, - body_pos=body_pos, - **response_kw - ) - - # Handle redirect? - redirect_location = redirect and response.get_redirect_location() - if redirect_location: - if response.status == 303: - method = "GET" - - try: - retries = retries.increment(method, url, response=response, _pool=self) - except MaxRetryError: - if retries.raise_on_redirect: - response.drain_conn() - raise - return response - - response.drain_conn() - retries.sleep_for_retry(response) - log.debug("Redirecting %s -> %s", url, redirect_location) - return self.urlopen( - method, - redirect_location, - body, - headers, - retries=retries, - redirect=redirect, - assert_same_host=assert_same_host, - timeout=timeout, - pool_timeout=pool_timeout, - release_conn=release_conn, - chunked=chunked, - body_pos=body_pos, - **response_kw - ) - - # Check if we should retry the HTTP response. - has_retry_after = bool(response.headers.get("Retry-After")) - if retries.is_retry(method, response.status, has_retry_after): - try: - retries = retries.increment(method, url, response=response, _pool=self) - except MaxRetryError: - if retries.raise_on_status: - response.drain_conn() - raise - return response - - response.drain_conn() - retries.sleep(response) - log.debug("Retry: %s", url) - return self.urlopen( - method, - url, - body, - headers, - retries=retries, - redirect=redirect, - assert_same_host=assert_same_host, - timeout=timeout, - pool_timeout=pool_timeout, - release_conn=release_conn, - chunked=chunked, - body_pos=body_pos, - **response_kw - ) - - return response - - -class HTTPSConnectionPool(HTTPConnectionPool): - """ - Same as :class:`.HTTPConnectionPool`, but HTTPS. - - :class:`.HTTPSConnection` uses one of ``assert_fingerprint``, - ``assert_hostname`` and ``host`` in this order to verify connections. - If ``assert_hostname`` is False, no verification is done. 
- - The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``, - ``ca_cert_dir``, ``ssl_version``, ``key_password`` are only used if :mod:`ssl` - is available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade - the connection socket into an SSL socket. - """ - - scheme = "https" - ConnectionCls = HTTPSConnection - - def __init__( - self, - host, - port=None, - strict=False, - timeout=Timeout.DEFAULT_TIMEOUT, - maxsize=1, - block=False, - headers=None, - retries=None, - _proxy=None, - _proxy_headers=None, - key_file=None, - cert_file=None, - cert_reqs=None, - key_password=None, - ca_certs=None, - ssl_version=None, - assert_hostname=None, - assert_fingerprint=None, - ca_cert_dir=None, - **conn_kw - ): - - HTTPConnectionPool.__init__( - self, - host, - port, - strict, - timeout, - maxsize, - block, - headers, - retries, - _proxy, - _proxy_headers, - **conn_kw - ) - - self.key_file = key_file - self.cert_file = cert_file - self.cert_reqs = cert_reqs - self.key_password = key_password - self.ca_certs = ca_certs - self.ca_cert_dir = ca_cert_dir - self.ssl_version = ssl_version - self.assert_hostname = assert_hostname - self.assert_fingerprint = assert_fingerprint - - def _prepare_conn(self, conn): - """ - Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket` - and establish the tunnel if proxy is used. - """ - - if isinstance(conn, VerifiedHTTPSConnection): - conn.set_cert( - key_file=self.key_file, - key_password=self.key_password, - cert_file=self.cert_file, - cert_reqs=self.cert_reqs, - ca_certs=self.ca_certs, - ca_cert_dir=self.ca_cert_dir, - assert_hostname=self.assert_hostname, - assert_fingerprint=self.assert_fingerprint, - ) - conn.ssl_version = self.ssl_version - return conn - - def _prepare_proxy(self, conn): - """ - Establishes a tunnel connection through HTTP CONNECT. - - Tunnel connection is established early because otherwise httplib would - improperly set Host: header to proxy's IP:port. - """ - - conn.set_tunnel(self._proxy_host, self.port, self.proxy_headers) - - if self.proxy.scheme == "https": - conn.tls_in_tls_required = True - - conn.connect() - - def _new_conn(self): - """ - Return a fresh :class:`http.client.HTTPSConnection`. - """ - self.num_connections += 1 - log.debug( - "Starting new HTTPS connection (%d): %s:%s", - self.num_connections, - self.host, - self.port or "443", - ) - - if not self.ConnectionCls or self.ConnectionCls is DummyConnection: - raise SSLError( - "Can't connect to HTTPS URL because the SSL module is not available." - ) - - actual_host = self.host - actual_port = self.port - if self.proxy is not None: - actual_host = self.proxy.host - actual_port = self.proxy.port - - conn = self.ConnectionCls( - host=actual_host, - port=actual_port, - timeout=self.timeout.connect_timeout, - strict=self.strict, - cert_file=self.cert_file, - key_file=self.key_file, - key_password=self.key_password, - **self.conn_kw - ) - - return self._prepare_conn(conn) - - def _validate_conn(self, conn): - """ - Called right before a request is made, after the socket is created. - """ - super(HTTPSConnectionPool, self)._validate_conn(conn) - - # Force connect early to allow us to validate the connection. - if not getattr(conn, "sock", None): # AppEngine might not have `.sock` - conn.connect() - - if not conn.is_verified: - warnings.warn( - ( - "Unverified HTTPS request is being made to host '%s'. " - "Adding certificate verification is strongly advised. 
See: " - "https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html" - "#ssl-warnings" % conn.host - ), - InsecureRequestWarning, - ) - - if getattr(conn, "proxy_is_verified", None) is False: - warnings.warn( - ( - "Unverified HTTPS connection done to an HTTPS proxy. " - "Adding certificate verification is strongly advised. See: " - "https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html" - "#ssl-warnings" - ), - InsecureRequestWarning, - ) - - -def connection_from_url(url, **kw): - """ - Given a url, return an :class:`.ConnectionPool` instance of its host. - - This is a shortcut for not having to parse out the scheme, host, and port - of the url before creating an :class:`.ConnectionPool` instance. - - :param url: - Absolute URL string that must include the scheme. Port is optional. - - :param \\**kw: - Passes additional parameters to the constructor of the appropriate - :class:`.ConnectionPool`. Useful for specifying things like - timeout, maxsize, headers, etc. - - Example:: - - >>> conn = connection_from_url('http://google.com/') - >>> r = conn.request('GET', '/') - """ - scheme, host, port = get_host(url) - port = port or port_by_scheme.get(scheme, 80) - if scheme == "https": - return HTTPSConnectionPool(host, port=port, **kw) - else: - return HTTPConnectionPool(host, port=port, **kw) - - -def _normalize_host(host, scheme): - """ - Normalize hosts for comparisons and use with sockets. - """ - - host = normalize_host(host, scheme) - - # httplib doesn't like it when we include brackets in IPv6 addresses - # Specifically, if we include brackets but also pass the port then - # httplib crazily doubles up the square brackets on the Host header. - # Instead, we need to make sure we never pass ``None`` as the port. - # However, for backward compatibility reasons we can't actually - # *assert* that. See http://bugs.python.org/issue28539 - if host.startswith("[") and host.endswith("]"): - host = host[1:-1] - return host diff --git a/spaces/CVPR/LIVE/thrust/thrust/device_new_allocator.h b/spaces/CVPR/LIVE/thrust/thrust/device_new_allocator.h deleted file mode 100644 index 9d7133ba711254d9284200173a453b2155f410c5..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/device_new_allocator.h +++ /dev/null @@ -1,172 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - -/*! \file device_new_allocator.h - * \brief An allocator which allocates storage with \p device_new - */ - -#pragma once - -#include -#include -#include -#include -#include -#include -#include - -namespace thrust -{ - -/*! \addtogroup memory_management_classes Memory Management Classes - * \ingroup memory_management - * \{ - */ - -/*! \p device_new_allocator is a device memory allocator that employs the - * \p device_new function for allocation. - * - * \see device_new - * \see device_ptr - * \see http://www.sgi.com/tech/stl/Allocators.html - */ -template - class device_new_allocator -{ - public: - /*! Type of element allocated, \c T. 
*/ - typedef T value_type; - - /*! Pointer to allocation, \c device_ptr. */ - typedef device_ptr pointer; - - /*! \c const pointer to allocation, \c device_ptr. */ - typedef device_ptr const_pointer; - - /*! Reference to allocated element, \c device_reference. */ - typedef device_reference reference; - - /*! \c const reference to allocated element, \c device_reference. */ - typedef device_reference const_reference; - - /*! Type of allocation size, \c std::size_t. */ - typedef std::size_t size_type; - - /*! Type of allocation difference, \c pointer::difference_type. */ - typedef typename pointer::difference_type difference_type; - - /*! The \p rebind metafunction provides the type of a \p device_new_allocator - * instantiated with another type. - * - * \tparam U The other type to use for instantiation. - */ - template - struct rebind - { - /*! The typedef \p other gives the type of the rebound \p device_new_allocator. - */ - typedef device_new_allocator other; - }; // end rebind - - /*! No-argument constructor has no effect. */ - __host__ __device__ - inline device_new_allocator() {} - - /*! No-argument destructor has no effect. */ - __host__ __device__ - inline ~device_new_allocator() {} - - /*! Copy constructor has no effect. */ - __host__ __device__ - inline device_new_allocator(device_new_allocator const&) {} - - /*! Constructor from other \p device_malloc_allocator has no effect. */ - template - __host__ __device__ - inline device_new_allocator(device_new_allocator const&) {} - - /*! Returns the address of an allocated object. - * \return &r. - */ - __host__ __device__ - inline pointer address(reference r) { return &r; } - - /*! Returns the address an allocated object. - * \return &r. - */ - __host__ __device__ - inline const_pointer address(const_reference r) { return &r; } - - /*! Allocates storage for \p cnt objects. - * \param cnt The number of objects to allocate. - * \return A \p pointer to uninitialized storage for \p cnt objects. - * \note Memory allocated by this function must be deallocated with \p deallocate. - */ - __host__ - inline pointer allocate(size_type cnt, - const_pointer = const_pointer(static_cast(0))) - { - if(cnt > this->max_size()) - { - throw std::bad_alloc(); - } // end if - - // use "::operator new" rather than keyword new - return pointer(device_new(cnt)); - } // end allocate() - - /*! Deallocates storage for objects allocated with \p allocate. - * \param p A \p pointer to the storage to deallocate. - * \param cnt The size of the previous allocation. - * \note Memory deallocated by this function must previously have been - * allocated with \p allocate. - */ - __host__ - inline void deallocate(pointer p, size_type cnt) - { - // use "::operator delete" rather than keyword delete - (void)cnt; - device_delete(p); - } // end deallocate() - - /*! Returns the largest value \c n for which allocate(n) might succeed. - * \return The largest value \c n for which allocate(n) might succeed. - */ - __host__ __device__ - inline size_type max_size() const - { - return std::numeric_limits::max THRUST_PREVENT_MACRO_SUBSTITUTION () / sizeof(T); - } // end max_size() - - /*! Compares against another \p device_malloc_allocator for equality. - * \return \c true - */ - __host__ __device__ - inline bool operator==(device_new_allocator const&) { return true; } - - /*! Compares against another \p device_malloc_allocator for inequality. 
- * \return \c false - */ - __host__ __device__ - inline bool operator!=(device_new_allocator const &a) {return !operator==(a); } -}; // end device_new_allocator - -/*! \} - */ - -} // end thrust - diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/count.h b/spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/count.h deleted file mode 100644 index 5d6f1f748ffea9d1b3a33c764cc2ac307b51a5f8..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/count.h +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a count of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once - -#include - -// the purpose of this header is to #include the count.h header -// of the sequential, host, and device systems. It should be #included in any -// code which uses adl to dispatch count - -#include - -// SCons can't see through the #defines below to figure out what this header -// includes, so we fake it out by specifying all possible files we might end up -// including inside an #if 0. -#if 0 -#include -#include -#include -#include -#endif - -#define __THRUST_HOST_SYSTEM_COUNT_HEADER <__THRUST_HOST_SYSTEM_ROOT/detail/count.h> -#include __THRUST_HOST_SYSTEM_COUNT_HEADER -#undef __THRUST_HOST_SYSTEM_COUNT_HEADER - -#define __THRUST_DEVICE_SYSTEM_COUNT_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/detail/count.h> -#include __THRUST_DEVICE_SYSTEM_COUNT_HEADER -#undef __THRUST_DEVICE_SYSTEM_COUNT_HEADER - diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/unique.h b/spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/unique.h deleted file mode 100644 index 04388cbc008f031de63fc814b95d11485ec27fac..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/unique.h +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#pragma once - -#include -#include - -namespace thrust -{ -namespace system -{ -namespace detail -{ -namespace generic -{ - - -template -__host__ __device__ -ForwardIterator unique(thrust::execution_policy &exec, - ForwardIterator first, - ForwardIterator last); - - -template -__host__ __device__ -ForwardIterator unique(thrust::execution_policy &exec, - ForwardIterator first, - ForwardIterator last, - BinaryPredicate binary_pred); - - -template -__host__ __device__ -OutputIterator unique_copy(thrust::execution_policy &exec, - InputIterator first, - InputIterator last, - OutputIterator output); - - -template -__host__ __device__ -OutputIterator unique_copy(thrust::execution_policy &exec, - InputIterator first, - InputIterator last, - OutputIterator output, - BinaryPredicate binary_pred); - - -} // end namespace generic -} // end namespace detail -} // end namespace system -} // end namespace thrust - -#include - diff --git a/spaces/CVPR/time/README.md b/spaces/CVPR/time/README.md deleted file mode 100644 index 3aec05e7b83e903d79963f55321a30aa7fe366e1..0000000000000000000000000000000000000000 --- a/spaces/CVPR/time/README.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: Time -emoji: ⏰ -colorFrom: green -colorTo: green -sdk: gradio -sdk_version: 3.0.17 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference - -This is the demo for It's About Time: Analog Clock Reading in the Wild -Charig Yang, Weidi Xie, Andrew Zisserman -CVPR 2022 - -Project page: https://www.robots.ox.ac.uk/~vgg/research/time/ -Video: https://www.youtube.com/watch?v=6pYOi92XsGQ - -Note the model takes in cropped image (i.e. we don't run object detector on here). diff --git a/spaces/Cecil8352/vits-models/transforms.py b/spaces/Cecil8352/vits-models/transforms.py deleted file mode 100644 index 4793d67ca5a5630e0ffe0f9fb29445c949e64dae..0000000000000000000000000000000000000000 --- a/spaces/Cecil8352/vits-models/transforms.py +++ /dev/null @@ -1,193 +0,0 @@ -import torch -from torch.nn import functional as F - -import numpy as np - - -DEFAULT_MIN_BIN_WIDTH = 1e-3 -DEFAULT_MIN_BIN_HEIGHT = 1e-3 -DEFAULT_MIN_DERIVATIVE = 1e-3 - - -def piecewise_rational_quadratic_transform(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails=None, - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - - if tails is None: - spline_fn = rational_quadratic_spline - spline_kwargs = {} - else: - spline_fn = unconstrained_rational_quadratic_spline - spline_kwargs = { - 'tails': tails, - 'tail_bound': tail_bound - } - - outputs, logabsdet = spline_fn( - inputs=inputs, - unnormalized_widths=unnormalized_widths, - unnormalized_heights=unnormalized_heights, - unnormalized_derivatives=unnormalized_derivatives, - inverse=inverse, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - **spline_kwargs - ) - return outputs, logabsdet - - -def searchsorted(bin_locations, inputs, eps=1e-6): - bin_locations[..., -1] += eps - return torch.sum( - inputs[..., None] >= bin_locations, - dim=-1 - ) - 1 - - -def unconstrained_rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails='linear', - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - 
min_derivative=DEFAULT_MIN_DERIVATIVE): - inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) - outside_interval_mask = ~inside_interval_mask - - outputs = torch.zeros_like(inputs) - logabsdet = torch.zeros_like(inputs) - - if tails == 'linear': - unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) - constant = np.log(np.exp(1 - min_derivative) - 1) - unnormalized_derivatives[..., 0] = constant - unnormalized_derivatives[..., -1] = constant - - outputs[outside_interval_mask] = inputs[outside_interval_mask] - logabsdet[outside_interval_mask] = 0 - else: - raise RuntimeError('{} tails are not implemented.'.format(tails)) - - outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline( - inputs=inputs[inside_interval_mask], - unnormalized_widths=unnormalized_widths[inside_interval_mask, :], - unnormalized_heights=unnormalized_heights[inside_interval_mask, :], - unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], - inverse=inverse, - left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative - ) - - return outputs, logabsdet - -def rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - left=0., right=1., bottom=0., top=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - if torch.min(inputs) < left or torch.max(inputs) > right: - raise ValueError('Input to a transform is not within its domain') - - num_bins = unnormalized_widths.shape[-1] - - if min_bin_width * num_bins > 1.0: - raise ValueError('Minimal bin width too large for the number of bins') - if min_bin_height * num_bins > 1.0: - raise ValueError('Minimal bin height too large for the number of bins') - - widths = F.softmax(unnormalized_widths, dim=-1) - widths = min_bin_width + (1 - min_bin_width * num_bins) * widths - cumwidths = torch.cumsum(widths, dim=-1) - cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0) - cumwidths = (right - left) * cumwidths + left - cumwidths[..., 0] = left - cumwidths[..., -1] = right - widths = cumwidths[..., 1:] - cumwidths[..., :-1] - - derivatives = min_derivative + F.softplus(unnormalized_derivatives) - - heights = F.softmax(unnormalized_heights, dim=-1) - heights = min_bin_height + (1 - min_bin_height * num_bins) * heights - cumheights = torch.cumsum(heights, dim=-1) - cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0) - cumheights = (top - bottom) * cumheights + bottom - cumheights[..., 0] = bottom - cumheights[..., -1] = top - heights = cumheights[..., 1:] - cumheights[..., :-1] - - if inverse: - bin_idx = searchsorted(cumheights, inputs)[..., None] - else: - bin_idx = searchsorted(cumwidths, inputs)[..., None] - - input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] - input_bin_widths = widths.gather(-1, bin_idx)[..., 0] - - input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] - delta = heights / widths - input_delta = delta.gather(-1, bin_idx)[..., 0] - - input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] - input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] - - input_heights = heights.gather(-1, bin_idx)[..., 0] - - if inverse: - a = (((inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta) - + input_heights * (input_delta - 
input_derivatives))) - b = (input_heights * input_derivatives - - (inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta)) - c = - input_delta * (inputs - input_cumheights) - - discriminant = b.pow(2) - 4 * a * c - assert (discriminant >= 0).all() - - root = (2 * c) / (-b - torch.sqrt(discriminant)) - outputs = root * input_bin_widths + input_cumwidths - - theta_one_minus_theta = root * (1 - root) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - root).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, -logabsdet - else: - theta = (inputs - input_cumwidths) / input_bin_widths - theta_one_minus_theta = theta * (1 - theta) - - numerator = input_heights * (input_delta * theta.pow(2) - + input_derivatives * theta_one_minus_theta) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - outputs = input_cumheights + numerator / denominator - - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - theta).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, logabsdet diff --git a/spaces/Chomkwoy/Nilkessye/cpool_new/src/top_pool.cpp b/spaces/Chomkwoy/Nilkessye/cpool_new/src/top_pool.cpp deleted file mode 100644 index ccec09b5c4fb529599889f729e65c48cbb8721ce..0000000000000000000000000000000000000000 --- a/spaces/Chomkwoy/Nilkessye/cpool_new/src/top_pool.cpp +++ /dev/null @@ -1,91 +0,0 @@ -// #include -#include - -#include - -std::vector top_pool_forward( - torch::Tensor input -) { - // Initialize output - torch::Tensor output = torch::zeros_like(input); - - // Get height - int64_t height = input.size(2); - - // Copy the last column - torch::Tensor input_temp = input.select(2, height - 1); - torch::Tensor output_temp = output.select(2, height - 1); - output_temp.copy_(input_temp); - - torch::Tensor max_temp; - for (int64_t ind = 1; ind < height; ++ind) { - input_temp = input.select(2, height - ind - 1); - output_temp = output.select(2, height - ind); - max_temp = output.select(2, height - ind - 1); - - torch::max_out(max_temp, input_temp, output_temp); - } - - return { - output - }; -} - -std::vector top_pool_backward( - torch::Tensor input, - torch::Tensor grad_output -) { - auto output = torch::zeros_like(input); - - int32_t batch = input.size(0); - int32_t channel = input.size(1); - int32_t height = input.size(2); - int32_t width = input.size(3); - - // auto max_val = torch::zeros(torch::CUDA(torch::kFloat), {batch, channel, width}); - // auto max_ind = torch::zeros(torch::CUDA(torch::kLong), {batch, channel, width}); - auto max_val = torch::zeros({batch, channel, width}, torch::TensorOptions().dtype(torch::kFloat).device(torch::kCUDA)); - auto max_ind = torch::zeros({batch, channel, width}, torch::TensorOptions().dtype(torch::kLong).device(torch::kCUDA)); - - auto input_temp = input.select(2, height - 1); - max_val.copy_(input_temp); - - max_ind.fill_(height - 1); - - auto output_temp = output.select(2, height - 1); - auto grad_output_temp = grad_output.select(2, height - 1); - output_temp.copy_(grad_output_temp); - - auto un_max_ind = 
max_ind.unsqueeze(2); - // auto gt_mask = torch::zeros(torch::CUDA(torch::kByte), {batch, channel, width}); - // auto max_temp = torch::zeros(torch::CUDA(torch::kFloat), {batch, channel, width}); - auto gt_mask = torch::zeros({batch, channel, width}, torch::TensorOptions().dtype(torch::kByte).device(torch::kCUDA)); - auto max_temp = torch::zeros({batch, channel, width}, torch::TensorOptions().dtype(torch::kFloat).device(torch::kCUDA)); - - for (int32_t ind = 1; ind < height; ++ind) { - input_temp = input.select(2, height - ind - 1); - torch::gt_out(gt_mask, input_temp, max_val); - - torch::masked_select_out(max_temp, input_temp, gt_mask); - max_val.masked_scatter_(gt_mask, max_temp); - max_ind.masked_fill_(gt_mask, height - ind - 1); - - grad_output_temp = grad_output.select(2, height - ind - 1).unsqueeze(2); - output.scatter_add_(2, un_max_ind, grad_output_temp); - } - - return { - output - }; -} - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { - m.def( - "forward", &top_pool_forward, "Top Pool Forward", - py::call_guard() - ); - m.def( - "backward", &top_pool_backward, "Top Pool Backward", - py::call_guard() - ); -} diff --git a/spaces/CofAI/CalculatorUI/README.md b/spaces/CofAI/CalculatorUI/README.md deleted file mode 100644 index cfd8bd59c6513a80295610beb994d7bf287fed7f..0000000000000000000000000000000000000000 --- a/spaces/CofAI/CalculatorUI/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: CalculatorUI -emoji: ➕️➖️✖️➗️ -colorFrom: gray -colorTo: gray -sdk: static -pinned: false ---- - -Это UI модель калькулятора от CofAI, можете копировать и дорабатывать её, мы не против, даже можете зарабатывать на ней, спасибо! \ No newline at end of file diff --git a/spaces/CofAI/picgen/README.md b/spaces/CofAI/picgen/README.md deleted file mode 100644 index 610aa37aeb8322d2009fee64cbe5a2f3b2a1e05d..0000000000000000000000000000000000000000 --- a/spaces/CofAI/picgen/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: PicGen -emoji: 🖼☕🖼 -colorFrom: green -colorTo: green -sdk: gradio -sdk_version: 3.12.0 -app_file: app.py -pinned: false -license: creativeml-openrail-m -duplicated_from: null ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/data/datasets/evaluation/word/util/logger.py b/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/data/datasets/evaluation/word/util/logger.py deleted file mode 100644 index 0dab12dc305b88e880d1babde3ba3c7825132802..0000000000000000000000000000000000000000 --- a/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/data/datasets/evaluation/word/util/logger.py +++ /dev/null @@ -1,133 +0,0 @@ -# A simple torch style logger -# (C) Wei YANG 2017 -from __future__ import absolute_import -# import matplotlib.pyplot as plt -import matplotlib -matplotlib.use('pdf') -import matplotlib.pyplot as plt -import os -import sys -import numpy as np - -__all__ = ['Logger', 'LoggerMonitor', 'savefig'] - -def savefig(fname, dpi=None): - dpi = 150 if dpi == None else dpi - plt.savefig(fname, dpi=dpi) - -def plot_overlap(logger, names=None): - names = logger.names if names == None else names - numbers = logger.numbers - for _, name in enumerate(names): - x = np.arange(len(numbers[name])) - plt.plot(x, np.asarray(numbers[name])) - return [logger.title + '(' + name + ')' for name in names] - -class Logger(object): - '''Save training process to log file with simple plot function.''' - def __init__(self, fpath, title=None, resume=False): - self.file 
= None - self.resume = resume - self.title = '' if title == None else title - if fpath is not None: - if resume: - self.file = open(fpath, 'r') - name = self.file.readline() - self.names = name.rstrip().split('\t') - self.numbers = {} - for _, name in enumerate(self.names): - self.numbers[name] = [] - - for numbers in self.file: - numbers = numbers.rstrip().split('\t') - for i in range(0, len(numbers)): - self.numbers[self.names[i]].append(numbers[i]) - self.file.close() - self.file = open(fpath, 'a') - else: - self.file = open(fpath, 'w') - - def set_names(self, names): - if self.resume: - pass - # initialize numbers as empty list - self.numbers = {} - self.names = names - for _, name in enumerate(self.names): - self.file.write(name) - self.file.write('\t') - self.numbers[name] = [] - self.file.write('\n') - self.file.flush() - - - def append(self, numbers): - assert len(self.names) == len(numbers), 'Numbers do not match names' - for index, num in enumerate(numbers): - self.file.write("{0:.6f}".format(num)) - self.file.write('\t') - self.numbers[self.names[index]].append(num) - self.file.write('\n') - self.file.flush() - - def plot(self, names=None): - print 'plot' - ''' - names = self.names if names == None else names - numbers = self.numbers - for _, name in enumerate(names): - x = np.arange(len(numbers[name])) - plt.plot(x, np.asarray(numbers[name])) - plt.legend([self.title + '(' + name + ')' for name in names]) - plt.grid(True) - ''' - - def close(self): - if self.file is not None: - self.file.close() - -class LoggerMonitor(object): - '''Load and visualize multiple logs.''' - def __init__ (self, paths): - '''paths is a distionary with {name:filepath} pair''' - self.loggers = [] - for title, path in paths.items(): - logger = Logger(path, title=title, resume=True) - self.loggers.append(logger) - - def plot(self, names=None): - plt.figure() - plt.subplot(121) - legend_text = [] - for logger in self.loggers: - legend_text += plot_overlap(logger, names) - plt.legend(legend_text, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) 
- plt.grid(True) - -if __name__ == '__main__': - # # Example - # logger = Logger('test.txt') - # logger.set_names(['Train loss', 'Valid loss','Test loss']) - - # length = 100 - # t = np.arange(length) - # train_loss = np.exp(-t / 10.0) + np.random.rand(length) * 0.1 - # valid_loss = np.exp(-t / 10.0) + np.random.rand(length) * 0.1 - # test_loss = np.exp(-t / 10.0) + np.random.rand(length) * 0.1 - - # for i in range(0, length): - # logger.append([train_loss[i], valid_loss[i], test_loss[i]]) - # logger.plot() - - # Example: logger monitor - paths = { - 'resadvnet20':'/home/wyang/code/pytorch-classification/checkpoint/cifar10/resadvnet20/log.txt', - 'resadvnet32':'/home/wyang/code/pytorch-classification/checkpoint/cifar10/resadvnet32/log.txt', - 'resadvnet44':'/home/wyang/code/pytorch-classification/checkpoint/cifar10/resadvnet44/log.txt', - } - - field = ['Valid Acc.'] - - monitor = LoggerMonitor(paths) - monitor.plot(names=field) - savefig('test.eps') \ No newline at end of file diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-3610549a.js b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-3610549a.js deleted file mode 100644 index 8592c28a0ef2444b42fa5e9d304fc94fdcb48e18..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-3610549a.js +++ /dev/null @@ -1,3 +0,0 @@ -import{S as I,e as J,s as K,J as U,K as u,p as j,M as y,n as P,A as E,N as R,O as V,P as D,L as F,Z as Le,ar as je,R as G,G as T,m as Z,V as Y,B as be,C as Ee,av as Q,aj as Ae,X as Ce,k as O,o as X,z as B,v as S,x as q,E as Me,ae as ze,q as Te,r as Be,u as pe,y as ke}from"./index-1d65707a.js";import{U as Se}from"./Upload-9bb55fba.js";import{M as Ue}from"./ModifyUpload-c89cfce3.js";import{B as Ne}from"./Button-f155035a.js";import{B as Fe}from"./BlockLabel-66866176.js";import{E as Oe}from"./Empty-eec13822.js";import{g as Xe}from"./color-90ab3aab.js";import{a as qe}from"./csv-b0b7514a.js";import{Z as x,_ as $,l as ee}from"./linear-58a44b5e.js";import{U as He}from"./UploadText-f599be03.js";import"./Blocks-c9e1499d.js";import"./ModifyUpload.svelte_svelte_type_style_lang-d2acacf0.js";import"./IconButton-d42f3661.js";import"./dsv-576afacd.js";function Pe(l){let e,n,t;return{c(){e=U("svg"),n=U("path"),t=U("path"),u(n,"d","M28.828 3.172a4.094 4.094 0 0 0-5.656 0L4.05 22.292A6.954 6.954 0 0 0 2 27.242V30h2.756a6.952 6.952 0 0 0 4.95-2.05L28.828 8.829a3.999 3.999 0 0 0 0-5.657zM10.91 18.26l2.829 2.829l-2.122 2.121l-2.828-2.828zm-2.619 8.276A4.966 4.966 0 0 1 4.756 28H4v-.759a4.967 4.967 0 0 1 1.464-3.535l1.91-1.91l2.829 2.828zM27.415 7.414l-12.261 12.26l-2.829-2.828l12.262-12.26a2.047 2.047 0 0 1 2.828 0a2 2 0 0 1 0 2.828z"),u(n,"fill","currentColor"),u(t,"d","M6.5 15a3.5 3.5 0 0 1-2.475-5.974l3.5-3.5a1.502 1.502 0 0 0 0-2.121a1.537 1.537 0 0 0-2.121 0L3.415 5.394L2 3.98l1.99-1.988a3.585 3.585 0 0 1 4.95 0a3.504 3.504 0 0 1 0 4.949L5.439 10.44a1.502 1.502 0 0 0 0 2.121a1.537 1.537 0 0 0 2.122 0l4.024-4.024L13 9.95l-4.025 4.024A3.475 3.475 0 0 1 6.5 15z"),u(t,"fill","currentColor"),u(e,"width","1em"),u(e,"height","1em"),u(e,"viewBox","0 0 32 32")},m(a,s){j(a,e,s),y(e,n),y(e,t)},p:P,i:P,o:P,d(a){a&&E(e)}}}let ye=class extends I{constructor(e){super(),J(this,e,null,Pe,K,{})}};function le(l){let e;return Array.isArray(l)?e=l.reduce((n,{values:t})=>[...n,...t.map(({y:a})=>a)],[]):e=l.values,[Math.min(...e),Math.max(...e)]}function te(l,e,n){const 
t=Object.entries(l[0]).reduce((a,s,o)=>(!e&&o===0||e&&s[0]===e?a.x.name=s[0]:(!n||n&&n.includes(s[0]))&&a.y.push({name:s[0],values:[]}),a),{x:{name:"",values:[]},y:[]});for(let a=0;al[6].call(e))},m(o,_){j(o,e,_),y(e,n),y(e,t),y(e,a),s=je(e,l[6].bind(e))},p(o,[_]){_&8&&F(n,"background",o[3]),_&1&&G(a,o[0]),_&36&&F(e,"top",o[2]-o[5]/2+"px"),_&18&&F(e,"left",o[1]-o[4]-7+"px")},i:P,o:P,d(o){o&&E(e),s()}}}function Ve(l,e,n){let{text:t}=e,{x:a}=e,{y:s}=e,{color:o}=e,_,i;function v(){_=this.offsetWidth,i=this.offsetHeight,n(4,_),n(5,i)}return l.$$set=g=>{"text"in g&&n(0,t=g.text),"x"in g&&n(1,a=g.x),"y"in g&&n(2,s=g.y),"color"in g&&n(3,o=g.color)},[t,a,s,o,_,i,v]}class Ye extends I{constructor(e){super(),J(this,e,Ve,Re,K,{text:0,x:1,y:2,color:3})}}function Ze(l,{color:e,text:n}){let t;function a(i){return t=new Ye({props:{text:n,x:i.pageX,y:i.pageY,color:e},target:document.body}),i}function s(i){t.$set({x:i.pageX,y:i.pageY})}function o(){t.$destroy()}const _=l;return _.addEventListener("mouseover",a),_.addEventListener("mouseleave",o),_.addEventListener("mousemove",s),{destroy(){_.removeEventListener("mouseover",a),_.removeEventListener("mouseleave",o),_.removeEventListener("mousemove",s)}}}function ne(l,e,n){const t=l.slice();t[16]=e[n].name,t[17]=e[n].values;const a=t[8][t[16]];return t[18]=a,t}function ae(l,e,n){const t=l.slice();return t[0]=e[n].x,t[1]=e[n].y,t}function oe(l,e,n){const t=l.slice();t[16]=e[n].name,t[17]=e[n].values;const a=t[8][t[16]];return t[18]=a,t}function se(l,e,n){const t=l.slice();return t[0]=e[n].x,t[1]=e[n].y,t}function re(l,e,n){const t=l.slice();return t[27]=e[n],t}function ie(l,e,n){const t=l.slice();return t[27]=e[n],t}function fe(l,e,n){const t=l.slice();return t[16]=e[n].name,t}function _e(l){let e,n,t,a=l[16]+"",s,o;return{c(){e=R("div"),n=R("span"),t=V(),s=D(a),o=V(),u(n,"class","legend-box svelte-1mjxput"),F(n,"background-color",l[8][l[16]]),u(e,"class","legend-item svelte-1mjxput")},m(_,i){j(_,e,i),y(e,n),y(e,t),y(e,s),y(e,o)},p(_,i){i[0]&260&&F(n,"background-color",_[8][_[16]]),i[0]&4&&a!==(a=_[16]+"")&&G(s,a)},d(_){_&&E(e)}}}function ue(l){let e,n,t,a,s,o,_=l[27]+"",i,v,g;return{c(){e=U("line"),o=U("text"),i=D(_),u(e,"stroke-width","0.5"),u(e,"x1",n=l[5](l[27])),u(e,"x2",t=l[5](l[27])),u(e,"y1",a=l[4](l[9][0]l[9][l[9].length-1]?l[6][1]:l[9][l[9].length-1])),u(e,"stroke","#aaa"),u(o,"class","label-text svelte-1mjxput"),u(o,"text-anchor","middle"),u(o,"x",v=l[5](l[27])),u(o,"y",g=l[4](l[9][0])+30)},m(f,h){j(f,e,h),j(f,o,h),y(o,i)},p(f,h){h[0]&1056&&n!==(n=f[5](f[27]))&&u(e,"x1",n),h[0]&1056&&t!==(t=f[5](f[27]))&&u(e,"x2",t),h[0]&592&&a!==(a=f[4](f[9][0]f[9][f[9].length-1]?f[6][1]:f[9][f[9].length-1]))&&u(e,"y2",s),h[0]&1024&&_!==(_=f[27]+"")&&G(i,_),h[0]&1056&&v!==(v=f[5](f[27]))&&u(o,"x",v),h[0]&528&&g!==(g=f[4](f[9][0])+30)&&u(o,"y",g)},d(f){f&&(E(e),E(o))}}}function ce(l){let e,n,t,a,s,o,_=l[27]+"",i,v,g;return{c(){e=U("line"),o=U("text"),i=D(_),u(e,"stroke-width","0.5"),u(e,"y1",n=l[4](l[27])),u(e,"y2",t=l[4](l[27])),u(e,"x1",a=l[5](l[10][0]l[10][l[10].length-1]?l[7][1]:l[10][l[10].length-1])),u(e,"stroke","#aaa"),u(o,"class","label-text 
svelte-1mjxput"),u(o,"text-anchor","end"),u(o,"y",v=l[4](l[27])+4),u(o,"x",g=l[5](l[10][0])-20)},m(f,h){j(f,e,h),j(f,o,h),y(o,i)},p(f,h){h[0]&528&&n!==(n=f[4](f[27]))&&u(e,"y1",n),h[0]&528&&t!==(t=f[4](f[27]))&&u(e,"y2",t),h[0]&1184&&a!==(a=f[5](f[10][0]f[10][f[10].length-1]?f[7][1]:f[10][f[10].length-1]))&&u(e,"x2",s),h[0]&512&&_!==(_=f[27]+"")&&G(i,_),h[0]&528&&v!==(v=f[4](f[27])+4)&&u(o,"y",v),h[0]&1056&&g!==(g=f[5](f[10][0])-20)&&u(o,"x",g)},d(f){f&&(E(e),E(o))}}}function me(l){let e,n,t,a,s,o,_=l[6][1]+"",i,v,g;return{c(){e=U("line"),o=U("text"),i=D(_),u(e,"stroke-width","0.5"),u(e,"y1",n=l[4](l[6][1])),u(e,"y2",t=l[4](l[6][1])),u(e,"x1",a=l[5](l[10][0])),u(e,"x2",s=l[5](l[7][1])),u(e,"stroke","#aaa"),u(o,"class","label-text svelte-1mjxput"),u(o,"text-anchor","end"),u(o,"y",v=l[4](l[6][1])+4),u(o,"x",g=l[5](l[10][0])-20)},m(f,h){j(f,e,h),j(f,o,h),y(o,i)},p(f,h){h[0]&80&&n!==(n=f[4](f[6][1]))&&u(e,"y1",n),h[0]&80&&t!==(t=f[4](f[6][1]))&&u(e,"y2",t),h[0]&1056&&a!==(a=f[5](f[10][0]))&&u(e,"x1",a),h[0]&160&&s!==(s=f[5](f[7][1]))&&u(e,"x2",s),h[0]&64&&_!==(_=f[6][1]+"")&&G(i,_),h[0]&80&&v!==(v=f[4](f[6][1])+4)&&u(o,"y",v),h[0]&1056&&g!==(g=f[5](f[10][0])-20)&&u(o,"x",g)},d(f){f&&(E(e),E(o))}}}function he(l){let e,n,t,a;return{c(){e=U("circle"),u(e,"r","3.5"),u(e,"cx",n=l[5](l[0])),u(e,"cy",t=l[4](l[1])),u(e,"stroke-width","1.5"),u(e,"stroke",a=l[18]),u(e,"fill","none")},m(s,o){j(s,e,o)},p(s,o){o[0]&36&&n!==(n=s[5](s[0]))&&u(e,"cx",n),o[0]&20&&t!==(t=s[4](s[1]))&&u(e,"cy",t),o[0]&260&&a!==(a=s[18])&&u(e,"stroke",a)},d(s){s&&E(e)}}}function ge(l){let e,n,t,a=T(l[17]),s=[];for(let o=0;ol[9][l[9].length-1]&&me(l),C=T(l[2]),L=[];for(let c=0;cc[9][c[9].length-1]?d?d.p(c,z):(d=me(c),d.c(),d.m(s,null)):d&&(d.d(1),d=null),z[0]&308){C=T(c[2]);let r;for(r=0;r{b("process",{x:t,y:a})});const k=({x:d,y:C})=>[_(d),i(C)];return l.$$set=d=>{"value"in d&&n(11,f=d.value),"x"in d&&n(0,h=d.x),"y"in d&&n(1,A=d.y),"colors"in d&&n(12,m=d.colors)},l.$$.update=()=>{l.$$.dirty[0]&2051&&n(3,{x:t,y:a}=te(typeof f=="string"?qe(f):f,h,A),t,(n(2,a),n(11,f),n(0,h),n(1,A))),l.$$.dirty[0]&8&&n(7,s=le(t)),l.$$.dirty[0]&4&&n(6,o=le(a)),l.$$.dirty[0]&128&&n(5,_=x(s,[0,600]).nice()),l.$$.dirty[0]&64&&n(4,i=x(o,[350,0]).nice()),l.$$.dirty[0]&32&&n(10,v=_.ticks(8)),l.$$.dirty[0]&16&&n(9,g=i.ticks(8)),l.$$.dirty[0]&4&&n(8,p=a.reduce((d,C,L)=>({...d,[C.name]:N(L)}),{}))},[h,A,a,t,i,_,o,s,p,g,v,f,m,k]}class we extends I{constructor(e){super(),J(this,e,Ge,De,K,{value:11,x:0,y:1,colors:12},null,[-1,-1])}}function Ie(l){let e,n;return e=new Se({props:{filetype:"text/csv",include_file_metadata:!1,$$slots:{default:[We]},$$scope:{ctx:l}}}),e.$on("load",l[19]),{c(){O(e.$$.fragment)},m(t,a){X(e,t,a),n=!0},p(t,a){const s={};a&8388608&&(s.$$scope={dirty:a,ctx:t}),e.$set(s)},i(t){n||(B(e.$$.fragment,t),n=!0)},o(t){S(e.$$.fragment,t),n=!1},d(t){q(e,t)}}}function Je(l){let e,n,t,a,s;return n=new Ue({}),n.$on("clear",l[17]),a=new we({props:{value:l[14],y:l[4],x:l[5],colors:l[9]}}),a.$on("process",l[18]),{c(){e=R("div"),O(n.$$.fragment),t=V(),O(a.$$.fragment),u(e,"class","chart svelte-etmurc")},m(o,_){j(o,e,_),X(n,e,null),y(e,t),X(a,e,null),s=!0},p(o,_){const i={};_&16384&&(i.value=o[14]),_&16&&(i.y=o[4]),_&32&&(i.x=o[5]),_&512&&(i.colors=o[9]),a.$set(i)},i(o){s||(B(n.$$.fragment,o),B(a.$$.fragment,o),s=!0)},o(o){S(n.$$.fragment,o),S(a.$$.fragment,o),s=!1},d(o){o&&E(e),q(n),q(a)}}}function Ke(l){let e,n,t,a;const s=[xe,Qe],o=[];function _(i,v){return i[15]?0:1}return 
e=_(l),n=o[e]=s[e](l),{c(){n.c(),t=Z()},m(i,v){o[e].m(i,v),j(i,t,v),a=!0},p(i,v){let g=e;e=_(i),e===g?o[e].p(i,v):(pe(),S(o[g],1,1,()=>{o[g]=null}),ke(),n=o[e],n?n.p(i,v):(n=o[e]=s[e](i),n.c()),B(n,1),n.m(t.parentNode,t))},i(i){a||(B(n),a=!0)},o(i){S(n),a=!1},d(i){i&&E(t),o[e].d(i)}}}function We(l){let e,n;return e=new He({props:{type:"csv"}}),{c(){O(e.$$.fragment)},m(t,a){X(e,t,a),n=!0},p:P,i(t){n||(B(e.$$.fragment,t),n=!0)},o(t){S(e.$$.fragment,t),n=!1},d(t){q(e,t)}}}function Qe(l){let e,n;return e=new Oe({props:{unpadded_box:!0,size:"large",$$slots:{default:[$e]},$$scope:{ctx:l}}}),{c(){O(e.$$.fragment)},m(t,a){X(e,t,a),n=!0},p(t,a){const s={};a&8388608&&(s.$$scope={dirty:a,ctx:t}),e.$set(s)},i(t){n||(B(e.$$.fragment,t),n=!0)},o(t){S(e.$$.fragment,t),n=!1},d(t){q(e,t)}}}function xe(l){let e,n;return e=new we({props:{value:l[15],colors:l[9]}}),{c(){O(e.$$.fragment)},m(t,a){X(e,t,a),n=!0},p(t,a){const s={};a&32768&&(s.value=t[15]),a&512&&(s.colors=t[9]),e.$set(s)},i(t){n||(B(e.$$.fragment,t),n=!0)},o(t){S(e.$$.fragment,t),n=!1},d(t){q(e,t)}}}function $e(l){let e,n;return e=new ye({}),{c(){O(e.$$.fragment)},m(t,a){X(e,t,a),n=!0},i(t){n||(B(e.$$.fragment,t),n=!0)},o(t){S(e.$$.fragment,t),n=!1},d(t){q(e,t)}}}function el(l){let e,n,t,a,s,o,_,i;e=new Fe({props:{show_label:l[8],Icon:ye,label:l[7]||"TimeSeries"}});const v=[l[13]];let g={};for(let m=0;m{h[k]=null}),ke()),~s?(o=h[s],o?o.p(m,b):(o=h[s]=f[s](m),o.c()),B(o,1),o.m(_.parentNode,_)):o=null)},i(m){i||(B(e.$$.fragment,m),B(t.$$.fragment,m),B(o),i=!0)},o(m){S(e.$$.fragment,m),S(t.$$.fragment,m),S(o),i=!1},d(m){m&&(E(n),E(a),E(_)),q(e,m),q(t,m),~s&&h[s].d(m)}}}function ll(l){let e,n;return e=new Ne({props:{visible:l[3],variant:l[6]==="dynamic"&&!l[14]?"dashed":"solid",padding:!1,elem_id:l[1],elem_classes:l[2],container:l[10],scale:l[11],min_width:l[12],$$slots:{default:[el]},$$scope:{ctx:l}}}),{c(){O(e.$$.fragment)},m(t,a){X(e,t,a),n=!0},p(t,[a]){const s={};a&8&&(s.visible=t[3]),a&16448&&(s.variant=t[6]==="dynamic"&&!t[14]?"dashed":"solid"),a&2&&(s.elem_id=t[1]),a&4&&(s.elem_classes=t[2]),a&1024&&(s.container=t[10]),a&2048&&(s.scale=t[11]),a&4096&&(s.min_width=t[12]),a&8446961&&(s.$$scope={dirty:a,ctx:t}),e.$set(s)},i(t){n||(B(e.$$.fragment,t),n=!0)},o(t){S(e.$$.fragment,t),n=!1},d(t){q(e,t)}}}function tl(l){return l.data.map(e=>e.reduce((n,t,a)=>({...n,[l.headers[a]]:t}),{}))}function nl(l){const e=atob(l.split(",")[1]),n=l.split(",")[0].split(":")[1].split(";")[0],t=new ArrayBuffer(e.length),a=new Uint8Array(t);for(let s=0;sn.push(a));for(let a=0;as.push(o[a].y)),t.push(s)}return{headers:n,data:t}}function ol(l,e,n){let t;const a=be();let{elem_id:s=""}=e,{elem_classes:o=[]}=e,{visible:_=!0}=e,{value:i}=e,{y:v}=e,{x:g}=e,{mode:f}=e,{label:h}=e,{show_label:A}=e,{colors:m}=e,{container:b=!0}=e,{scale:p=null}=e,{min_width:N=void 0}=e,{loading_status:k}=e,d;function C(r){const w=new FileReader;w.addEventListener("loadend",W=>{n(14,d=W.srcElement.result)}),w.readAsText(r)}function L(r){r.headers&&n(14,d=r.headers.join(",")),r.data.forEach(W=>{n(14,d=d+` -`),n(14,d=d+W.join(","))})}function H(r){return n(0,i={data:r}),r}function M({detail:r}){n(0,i=null),a("change"),a("clear")}const c=({detail:{x:r,y:w}})=>n(0,i=al(r,w)),z=({detail:r})=>H(r);return l.$$set=r=>{"elem_id"in r&&n(1,s=r.elem_id),"elem_classes"in r&&n(2,o=r.elem_classes),"visible"in r&&n(3,_=r.visible),"value"in r&&n(0,i=r.value),"y"in r&&n(4,v=r.y),"x"in r&&n(5,g=r.x),"mode"in r&&n(6,f=r.mode),"label"in r&&n(7,h=r.label),"show_label"in r&&n(8,A=r.show_label),"colors"in 
r&&n(9,m=r.colors),"container"in r&&n(10,b=r.container),"scale"in r&&n(11,p=r.scale),"min_width"in r&&n(12,N=r.min_width),"loading_status"in r&&n(13,k=r.loading_status)},l.$$.update=()=>{l.$$.dirty&1&&(i&&i.data&&typeof i.data=="string"?i?C(nl(i.data)):n(14,d=null):i&&i.data&&typeof i.data!="string"&&(i||n(14,d=null),L(i))),l.$$.dirty&16385&&n(14,d=i==null?null:d),l.$$.dirty&65&&n(15,t=f==="static"&&i&&tl(i)),l.$$.dirty&1&&a("change")},[i,s,o,_,v,g,f,h,A,m,b,p,N,k,d,t,H,M,c,z]}class sl extends I{constructor(e){super(),J(this,e,ol,ll,K,{elem_id:1,elem_classes:2,visible:3,value:0,y:4,x:5,mode:6,label:7,show_label:8,colors:9,container:10,scale:11,min_width:12,loading_status:13})}}const wl=sl,Ll=["static","dynamic"],jl=l=>({type:{payload:"{data: Array> | string; headers?: Array;}"},description:{payload:"dataset of series"}});export{wl as Component,jl as document,Ll as modes}; -//# sourceMappingURL=index-3610549a.js.map diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-82eb6288.js b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-82eb6288.js deleted file mode 100644 index ea168e78136b367a8d320a007b4444d739585b7a..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-82eb6288.js +++ /dev/null @@ -1,2 +0,0 @@ -import{L as s}from"./index-f8ff95a1.js";import{s as o,t as r,L as n,i as P,w as a,f as i,a as Q,b as p}from"./index-3ba00a4a.js";import"./index-1d65707a.js";import"./Blocks-c9e1499d.js";import"./Button-f155035a.js";import"./BlockLabel-66866176.js";import"./Empty-eec13822.js";import"./Copy-9f1657c4.js";import"./Download-daff1959.js";const c=o({String:r.string,Number:r.number,"True False":r.bool,PropertyName:r.propertyName,Null:r.null,",":r.separator,"[ ]":r.squareBracket,"{ }":r.brace}),g=s.deserialize({version:14,states:"$bOVQPOOOOQO'#Cb'#CbOnQPO'#CeOvQPO'#CjOOQO'#Cp'#CpQOQPOOOOQO'#Cg'#CgO}QPO'#CfO!SQPO'#CrOOQO,59P,59PO![QPO,59PO!aQPO'#CuOOQO,59U,59UO!iQPO,59UOVQPO,59QOqQPO'#CkO!nQPO,59^OOQO1G.k1G.kOVQPO'#ClO!vQPO,59aOOQO1G.p1G.pOOQO1G.l1G.lOOQO,59V,59VOOQO-E6i-E6iOOQO,59W,59WOOQO-E6j-E6j",stateData:"#O~OcOS~OQSORSOSSOTSOWQO]ROePO~OVXOeUO~O[[O~PVOg^O~Oh_OVfX~OVaO~OhbO[iX~O[dO~Oh_OVfa~OhbO[ia~O",goto:"!kjPPPPPPkPPkqwPPk{!RPPP!XP!ePP!hXSOR^bQWQRf_TVQ_Q`WRg`QcZRicQTOQZRQe^RhbRYQR]R",nodeNames:"⚠ JsonText True False Null Number String } { Object Property PropertyName ] [ Array",maxTerm:25,nodeProps:[["openedBy",7,"{",12,"["],["closedBy",8,"}",13,"]"]],propSources:[c],skippedNodes:[0],repeatNodeCount:2,tokenData:"(p~RaXY!WYZ!W]^!Wpq!Wrs!]|}$i}!O$n!Q!R$w!R![&V![!]&h!}#O&m#P#Q&r#Y#Z&w#b#c'f#h#i'}#o#p(f#q#r(k~!]Oc~~!`Upq!]qr!]rs!rs#O!]#O#P!w#P~!]~!wOe~~!zXrs!]!P!Q!]#O#P!]#U#V!]#Y#Z!]#b#c!]#f#g!]#h#i!]#i#j#g~#jR!Q![#s!c!i#s#T#Z#s~#vR!Q![$P!c!i$P#T#Z$P~$SR!Q![$]!c!i$]#T#Z$]~$`R!Q![!]!c!i!]#T#Z!]~$nOh~~$qQ!Q!R$w!R![&V~$|RT~!O!P%V!g!h%k#X#Y%k~%YP!Q![%]~%bRT~!Q![%]!g!h%k#X#Y%k~%nR{|%w}!O%w!Q![%}~%zP!Q![%}~&SPT~!Q![%}~&[ST~!O!P%V!Q![&V!g!h%k#X#Y%k~&mOg~~&rO]~~&wO[~~&zP#T#U&}~'QP#`#a'T~'WP#g#h'Z~'^P#X#Y'a~'fOR~~'iP#i#j'l~'oP#`#a'r~'uP#`#a'x~'}OS~~(QP#f#g(T~(WP#i#j(Z~(^P#X#Y(a~(fOQ~~(kOW~~(pOV~",tokenizers:[0],topRules:{JsonText:[0,1]},tokenPrec:0}),V=()=>t=>{try{JSON.parse(t.state.doc.toString())}catch(O){if(!(O instanceof SyntaxError))throw O;const e=m(O,t.state.doc);return[{from:e,message:O.message,severity:"error",to:e}]}return[]};function m(t,O){let e;return(e=t.message.match(/at position 
(\d+)/))?Math.min(+e[1],O.length):(e=t.message.match(/at line (\d+) column (\d+)/))?Math.min(O.line(+e[1]).from+ +e[2]-1,O.length):0}const u=n.define({name:"json",parser:g.configure({props:[P.add({Object:a({except:/^\s*\}/}),Array:a({except:/^\s*\]/})}),i.add({"Object Array":Q})]}),languageData:{closeBrackets:{brackets:["[","{",'"']},indentOnInput:/^\s*[\}\]]$/}});function $(){return new p(u)}export{$ as json,u as jsonLanguage,V as jsonParseLinter}; -//# sourceMappingURL=index-82eb6288.js.map diff --git a/spaces/Dauzy/whisper-webui/README.md b/spaces/Dauzy/whisper-webui/README.md deleted file mode 100644 index 21124105a487c24c3bd6d1618d74cf7df4839a5f..0000000000000000000000000000000000000000 --- a/spaces/Dauzy/whisper-webui/README.md +++ /dev/null @@ -1,179 +0,0 @@ ---- -title: Whisper Webui -emoji: ⚡ -colorFrom: pink -colorTo: purple -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: false -license: apache-2.0 -duplicated_from: aadnk/whisper-webui ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference - -# Running Locally - -To run this program locally, first install Python 3.9+ and Git. Then install Pytorch 10.1+ and all the other dependencies: -``` -pip install -r requirements.txt -``` - -You can find detailed instructions for how to install this on Windows 10/11 [here (PDF)](docs/windows/install_win10_win11.pdf). - -Finally, run the full version (no audio length restrictions) of the app with parallel CPU/GPU enabled: -``` -python app.py --input_audio_max_duration -1 --server_name 127.0.0.1 --auto_parallel True -``` - -You can also run the CLI interface, which is similar to Whisper's own CLI but also supports the following additional arguments: -``` -python cli.py \ -[--vad {none,silero-vad,silero-vad-skip-gaps,silero-vad-expand-into-gaps,periodic-vad}] \ -[--vad_merge_window VAD_MERGE_WINDOW] \ -[--vad_max_merge_size VAD_MAX_MERGE_SIZE] \ -[--vad_padding VAD_PADDING] \ -[--vad_prompt_window VAD_PROMPT_WINDOW] -[--vad_cpu_cores NUMBER_OF_CORES] -[--vad_parallel_devices COMMA_DELIMITED_DEVICES] -[--auto_parallel BOOLEAN] -``` -In addition, you may also use URL's in addition to file paths as input. -``` -python cli.py --model large --vad silero-vad --language Japanese "https://www.youtube.com/watch?v=4cICErqqRSM" -``` - -Rather than supplying arguments to `app.py` or `cli.py`, you can also use the configuration file [config.json5](config.json5). See that file for more information. -If you want to use a different configuration file, you can use the `WHISPER_WEBUI_CONFIG` environment variable to specify the path to another file. - -### Multiple Files - -You can upload multiple files either through the "Upload files" option, or as a playlist on YouTube. -Each audio file will then be processed in turn, and the resulting SRT/VTT/Transcript will be made available in the "Download" section. -When more than one file is processed, the UI will also generate a "All_Output" zip file containing all the text output files. - -## Whisper Implementation - -You can choose between using `whisper` or `faster-whisper`. [Faster Whisper](https://github.com/guillaumekln/faster-whisper) as a drop-in replacement for the -default Whisper which achieves up to a 4x speedup and 2x reduction in memory usage. 
- -You can install the requirements for a specific Whisper implementation in `requirements-fasterWhisper.txt` -or `requirements-whisper.txt`: -``` -pip install -r requirements-fasterWhisper.txt -``` -And then run the App or the CLI with the `--whisper_implementation faster-whisper` flag: -``` -python app.py --whisper_implementation faster-whisper --input_audio_max_duration -1 --server_name 127.0.0.1 --auto_parallel True -``` -You can also select the whisper implementation in `config.json5`: -```json5 -{ - "whisper_implementation": "faster-whisper" -} -``` -### GPU Acceleration - -In order to use GPU acceleration with Faster Whisper, both CUDA 11.2 and cuDNN 8 must be installed. You may want to install it in a virtual environment like Anaconda. - -## Google Colab - -You can also run this Web UI directly on [Google Colab](https://colab.research.google.com/drive/1qeTSvi7Bt_5RMm88ipW4fkcsMOKlDDss?usp=sharing), if you haven't got a GPU powerful enough to run the larger models. - -See the [colab documentation](docs/colab.md) for more information. - -## Parallel Execution - -You can also run both the Web-UI or the CLI on multiple GPUs in parallel, using the `vad_parallel_devices` option. This takes a comma-delimited list of -device IDs (0, 1, etc.) that Whisper should be distributed to and run on concurrently: -``` -python cli.py --model large --vad silero-vad --language Japanese \ ---vad_parallel_devices 0,1 "https://www.youtube.com/watch?v=4cICErqqRSM" -``` - -Note that this requires a VAD to function properly, otherwise only the first GPU will be used. Though you could use `period-vad` to avoid taking the hit -of running Silero-Vad, at a slight cost to accuracy. - -This is achieved by creating N child processes (where N is the number of selected devices), where Whisper is run concurrently. In `app.py`, you can also -set the `vad_process_timeout` option. This configures the number of seconds until a process is killed due to inactivity, freeing RAM and video memory. -The default value is 30 minutes. - -``` -python app.py --input_audio_max_duration -1 --vad_parallel_devices 0,1 --vad_process_timeout 3600 -``` - -To execute the Silero VAD itself in parallel, use the `vad_cpu_cores` option: -``` -python app.py --input_audio_max_duration -1 --vad_parallel_devices 0,1 --vad_process_timeout 3600 --vad_cpu_cores 4 -``` - -You may also use `vad_process_timeout` with a single device (`--vad_parallel_devices 0`), if you prefer to always free video memory after a period of time. - -### Auto Parallel - -You can also set `auto_parallel` to `True`. This will set `vad_parallel_devices` to use all the GPU devices on the system, and `vad_cpu_cores` to be equal to the number of -cores (up to 8): -``` -python app.py --input_audio_max_duration -1 --auto_parallel True -``` - -# Docker - -To run it in Docker, first install Docker and optionally the NVIDIA Container Toolkit in order to use the GPU. -Then either use the GitLab hosted container below, or check out this repository and build an image: -``` -sudo docker build -t whisper-webui:1 . 
-``` - -You can then start the WebUI with GPU support like so: -``` -sudo docker run -d --gpus=all -p 7860:7860 whisper-webui:1 -``` - -Leave out "--gpus=all" if you don't have access to a GPU with enough memory, and are fine with running it on the CPU only: -``` -sudo docker run -d -p 7860:7860 whisper-webui:1 -``` - -# GitLab Docker Registry - -This Docker container is also hosted on GitLab: - -``` -sudo docker run -d --gpus=all -p 7860:7860 registry.gitlab.com/aadnk/whisper-webui:latest -``` - -## Custom Arguments - -You can also pass custom arguments to `app.py` in the Docker container, for instance to be able to use all the GPUs in parallel (replace administrator with your user): -``` -sudo docker run -d --gpus all -p 7860:7860 \ ---mount type=bind,source=/home/administrator/.cache/whisper,target=/root/.cache/whisper \ ---mount type=bind,source=/home/administrator/.cache/huggingface,target=/root/.cache/huggingface \ ---restart=on-failure:15 registry.gitlab.com/aadnk/whisper-webui:latest \ -app.py --input_audio_max_duration -1 --server_name 0.0.0.0 --auto_parallel True \ ---default_vad silero-vad --default_model_name large -``` - -You can also call `cli.py` the same way: -``` -sudo docker run --gpus all \ ---mount type=bind,source=/home/administrator/.cache/whisper,target=/root/.cache/whisper \ ---mount type=bind,source=/home/administrator/.cache/huggingface,target=/root/.cache/huggingface \ ---mount type=bind,source=${PWD},target=/app/data \ -registry.gitlab.com/aadnk/whisper-webui:latest \ -cli.py --model large --auto_parallel True --vad silero-vad \ ---output_dir /app/data /app/data/YOUR-FILE-HERE.mp4 -``` - -## Caching - -Note that the models themselves are currently not included in the Docker images, and will be downloaded on the demand. -To avoid this, bind the directory /root/.cache/whisper to some directory on the host (for instance /home/administrator/.cache/whisper), where you can (optionally) -prepopulate the directory with the different Whisper models. -``` -sudo docker run -d --gpus=all -p 7860:7860 \ ---mount type=bind,source=/home/administrator/.cache/whisper,target=/root/.cache/whisper \ -registry.gitlab.com/aadnk/whisper-webui:latest -``` \ No newline at end of file diff --git a/spaces/DemoLou/moe-tts/models.py b/spaces/DemoLou/moe-tts/models.py deleted file mode 100644 index c214bbb0476ba4777093d8bcf032961f09e59496..0000000000000000000000000000000000000000 --- a/spaces/DemoLou/moe-tts/models.py +++ /dev/null @@ -1,549 +0,0 @@ -import math -import torch -from torch import nn -from torch.nn import functional as F - -import commons -import modules -import attentions -import monotonic_align - -from torch.nn import Conv1d, ConvTranspose1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from commons import init_weights, get_padding - - -class StochasticDurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0): - super().__init__() - filter_channels = in_channels # it needs to be removed from future version. 
- self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.log_flow = modules.Log() - self.flows = nn.ModuleList() - self.flows.append(modules.ElementwiseAffine(2)) - for i in range(n_flows): - self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.flows.append(modules.Flip()) - - self.post_pre = nn.Conv1d(1, filter_channels, 1) - self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - self.post_flows = nn.ModuleList() - self.post_flows.append(modules.ElementwiseAffine(2)) - for i in range(4): - self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.post_flows.append(modules.Flip()) - - self.pre = nn.Conv1d(in_channels, filter_channels, 1) - self.proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, filter_channels, 1) - - def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0): - x = torch.detach(x) - x = self.pre(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.convs(x, x_mask) - x = self.proj(x) * x_mask - - if not reverse: - flows = self.flows - assert w is not None - - logdet_tot_q = 0 - h_w = self.post_pre(w) - h_w = self.post_convs(h_w, x_mask) - h_w = self.post_proj(h_w) * x_mask - e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask - z_q = e_q - for flow in self.post_flows: - z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w)) - logdet_tot_q += logdet_q - z_u, z1 = torch.split(z_q, [1, 1], 1) - u = torch.sigmoid(z_u) * x_mask - z0 = (w - u) * x_mask - logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1, 2]) - logq = torch.sum(-0.5 * (math.log(2 * math.pi) + (e_q ** 2)) * x_mask, [1, 2]) - logdet_tot_q - - logdet_tot = 0 - z0, logdet = self.log_flow(z0, x_mask) - logdet_tot += logdet - z = torch.cat([z0, z1], 1) - for flow in flows: - z, logdet = flow(z, x_mask, g=x, reverse=reverse) - logdet_tot = logdet_tot + logdet - nll = torch.sum(0.5 * (math.log(2 * math.pi) + (z ** 2)) * x_mask, [1, 2]) - logdet_tot - return nll + logq # [b] - else: - flows = list(reversed(self.flows)) - flows = flows[:-2] + [flows[-1]] # remove a useless vflow - z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale - for flow in flows: - z = flow(z, x_mask, g=x, reverse=reverse) - z0, z1 = torch.split(z, [1, 1], 1) - logw = z0 - return logw - - -class DurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0): - super().__init__() - - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.gin_channels = gin_channels - - self.drop = nn.Dropout(p_dropout) - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2) - self.norm_1 = modules.LayerNorm(filter_channels) - self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size // 2) - self.norm_2 = modules.LayerNorm(filter_channels) - self.proj = nn.Conv1d(filter_channels, 1, 1) - - if gin_channels != 0: - self.cond = 
nn.Conv1d(gin_channels, in_channels, 1) - - def forward(self, x, x_mask, g=None): - x = torch.detach(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.conv_1(x * x_mask) - x = torch.relu(x) - x = self.norm_1(x) - x = self.drop(x) - x = self.conv_2(x * x_mask) - x = torch.relu(x) - x = self.norm_2(x) - x = self.drop(x) - x = self.proj(x * x_mask) - return x * x_mask - - -class TextEncoder(nn.Module): - def __init__(self, - n_vocab, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - emotion_embedding): - super().__init__() - self.n_vocab = n_vocab - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emotion_embedding = emotion_embedding - - if self.n_vocab != 0: - self.emb = nn.Embedding(n_vocab, hidden_channels) - if emotion_embedding: - self.emo_proj = nn.Linear(1024, hidden_channels) - nn.init.normal_(self.emb.weight, 0.0, hidden_channels ** -0.5) - - self.encoder = attentions.Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, emotion_embedding=None): - if self.n_vocab != 0: - x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] - if emotion_embedding is not None: - x = x + self.emo_proj(emotion_embedding.unsqueeze(1)) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return x, m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, - gin_channels=gin_channels, mean_only=True)) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - -class PosteriorEncoder(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, 
x.size(2)), 1).to(x.dtype) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - -class Generator(torch.nn.Module): - def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, - upsample_initial_channel, upsample_kernel_sizes, gin_channels=0): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3) - resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append(weight_norm( - ConvTranspose1d(upsample_initial_channel // (2 ** i), upsample_initial_channel // (2 ** (i + 1)), - k, u, padding=(k - u) // 2))) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - print('Removing weight norm...') - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, 
use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class SynthesizerTrn(nn.Module): - """ - Synthesizer for Training - """ - - def __init__(self, - n_vocab, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - n_speakers=0, - gin_channels=0, - use_sdp=True, - emotion_embedding=False, - **kwargs): - - super().__init__() - self.n_vocab = n_vocab - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.n_speakers = n_speakers - self.gin_channels = gin_channels - - self.use_sdp = use_sdp - - self.enc_p = TextEncoder(n_vocab, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - emotion_embedding) - self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, - upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels) - self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, - gin_channels=gin_channels) - self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels) - - if use_sdp: - self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels) - else: - self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels) - - if n_speakers > 1: - self.emb_g = 
nn.Embedding(n_speakers, gin_channels) - - def forward(self, x, x_lengths, y, y_lengths, sid=None, emotion_embedding=None): - - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, emotion_embedding) - if self.n_speakers > 1: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = None - - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - - with torch.no_grad(): - # negative cross-entropy - s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t] - neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s] - neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), - s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s] - neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4 - - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach() - - w = attn.sum(2) - if self.use_sdp: - l_length = self.dp(x, x_mask, w, g=g) - l_length = l_length / torch.sum(x_mask) - else: - logw_ = torch.log(w + 1e-6) * x_mask - logw = self.dp(x, x_mask, g=g) - l_length = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(x_mask) # for averaging - - # expand prior - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) - - z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size) - o = self.dec(z_slice, g=g) - return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None, - emotion_embedding=None): - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, emotion_embedding) - if self.n_speakers > 1: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = None - - if self.use_sdp: - logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) - else: - logw = self.dp(x, x_mask, g=g) - w = torch.exp(logw) * x_mask * length_scale - w_ceil = torch.ceil(w) - y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() - y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype) - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = commons.generate_path(w_ceil, attn_mask) - - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, - 2) # [b, t', t], [b, t, d] -> [b, d, t'] - - z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale - z = self.flow(z_p, y_mask, g=g, reverse=True) - o = self.dec((z * y_mask)[:, :, :max_len], g=g) - return o, attn, y_mask, (z, z_p, m_p, logs_p) - - def voice_conversion(self, y, y_lengths, sid_src, sid_tgt): - assert self.n_speakers > 1, "n_speakers have to be larger than 1." 
- g_src = self.emb_g(sid_src).unsqueeze(-1) - g_tgt = self.emb_g(sid_tgt).unsqueeze(-1) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src) - z_p = self.flow(z, y_mask, g=g_src) - z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True) - o_hat = self.dec(z_hat * y_mask, g=g_tgt) - return o_hat, y_mask, (z, z_p, z_hat) diff --git a/spaces/Dinoking/Guccio-AI-Designer/models/stylegan/stylegan_tf/metrics/metric_base.py b/spaces/Dinoking/Guccio-AI-Designer/models/stylegan/stylegan_tf/metrics/metric_base.py deleted file mode 100644 index 0db82adecb60260393eaf82bd991575d79085787..0000000000000000000000000000000000000000 --- a/spaces/Dinoking/Guccio-AI-Designer/models/stylegan/stylegan_tf/metrics/metric_base.py +++ /dev/null @@ -1,142 +0,0 @@ -# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. -# -# This work is licensed under the Creative Commons Attribution-NonCommercial -# 4.0 International License. To view a copy of this license, visit -# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to -# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. - -"""Common definitions for GAN metrics.""" - -import os -import time -import hashlib -import numpy as np -import tensorflow as tf -import dnnlib -import dnnlib.tflib as tflib - -import config -from training import misc -from training import dataset - -#---------------------------------------------------------------------------- -# Standard metrics. - -fid50k = dnnlib.EasyDict(func_name='metrics.frechet_inception_distance.FID', name='fid50k', num_images=50000, minibatch_per_gpu=8) -ppl_zfull = dnnlib.EasyDict(func_name='metrics.perceptual_path_length.PPL', name='ppl_zfull', num_samples=100000, epsilon=1e-4, space='z', sampling='full', minibatch_per_gpu=16) -ppl_wfull = dnnlib.EasyDict(func_name='metrics.perceptual_path_length.PPL', name='ppl_wfull', num_samples=100000, epsilon=1e-4, space='w', sampling='full', minibatch_per_gpu=16) -ppl_zend = dnnlib.EasyDict(func_name='metrics.perceptual_path_length.PPL', name='ppl_zend', num_samples=100000, epsilon=1e-4, space='z', sampling='end', minibatch_per_gpu=16) -ppl_wend = dnnlib.EasyDict(func_name='metrics.perceptual_path_length.PPL', name='ppl_wend', num_samples=100000, epsilon=1e-4, space='w', sampling='end', minibatch_per_gpu=16) -ls = dnnlib.EasyDict(func_name='metrics.linear_separability.LS', name='ls', num_samples=200000, num_keep=100000, attrib_indices=range(40), minibatch_per_gpu=4) -dummy = dnnlib.EasyDict(func_name='metrics.metric_base.DummyMetric', name='dummy') # for debugging - -#---------------------------------------------------------------------------- -# Base class for metrics. 
- -class MetricBase: - def __init__(self, name): - self.name = name - self._network_pkl = None - self._dataset_args = None - self._mirror_augment = None - self._results = [] - self._eval_time = None - - def run(self, network_pkl, run_dir=None, dataset_args=None, mirror_augment=None, num_gpus=1, tf_config=None, log_results=True): - self._network_pkl = network_pkl - self._dataset_args = dataset_args - self._mirror_augment = mirror_augment - self._results = [] - - if (dataset_args is None or mirror_augment is None) and run_dir is not None: - run_config = misc.parse_config_for_previous_run(run_dir) - self._dataset_args = dict(run_config['dataset']) - self._dataset_args['shuffle_mb'] = 0 - self._mirror_augment = run_config['train'].get('mirror_augment', False) - - time_begin = time.time() - with tf.Graph().as_default(), tflib.create_session(tf_config).as_default(): # pylint: disable=not-context-manager - _G, _D, Gs = misc.load_pkl(self._network_pkl) - self._evaluate(Gs, num_gpus=num_gpus) - self._eval_time = time.time() - time_begin - - if log_results: - result_str = self.get_result_str() - if run_dir is not None: - log = os.path.join(run_dir, 'metric-%s.txt' % self.name) - with dnnlib.util.Logger(log, 'a'): - print(result_str) - else: - print(result_str) - - def get_result_str(self): - network_name = os.path.splitext(os.path.basename(self._network_pkl))[0] - if len(network_name) > 29: - network_name = '...' + network_name[-26:] - result_str = '%-30s' % network_name - result_str += ' time %-12s' % dnnlib.util.format_time(self._eval_time) - for res in self._results: - result_str += ' ' + self.name + res.suffix + ' ' - result_str += res.fmt % res.value - return result_str - - def update_autosummaries(self): - for res in self._results: - tflib.autosummary.autosummary('Metrics/' + self.name + res.suffix, res.value) - - def _evaluate(self, Gs, num_gpus): - raise NotImplementedError # to be overridden by subclasses - - def _report_result(self, value, suffix='', fmt='%-10.4f'): - self._results += [dnnlib.EasyDict(value=value, suffix=suffix, fmt=fmt)] - - def _get_cache_file_for_reals(self, extension='pkl', **kwargs): - all_args = dnnlib.EasyDict(metric_name=self.name, mirror_augment=self._mirror_augment) - all_args.update(self._dataset_args) - all_args.update(kwargs) - md5 = hashlib.md5(repr(sorted(all_args.items())).encode('utf-8')) - dataset_name = self._dataset_args['tfrecord_dir'].replace('\\', '/').split('/')[-1] - return os.path.join(config.cache_dir, '%s-%s-%s.%s' % (md5.hexdigest(), self.name, dataset_name, extension)) - - def _iterate_reals(self, minibatch_size): - dataset_obj = dataset.load_dataset(data_dir=config.data_dir, **self._dataset_args) - while True: - images, _labels = dataset_obj.get_minibatch_np(minibatch_size) - if self._mirror_augment: - images = misc.apply_mirror_augment(images) - yield images - - def _iterate_fakes(self, Gs, minibatch_size, num_gpus): - while True: - latents = np.random.randn(minibatch_size, *Gs.input_shape[1:]) - fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True) - images = Gs.run(latents, None, output_transform=fmt, is_validation=True, num_gpus=num_gpus, assume_frozen=True) - yield images - -#---------------------------------------------------------------------------- -# Group of multiple metrics. 
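# A MetricGroup simply fans out run()/reporting calls to several of the metric configs
# defined above. A minimal usage sketch (the snapshot filename and run directory below
# are hypothetical placeholders, not paths from this repository):
#
#   group = MetricGroup([fid50k, ppl_wend])
#   group.run('network-snapshot-010000.pkl', run_dir='results/00000-example-run', num_gpus=1)
#   print(group.get_result_str())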
- -class MetricGroup: - def __init__(self, metric_kwarg_list): - self.metrics = [dnnlib.util.call_func_by_name(**kwargs) for kwargs in metric_kwarg_list] - - def run(self, *args, **kwargs): - for metric in self.metrics: - metric.run(*args, **kwargs) - - def get_result_str(self): - return ' '.join(metric.get_result_str() for metric in self.metrics) - - def update_autosummaries(self): - for metric in self.metrics: - metric.update_autosummaries() - -#---------------------------------------------------------------------------- -# Dummy metric for debugging purposes. - -class DummyMetric(MetricBase): - def _evaluate(self, Gs, num_gpus): - _ = Gs, num_gpus - self._report_result(0.0) - -#---------------------------------------------------------------------------- diff --git a/spaces/ECCV2022/bytetrack/deploy/ncnn/cpp/src/STrack.cpp b/spaces/ECCV2022/bytetrack/deploy/ncnn/cpp/src/STrack.cpp deleted file mode 100644 index 8306165304355fe6d3d6e244207211757f21a646..0000000000000000000000000000000000000000 --- a/spaces/ECCV2022/bytetrack/deploy/ncnn/cpp/src/STrack.cpp +++ /dev/null @@ -1,192 +0,0 @@ -#include "STrack.h" - -STrack::STrack(vector tlwh_, float score) -{ - _tlwh.resize(4); - _tlwh.assign(tlwh_.begin(), tlwh_.end()); - - is_activated = false; - track_id = 0; - state = TrackState::New; - - tlwh.resize(4); - tlbr.resize(4); - - static_tlwh(); - static_tlbr(); - frame_id = 0; - tracklet_len = 0; - this->score = score; - start_frame = 0; -} - -STrack::~STrack() -{ -} - -void STrack::activate(byte_kalman::KalmanFilter &kalman_filter, int frame_id) -{ - this->kalman_filter = kalman_filter; - this->track_id = this->next_id(); - - vector _tlwh_tmp(4); - _tlwh_tmp[0] = this->_tlwh[0]; - _tlwh_tmp[1] = this->_tlwh[1]; - _tlwh_tmp[2] = this->_tlwh[2]; - _tlwh_tmp[3] = this->_tlwh[3]; - vector xyah = tlwh_to_xyah(_tlwh_tmp); - DETECTBOX xyah_box; - xyah_box[0] = xyah[0]; - xyah_box[1] = xyah[1]; - xyah_box[2] = xyah[2]; - xyah_box[3] = xyah[3]; - auto mc = this->kalman_filter.initiate(xyah_box); - this->mean = mc.first; - this->covariance = mc.second; - - static_tlwh(); - static_tlbr(); - - this->tracklet_len = 0; - this->state = TrackState::Tracked; - if (frame_id == 1) - { - this->is_activated = true; - } - //this->is_activated = true; - this->frame_id = frame_id; - this->start_frame = frame_id; -} - -void STrack::re_activate(STrack &new_track, int frame_id, bool new_id) -{ - vector xyah = tlwh_to_xyah(new_track.tlwh); - DETECTBOX xyah_box; - xyah_box[0] = xyah[0]; - xyah_box[1] = xyah[1]; - xyah_box[2] = xyah[2]; - xyah_box[3] = xyah[3]; - auto mc = this->kalman_filter.update(this->mean, this->covariance, xyah_box); - this->mean = mc.first; - this->covariance = mc.second; - - static_tlwh(); - static_tlbr(); - - this->tracklet_len = 0; - this->state = TrackState::Tracked; - this->is_activated = true; - this->frame_id = frame_id; - this->score = new_track.score; - if (new_id) - this->track_id = next_id(); -} - -void STrack::update(STrack &new_track, int frame_id) -{ - this->frame_id = frame_id; - this->tracklet_len++; - - vector xyah = tlwh_to_xyah(new_track.tlwh); - DETECTBOX xyah_box; - xyah_box[0] = xyah[0]; - xyah_box[1] = xyah[1]; - xyah_box[2] = xyah[2]; - xyah_box[3] = xyah[3]; - - auto mc = this->kalman_filter.update(this->mean, this->covariance, xyah_box); - this->mean = mc.first; - this->covariance = mc.second; - - static_tlwh(); - static_tlbr(); - - this->state = TrackState::Tracked; - this->is_activated = true; - - this->score = new_track.score; -} - -void STrack::static_tlwh() -{ - if 
(this->state == TrackState::New) - { - tlwh[0] = _tlwh[0]; - tlwh[1] = _tlwh[1]; - tlwh[2] = _tlwh[2]; - tlwh[3] = _tlwh[3]; - return; - } - - tlwh[0] = mean[0]; - tlwh[1] = mean[1]; - tlwh[2] = mean[2]; - tlwh[3] = mean[3]; - - tlwh[2] *= tlwh[3]; - tlwh[0] -= tlwh[2] / 2; - tlwh[1] -= tlwh[3] / 2; -} - -void STrack::static_tlbr() -{ - tlbr.clear(); - tlbr.assign(tlwh.begin(), tlwh.end()); - tlbr[2] += tlbr[0]; - tlbr[3] += tlbr[1]; -} - -vector STrack::tlwh_to_xyah(vector tlwh_tmp) -{ - vector tlwh_output = tlwh_tmp; - tlwh_output[0] += tlwh_output[2] / 2; - tlwh_output[1] += tlwh_output[3] / 2; - tlwh_output[2] /= tlwh_output[3]; - return tlwh_output; -} - -vector STrack::to_xyah() -{ - return tlwh_to_xyah(tlwh); -} - -vector STrack::tlbr_to_tlwh(vector &tlbr) -{ - tlbr[2] -= tlbr[0]; - tlbr[3] -= tlbr[1]; - return tlbr; -} - -void STrack::mark_lost() -{ - state = TrackState::Lost; -} - -void STrack::mark_removed() -{ - state = TrackState::Removed; -} - -int STrack::next_id() -{ - static int _count = 0; - _count++; - return _count; -} - -int STrack::end_frame() -{ - return this->frame_id; -} - -void STrack::multi_predict(vector &stracks, byte_kalman::KalmanFilter &kalman_filter) -{ - for (int i = 0; i < stracks.size(); i++) - { - if (stracks[i]->state != TrackState::Tracked) - { - stracks[i]->mean[7] = 0; - } - kalman_filter.predict(stracks[i]->mean, stracks[i]->covariance); - } -} \ No newline at end of file diff --git a/spaces/ECCV2022/bytetrack/tools/convert_ethz_to_coco.py b/spaces/ECCV2022/bytetrack/tools/convert_ethz_to_coco.py deleted file mode 100644 index ceb32810dd0c6970f93d819bcca886fd42451a61..0000000000000000000000000000000000000000 --- a/spaces/ECCV2022/bytetrack/tools/convert_ethz_to_coco.py +++ /dev/null @@ -1,59 +0,0 @@ -import os -import numpy as np -import json -from PIL import Image - -DATA_PATH = 'datasets/ETHZ/' -DATA_FILE_PATH = 'datasets/data_path/eth.train' -OUT_PATH = DATA_PATH + 'annotations/' - -def load_paths(data_path): - with open(data_path, 'r') as file: - img_files = file.readlines() - img_files = [x.replace('\n', '') for x in img_files] - img_files = list(filter(lambda x: len(x) > 0, img_files)) - label_files = [x.replace('images', 'labels_with_ids').replace('.png', '.txt').replace('.jpg', '.txt') for x in img_files] - return img_files, label_files - -if __name__ == '__main__': - if not os.path.exists(OUT_PATH): - os.mkdir(OUT_PATH) - - out_path = OUT_PATH + 'train.json' - out = {'images': [], 'annotations': [], 'categories': [{'id': 1, 'name': 'person'}]} - img_paths, label_paths = load_paths(DATA_FILE_PATH) - image_cnt = 0 - ann_cnt = 0 - video_cnt = 0 - for img_path, label_path in zip(img_paths, label_paths): - image_cnt += 1 - im = Image.open(img_path) - image_info = {'file_name': img_path, - 'id': image_cnt, - 'height': im.size[1], - 'width': im.size[0]} - out['images'].append(image_info) - # Load labels - if os.path.isfile(label_path): - labels0 = np.loadtxt(label_path, dtype=np.float32).reshape(-1, 6) - # Normalized xywh to pixel xyxy format - labels = labels0.copy() - labels[:, 2] = image_info['width'] * (labels0[:, 2] - labels0[:, 4] / 2) - labels[:, 3] = image_info['height'] * (labels0[:, 3] - labels0[:, 5] / 2) - labels[:, 4] = image_info['width'] * labels0[:, 4] - labels[:, 5] = image_info['height'] * labels0[:, 5] - else: - labels = np.array([]) - for i in range(len(labels)): - ann_cnt += 1 - fbox = labels[i, 2:6].tolist() - ann = {'id': ann_cnt, - 'category_id': 1, - 'image_id': image_cnt, - 'track_id': -1, - 'bbox': fbox, - 'area': 
fbox[2] * fbox[3], - 'iscrowd': 0} - out['annotations'].append(ann) - print('loaded train for {} images and {} samples'.format(len(out['images']), len(out['annotations']))) - json.dump(out, open(out_path, 'w')) \ No newline at end of file diff --git a/spaces/ECCV2022/storydalle/dalle/models/stage1/vqgan.py b/spaces/ECCV2022/storydalle/dalle/models/stage1/vqgan.py deleted file mode 100644 index 7f03a4d02aa579275d58290bc4f3714fd58bfe00..0000000000000000000000000000000000000000 --- a/spaces/ECCV2022/storydalle/dalle/models/stage1/vqgan.py +++ /dev/null @@ -1,93 +0,0 @@ -# ------------------------------------------------------------------------------------ -# Modified from VQGAN (https://github.com/CompVis/taming-transformers) -# Copyright (c) 2020 Patrick Esser and Robin Rombach and Björn Ommer. All Rights Reserved. -# ------------------------------------------------------------------------------------ - -import torch -import torch.nn as nn -from typing import List, Tuple, Optional -from einops import rearrange -from omegaconf import OmegaConf -from .layers import Encoder, Decoder - - -class VectorQuantizer(nn.Module): - """ - Simplified VectorQuantizer in the original VQGAN repository - by removing unncessary modules for sampling - """ - def __init__(self, dim: int, n_embed: int, beta: float) -> None: - super().__init__() - self.n_embed = n_embed - self.dim = dim - self.beta = beta - - self.embedding = nn.Embedding(self.n_embed, self.dim) - self.embedding.weight.data.uniform_(-1.0 / self.n_embed, 1.0 / self.n_embed) - - def forward(self, - z: torch.FloatTensor) -> Tuple[torch.FloatTensor, torch.LongTensor]: - z = rearrange(z, 'b c h w -> b h w c').contiguous() # [B,C,H,W] -> [B,H,W,C] - z_flattened = z.view(-1, self.dim) - - d = torch.sum(z_flattened ** 2, dim=1, keepdim=True) + \ - torch.sum(self.embedding.weight**2, dim=1) - 2 * \ - torch.einsum('bd,dn->bn', z_flattened, rearrange(self.embedding.weight, 'n d -> d n')) - - min_encoding_indices = torch.argmin(d, dim=1) - z_q = self.embedding(min_encoding_indices).view(z.shape) - return z_q, min_encoding_indices - - def get_codebook_entry(self, - indices: torch.LongTensor, - shape: Optional[List[int]] = None) -> torch.FloatTensor: - z_q = self.embedding(indices) - if shape is not None: - z_q = z_q.view(shape) - z_q = z_q.permute(0, 3, 1, 2).contiguous() - return z_q - - -class VQGAN(nn.Module): - def __init__(self, n_embed: int, embed_dim: int, hparams: OmegaConf) -> None: - super().__init__() - self.encoder = Encoder(**hparams) - self.decoder = Decoder(**hparams) - self.quantize = VectorQuantizer(dim=embed_dim, n_embed=n_embed, beta=0.25) - self.quant_conv = torch.nn.Conv2d(hparams.z_channels, embed_dim, 1) - self.post_quant_conv = torch.nn.Conv2d(embed_dim, hparams.z_channels, 1) - self.latent_dim = hparams.attn_resolutions[0] - - def forward(self, x: torch.FloatTensor) -> torch.FloatTensor: - quant = self.encode(x) - dec = self.decode(quant) - return dec - - def encode(self, x: torch.FloatTensor) -> torch.FloatTensor: - h = self.encoder(x) - h = self.quant_conv(h) - quant = self.quantize(h)[0] - quant = rearrange(quant, 'b h w c -> b c h w').contiguous() - return quant - - def decode(self, quant: torch.FloatTensor) -> torch.FloatTensor: - quant = self.post_quant_conv(quant) - dec = self.decoder(quant) - return dec - - def decode_code(self, code: torch.LongTensor) -> torch.FloatTensor: - quant = self.quantize.get_codebook_entry(code) - quant = quant.permute(0, 3, 1, 2) - dec = self.decode(quant) - return dec - - def get_codes(self, x: 
torch.FloatTensor) -> torch.LongTensor: - h = self.encoder(x) - h = self.quant_conv(h) - codes = self.quantize(h)[1].view(x.shape[0], self.latent_dim ** 2) - return codes - - def from_ckpt(self, path: str, strict: bool = True) -> None: - ckpt = torch.load(path, map_location='cpu')['state_dict'] - self.load_state_dict(ckpt, strict=strict) - print(f'{path} successfully restored..') diff --git a/spaces/EronSamez/RVC_HFmeu/infer/lib/uvr5_pack/lib_v5/layers_123821KB.py b/spaces/EronSamez/RVC_HFmeu/infer/lib/uvr5_pack/lib_v5/layers_123821KB.py deleted file mode 100644 index 4fc1b5cb85a3327f60cbb9f5deffbeeaaac516ad..0000000000000000000000000000000000000000 --- a/spaces/EronSamez/RVC_HFmeu/infer/lib/uvr5_pack/lib_v5/layers_123821KB.py +++ /dev/null @@ -1,118 +0,0 @@ -import torch -import torch.nn.functional as F -from torch import nn - -from . import spec_utils - - -class Conv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(Conv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nout, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - bias=False, - ), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class SeperableConv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(SeperableConv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nin, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - groups=nin, - bias=False, - ), - nn.Conv2d(nin, nout, kernel_size=1, bias=False), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class Encoder(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU): - super(Encoder, self).__init__() - self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ) - - def __call__(self, x): - skip = self.conv1(x) - h = self.conv2(skip) - - return h, skip - - -class Decoder(nn.Module): - def __init__( - self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False - ): - super(Decoder, self).__init__() - self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - self.dropout = nn.Dropout2d(0.1) if dropout else None - - def __call__(self, x, skip=None): - x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True) - if skip is not None: - skip = spec_utils.crop_center(skip, x) - x = torch.cat([x, skip], dim=1) - h = self.conv(x) - - if self.dropout is not None: - h = self.dropout(h) - - return h - - -class ASPPModule(nn.Module): - def __init__(self, nin, nout, dilations=(4, 8, 16), activ=nn.ReLU): - super(ASPPModule, self).__init__() - self.conv1 = nn.Sequential( - nn.AdaptiveAvgPool2d((1, None)), - Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ), - ) - self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ) - self.conv3 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[0], dilations[0], activ=activ - ) - self.conv4 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[1], dilations[1], activ=activ - ) - self.conv5 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[2], dilations[2], activ=activ - ) - self.bottleneck = nn.Sequential( - Conv2DBNActiv(nin * 5, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1) - ) - - def forward(self, x): - _, _, h, w = x.size() - feat1 = F.interpolate( - self.conv1(x), size=(h, w), 
mode="bilinear", align_corners=True - ) - feat2 = self.conv2(x) - feat3 = self.conv3(x) - feat4 = self.conv4(x) - feat5 = self.conv5(x) - out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1) - bottle = self.bottleneck(out) - return bottle diff --git a/spaces/FFZG-cleopatra/latvian-twitter-sentiment-classifier/dataset.py b/spaces/FFZG-cleopatra/latvian-twitter-sentiment-classifier/dataset.py deleted file mode 100644 index f9030fbcb4b3fe25107c718abef5eda3bb28f031..0000000000000000000000000000000000000000 --- a/spaces/FFZG-cleopatra/latvian-twitter-sentiment-classifier/dataset.py +++ /dev/null @@ -1,40 +0,0 @@ -import config -import torch - - -class BERTDataset: - def __init__(self, review, target): - self.review = review - self.target = target - self.tokenizer = config.TOKENIZER - self.max_len = config.MAX_LEN - - def __len__(self): - return len(self.review) - - def __getitem__(self, item): - review = str(self.review[item]) - review = " ".join(review.split()) - - inputs = self.tokenizer.encode_plus( - review, - None, - add_special_tokens=True, - max_length=self.max_len - ) - - ids = inputs["input_ids"] - mask = inputs["attention_mask"] - token_type_ids = inputs["token_type_ids"] - - padding_length = self.max_len - len(ids) - ids = ids + ([0] * padding_length) - mask = mask + ([0] * padding_length) - token_type_ids = token_type_ids + ([0] * padding_length) - - return { - 'ids': torch.tensor(ids, dtype=torch.long), - 'mask': torch.tensor(mask, dtype=torch.long), - 'token_type_ids': torch.tensor(token_type_ids, dtype=torch.long), - 'targets': torch.tensor(self.target[item], dtype=torch.float) - } diff --git a/spaces/Felix123456/bingo/src/app/layout.tsx b/spaces/Felix123456/bingo/src/app/layout.tsx deleted file mode 100644 index 8b5122759987177b8dc4e4356d1d06cea25c15ea..0000000000000000000000000000000000000000 --- a/spaces/Felix123456/bingo/src/app/layout.tsx +++ /dev/null @@ -1,47 +0,0 @@ -import { Metadata } from 'next' -import { Toaster } from 'react-hot-toast' -import { TailwindIndicator } from '@/components/tailwind-indicator' -import { Providers } from '@/components/providers' -import { Header } from '@/components/header' - -import '@/app/globals.scss' - - -export const metadata: Metadata = { - title: { - default: 'Bing AI Chatbot', - template: `%s - Bing AI Chatbot` - }, - description: 'Bing AI Chatbot Web App.', - themeColor: [ - { media: '(prefers-color-scheme: light)', color: 'white' }, - { media: '(prefers-color-scheme: dark)', color: 'dark' } - ], - icons: { - icon: '/favicon.ico', - shortcut: '../assets/images/logo.svg', - apple: '../assets/images/logo.svg' - } -} - -interface RootLayoutProps { - children: React.ReactNode -} - -export default function RootLayout({ children }: RootLayoutProps) { - return ( - - - - -
- {/* @ts-ignore */} -
-
{children}
-
- -
- - - ) -} diff --git a/spaces/Felix123456/bingo/src/components/toaster.tsx b/spaces/Felix123456/bingo/src/components/toaster.tsx deleted file mode 100644 index 4d2693460b61307a1d4c127fd01df9bee16e59ff..0000000000000000000000000000000000000000 --- a/spaces/Felix123456/bingo/src/components/toaster.tsx +++ /dev/null @@ -1,3 +0,0 @@ -'use client' - -export { Toaster } from 'react-hot-toast' diff --git a/spaces/FelixLuoX/codeformer/CodeFormer/basicsr/data/data_sampler.py b/spaces/FelixLuoX/codeformer/CodeFormer/basicsr/data/data_sampler.py deleted file mode 100644 index 575452d9f844a928f7f42296c81635cfbadec7c2..0000000000000000000000000000000000000000 --- a/spaces/FelixLuoX/codeformer/CodeFormer/basicsr/data/data_sampler.py +++ /dev/null @@ -1,48 +0,0 @@ -import math -import torch -from torch.utils.data.sampler import Sampler - - -class EnlargedSampler(Sampler): - """Sampler that restricts data loading to a subset of the dataset. - - Modified from torch.utils.data.distributed.DistributedSampler - Support enlarging the dataset for iteration-based training, for saving - time when restart the dataloader after each epoch - - Args: - dataset (torch.utils.data.Dataset): Dataset used for sampling. - num_replicas (int | None): Number of processes participating in - the training. It is usually the world_size. - rank (int | None): Rank of the current process within num_replicas. - ratio (int): Enlarging ratio. Default: 1. - """ - - def __init__(self, dataset, num_replicas, rank, ratio=1): - self.dataset = dataset - self.num_replicas = num_replicas - self.rank = rank - self.epoch = 0 - self.num_samples = math.ceil(len(self.dataset) * ratio / self.num_replicas) - self.total_size = self.num_samples * self.num_replicas - - def __iter__(self): - # deterministically shuffle based on epoch - g = torch.Generator() - g.manual_seed(self.epoch) - indices = torch.randperm(self.total_size, generator=g).tolist() - - dataset_size = len(self.dataset) - indices = [v % dataset_size for v in indices] - - # subsample - indices = indices[self.rank:self.total_size:self.num_replicas] - assert len(indices) == self.num_samples - - return iter(indices) - - def __len__(self): - return self.num_samples - - def set_epoch(self, epoch): - self.epoch = epoch diff --git a/spaces/FelixLuoX/codeformer/CodeFormer/facelib/detection/yolov5face/utils/torch_utils.py b/spaces/FelixLuoX/codeformer/CodeFormer/facelib/detection/yolov5face/utils/torch_utils.py deleted file mode 100644 index af2d06587b2d07b2eab199a8484380fde1de5c3c..0000000000000000000000000000000000000000 --- a/spaces/FelixLuoX/codeformer/CodeFormer/facelib/detection/yolov5face/utils/torch_utils.py +++ /dev/null @@ -1,40 +0,0 @@ -import torch -from torch import nn - - -def fuse_conv_and_bn(conv, bn): - # Fuse convolution and batchnorm layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/ - fusedconv = ( - nn.Conv2d( - conv.in_channels, - conv.out_channels, - kernel_size=conv.kernel_size, - stride=conv.stride, - padding=conv.padding, - groups=conv.groups, - bias=True, - ) - .requires_grad_(False) - .to(conv.weight.device) - ) - - # prepare filters - w_conv = conv.weight.clone().view(conv.out_channels, -1) - w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var))) - fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.size())) - - # prepare spatial bias - b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias - b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps)) - 
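    # Folding BatchNorm into the conv: with w_bn = diag(gamma / sqrt(running_var + eps))
    # and b_bn = beta - gamma * running_mean / sqrt(running_var + eps), the fused layer
    # uses W_fused = w_bn @ W_conv and b_fused = w_bn @ b_conv + b_bn, so a single Conv2d
    # reproduces conv followed by batchnorm at inference time.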
fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn) - - return fusedconv - - -def copy_attr(a, b, include=(), exclude=()): - # Copy attributes from b to a, options to only include [...] and to exclude [...] - for k, v in b.__dict__.items(): - if (include and k not in include) or k.startswith("_") or k in exclude: - continue - - setattr(a, k, v) diff --git a/spaces/FoxMeo/fire-detector/utils/torch_utils.py b/spaces/FoxMeo/fire-detector/utils/torch_utils.py deleted file mode 100644 index 1e631b555508457a4944c11a479176463719c0e8..0000000000000000000000000000000000000000 --- a/spaces/FoxMeo/fire-detector/utils/torch_utils.py +++ /dev/null @@ -1,374 +0,0 @@ -# YOLOR PyTorch utils - -import datetime -import logging -import math -import os -import platform -import subprocess -import time -from contextlib import contextmanager -from copy import deepcopy -from pathlib import Path - -import torch -import torch.backends.cudnn as cudnn -import torch.nn as nn -import torch.nn.functional as F -import torchvision - -try: - import thop # for FLOPS computation -except ImportError: - thop = None -logger = logging.getLogger(__name__) - - -@contextmanager -def torch_distributed_zero_first(local_rank: int): - """ - Decorator to make all processes in distributed training wait for each local_master to do something. - """ - if local_rank not in [-1, 0]: - torch.distributed.barrier() - yield - if local_rank == 0: - torch.distributed.barrier() - - -def init_torch_seeds(seed=0): - # Speed-reproducibility tradeoff https://pytorch.org/docs/stable/notes/randomness.html - torch.manual_seed(seed) - if seed == 0: # slower, more reproducible - cudnn.benchmark, cudnn.deterministic = False, True - else: # faster, less reproducible - cudnn.benchmark, cudnn.deterministic = True, False - - -def date_modified(path=__file__): - # return human-readable file modification date, i.e. '2021-3-26' - t = datetime.datetime.fromtimestamp(Path(path).stat().st_mtime) - return f'{t.year}-{t.month}-{t.day}' - - -def git_describe(path=Path(__file__).parent): # path must be a directory - # return human-readable git description, i.e. 
v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe - s = f'git -C {path} describe --tags --long --always' - try: - return subprocess.check_output(s, shell=True, stderr=subprocess.STDOUT).decode()[:-1] - except subprocess.CalledProcessError as e: - return '' # not a git repository - - -def select_device(device='', batch_size=None): - # device = 'cpu' or '0' or '0,1,2,3' - s = f'YOLOR 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string - cpu = device.lower() == 'cpu' - if cpu: - os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False - elif device: # non-cpu device requested - os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable - assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested' # check availability - - cuda = not cpu and torch.cuda.is_available() - if cuda: - n = torch.cuda.device_count() - if n > 1 and batch_size: # check that batch_size is compatible with device_count - assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}' - space = ' ' * len(s) - for i, d in enumerate(device.split(',') if device else range(n)): - p = torch.cuda.get_device_properties(i) - s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2}MB)\n" # bytes to MB - else: - s += 'CPU\n' - - logger.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe - return torch.device('cuda:0' if cuda else 'cpu') - - -def time_synchronized(): - # pytorch-accurate time - if torch.cuda.is_available(): - torch.cuda.synchronize() - return time.time() - - -def profile(x, ops, n=100, device=None): - # profile a pytorch module or list of modules. Example usage: - # x = torch.randn(16, 3, 640, 640) # input - # m1 = lambda x: x * torch.sigmoid(x) - # m2 = nn.SiLU() - # profile(x, [m1, m2], n=100) # profile speed over 100 iterations - - device = device or torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') - x = x.to(device) - x.requires_grad = True - print(torch.__version__, device.type, torch.cuda.get_device_properties(0) if device.type == 'cuda' else '') - print(f"\n{'Params':>12s}{'GFLOPS':>12s}{'forward (ms)':>16s}{'backward (ms)':>16s}{'input':>24s}{'output':>24s}") - for m in ops if isinstance(ops, list) else [ops]: - m = m.to(device) if hasattr(m, 'to') else m # device - m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m # type - dtf, dtb, t = 0., 0., [0., 0., 0.] 
# dt forward, backward - try: - flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # GFLOPS - except: - flops = 0 - - for _ in range(n): - t[0] = time_synchronized() - y = m(x) - t[1] = time_synchronized() - try: - _ = y.sum().backward() - t[2] = time_synchronized() - except: # no backward method - t[2] = float('nan') - dtf += (t[1] - t[0]) * 1000 / n # ms per op forward - dtb += (t[2] - t[1]) * 1000 / n # ms per op backward - - s_in = tuple(x.shape) if isinstance(x, torch.Tensor) else 'list' - s_out = tuple(y.shape) if isinstance(y, torch.Tensor) else 'list' - p = sum(list(x.numel() for x in m.parameters())) if isinstance(m, nn.Module) else 0 # parameters - print(f'{p:12}{flops:12.4g}{dtf:16.4g}{dtb:16.4g}{str(s_in):>24s}{str(s_out):>24s}') - - -def is_parallel(model): - return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel) - - -def intersect_dicts(da, db, exclude=()): - # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values - return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape} - - -def initialize_weights(model): - for m in model.modules(): - t = type(m) - if t is nn.Conv2d: - pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') - elif t is nn.BatchNorm2d: - m.eps = 1e-3 - m.momentum = 0.03 - elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6]: - m.inplace = True - - -def find_modules(model, mclass=nn.Conv2d): - # Finds layer indices matching module class 'mclass' - return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)] - - -def sparsity(model): - # Return global model sparsity - a, b = 0., 0. - for p in model.parameters(): - a += p.numel() - b += (p == 0).sum() - return b / a - - -def prune(model, amount=0.3): - # Prune model to requested global sparsity - import torch.nn.utils.prune as prune - print('Pruning model... ', end='') - for name, m in model.named_modules(): - if isinstance(m, nn.Conv2d): - prune.l1_unstructured(m, name='weight', amount=amount) # prune - prune.remove(m, 'weight') # make permanent - print(' %.3g global sparsity' % sparsity(model)) - - -def fuse_conv_and_bn(conv, bn): - # Fuse convolution and batchnorm layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/ - fusedconv = nn.Conv2d(conv.in_channels, - conv.out_channels, - kernel_size=conv.kernel_size, - stride=conv.stride, - padding=conv.padding, - groups=conv.groups, - bias=True).requires_grad_(False).to(conv.weight.device) - - # prepare filters - w_conv = conv.weight.clone().view(conv.out_channels, -1) - w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var))) - fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape)) - - # prepare spatial bias - b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias - b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps)) - fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn) - - return fusedconv - - -def model_info(model, verbose=False, img_size=640): - # Model information. img_size may be int or list, i.e. 
img_size=640 or img_size=[640, 320] - n_p = sum(x.numel() for x in model.parameters()) # number parameters - n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients - if verbose: - print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma')) - for i, (name, p) in enumerate(model.named_parameters()): - name = name.replace('module_list.', '') - print('%5g %40s %9s %12g %20s %10.3g %10.3g' % - (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std())) - - try: # FLOPS - from thop import profile - stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32 - img = torch.zeros((1, model.yaml.get('ch', 3), stride, stride), device=next(model.parameters()).device) # input - flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2 # stride GFLOPS - img_size = img_size if isinstance(img_size, list) else [img_size, img_size] # expand if int/float - fs = ', %.1f GFLOPS' % (flops * img_size[0] / stride * img_size[1] / stride) # 640x640 GFLOPS - except (ImportError, Exception): - fs = '' - - logger.info(f"Model Summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}") - - -def load_classifier(name='resnet101', n=2): - # Loads a pretrained model reshaped to n-class output - model = torchvision.models.__dict__[name](pretrained=True) - - # ResNet model properties - # input_size = [3, 224, 224] - # input_space = 'RGB' - # input_range = [0, 1] - # mean = [0.485, 0.456, 0.406] - # std = [0.229, 0.224, 0.225] - - # Reshape output to n classes - filters = model.fc.weight.shape[1] - model.fc.bias = nn.Parameter(torch.zeros(n), requires_grad=True) - model.fc.weight = nn.Parameter(torch.zeros(n, filters), requires_grad=True) - model.fc.out_features = n - return model - - -def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416) - # scales img(bs,3,y,x) by ratio constrained to gs-multiple - if ratio == 1.0: - return img - else: - h, w = img.shape[2:] - s = (int(h * ratio), int(w * ratio)) # new size - img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize - if not same_shape: # pad/crop img - h, w = [math.ceil(x * ratio / gs) * gs for x in (h, w)] - return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean - - -def copy_attr(a, b, include=(), exclude=()): - # Copy attributes from b to a, options to only include [...] and to exclude [...] - for k, v in b.__dict__.items(): - if (len(include) and k not in include) or k.startswith('_') or k in exclude: - continue - else: - setattr(a, k, v) - - -class ModelEMA: - """ Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models - Keep a moving average of everything in the model state_dict (parameters and buffers). - This is intended to allow functionality like - https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage - A smoothed version of the weights is necessary for some training schemes to perform well. - This class is sensitive where it is initialized in the sequence of model init, - GPU assignment and distributed training wrappers. 
- """ - - def __init__(self, model, decay=0.9999, updates=0): - # Create EMA - self.ema = deepcopy(model.module if is_parallel(model) else model).eval() # FP32 EMA - # if next(model.parameters()).device.type != 'cpu': - # self.ema.half() # FP16 EMA - self.updates = updates # number of EMA updates - self.decay = lambda x: decay * (1 - math.exp(-x / 2000)) # decay exponential ramp (to help early epochs) - for p in self.ema.parameters(): - p.requires_grad_(False) - - def update(self, model): - # Update EMA parameters - with torch.no_grad(): - self.updates += 1 - d = self.decay(self.updates) - - msd = model.module.state_dict() if is_parallel(model) else model.state_dict() # model state_dict - for k, v in self.ema.state_dict().items(): - if v.dtype.is_floating_point: - v *= d - v += (1. - d) * msd[k].detach() - - def update_attr(self, model, include=(), exclude=('process_group', 'reducer')): - # Update EMA attributes - copy_attr(self.ema, model, include, exclude) - - -class BatchNormXd(torch.nn.modules.batchnorm._BatchNorm): - def _check_input_dim(self, input): - # The only difference between BatchNorm1d, BatchNorm2d, BatchNorm3d, etc - # is this method that is overwritten by the sub-class - # This original goal of this method was for tensor sanity checks - # If you're ok bypassing those sanity checks (eg. if you trust your inference - # to provide the right dimensional inputs), then you can just use this method - # for easy conversion from SyncBatchNorm - # (unfortunately, SyncBatchNorm does not store the original class - if it did - # we could return the one that was originally created) - return - -def revert_sync_batchnorm(module): - # this is very similar to the function that it is trying to revert: - # https://github.com/pytorch/pytorch/blob/c8b3686a3e4ba63dc59e5dcfe5db3430df256833/torch/nn/modules/batchnorm.py#L679 - module_output = module - if isinstance(module, torch.nn.modules.batchnorm.SyncBatchNorm): - new_cls = BatchNormXd - module_output = BatchNormXd(module.num_features, - module.eps, module.momentum, - module.affine, - module.track_running_stats) - if module.affine: - with torch.no_grad(): - module_output.weight = module.weight - module_output.bias = module.bias - module_output.running_mean = module.running_mean - module_output.running_var = module.running_var - module_output.num_batches_tracked = module.num_batches_tracked - if hasattr(module, "qconfig"): - module_output.qconfig = module.qconfig - for name, child in module.named_children(): - module_output.add_module(name, revert_sync_batchnorm(child)) - del module - return module_output - - -class TracedModel(nn.Module): - - def __init__(self, model=None, device=None, img_size=(640,640)): - super(TracedModel, self).__init__() - - print(" Convert model to Traced-model... ") - self.stride = model.stride - self.names = model.names - self.model = model - - self.model = revert_sync_batchnorm(self.model) - self.model.to('cpu') - self.model.eval() - - self.detect_layer = self.model.model[-1] - self.model.traced = True - - rand_example = torch.rand(1, 3, img_size, img_size) - - traced_script_module = torch.jit.trace(self.model, rand_example, strict=False) - #traced_script_module = torch.jit.script(self.model) - traced_script_module.save("traced_model.pt") - print(" traced_script_module saved! ") - self.model = traced_script_module - self.model.to(device) - self.detect_layer.to(device) - print(" model is traced! 
\n") - - def forward(self, x, augment=False, profile=False): - out = self.model(x) - out = self.detect_layer(out) - return out \ No newline at end of file diff --git a/spaces/FrancXPT/stabilityai-stable-diffusion-2-1/README.md b/spaces/FrancXPT/stabilityai-stable-diffusion-2-1/README.md deleted file mode 100644 index a7d89d9e6602a9ddbb7f5092b1c6692fe362f265..0000000000000000000000000000000000000000 --- a/spaces/FrancXPT/stabilityai-stable-diffusion-2-1/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Stabilityai Stable Diffusion 2 1 -emoji: 📈 -colorFrom: yellow -colorTo: purple -sdk: gradio -sdk_version: 3.18.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/GT4SD/protein_properties/model_cards/article.md b/spaces/GT4SD/protein_properties/model_cards/article.md deleted file mode 100644 index 7230ba8a29575c08a3135ec262581199b9fedf8f..0000000000000000000000000000000000000000 --- a/spaces/GT4SD/protein_properties/model_cards/article.md +++ /dev/null @@ -1,35 +0,0 @@ -# Supported molecular properties - - -### Instability -Compute the protein instability as presented in [Guruprasad et al. (*Protein Engineering, Design and Selection*; 1990)](https://academic.oup.com/peds/article-abstract/4/2/155/1491271). - -### Aromaticity -Compute the protein aromaticity as presented in [Lobry et al. (*Nucleic Acid Research*; 1994)](https://academic.oup.com/nar/article-abstract/22/15/3174/1087817). - -### Isoelectric point -Computes the isoelectric point of every residue and aggregates. - -### Hydrophobicity -"Computes the hydrophobicity of a protein, relative freq. of **A,C,F,I,L,M & V**. - -### Aliphaticity -Compute the aliphatic index of globular proteins as presented in [Ikai (*The Journal of Biochemistry*; 1980)](https://academic.oup.com/jb/article-abstract/88/6/1895/773432). - -### Charge -Compute the charge of a protein, based on a boolean for the amide (whether the sequences are C-terminally amidated) and a pH value; as presented in [Bjellqvist, (*Electrophoresis*; 1993)](https://analyticalsciencejournals.onlinelibrary.wiley.com/doi/abs/10.1002/elps.11501401163). - -### Charge Density -Computes the charge density of a protein, based on a boolean for the amide (whether the sequences are C-terminally amidated) and a pH value; as presented in [Bjellqvist, (*Electrophoresis*; 1993)](https://analyticalsciencejournals.onlinelibrary.wiley.com/doi/abs/10.1002/elps.11501401163). - -### Boman index -Compute the protein aromaticity as presented in [Boman (*Journal of internal medicine*; 2003)](https://onlinelibrary.wiley.com/doi/full/10.1046/j.1365-2796.2003.01228.x). - -### Protein weight -Compute the molecular weight of a protein with [RDKit](https://www.rdkit.org/docs/GettingStartedInPython.html). - -### Length -Retrieves the number of residues of a protein. - -Moreover, GT4SD also includes properties on other entities such as [molecules](https://gt4sd.github.io/gt4sd-core/api/gt4sd.properties.molecules.html) and [crystals](https://gt4sd.github.io/gt4sd-core/api/gt4sd.properties.crystals.html). 
-The GT4SD web app for molecules can be found [here](https://huggingface.co/spaces/GT4SD/molecular_properties) diff --git a/spaces/Gaurav261/medical_image_classification/app.py b/spaces/Gaurav261/medical_image_classification/app.py deleted file mode 100644 index a699bc5b3c2e987102ca93e0ee28d601e0a93d02..0000000000000000000000000000000000000000 --- a/spaces/Gaurav261/medical_image_classification/app.py +++ /dev/null @@ -1,7 +0,0 @@ -import gradio as gr - -def greet(name): - return "Hello " + name + "!!" - -iface = gr.Interface(fn=greet, inputs="text", outputs="text") -iface.launch() \ No newline at end of file diff --git a/spaces/Gen-Sim/Gen-Sim/gensim/evaluate_finetune_model.py b/spaces/Gen-Sim/Gen-Sim/gensim/evaluate_finetune_model.py deleted file mode 100644 index 0f301476443f740b9311a4d208b35ce0265b003d..0000000000000000000000000000000000000000 --- a/spaces/Gen-Sim/Gen-Sim/gensim/evaluate_finetune_model.py +++ /dev/null @@ -1,78 +0,0 @@ -import openai -import argparse -import os -from cliport import tasks -from cliport.dataset import RavensDataset -from cliport.environments.environment import Environment - -from pygments import highlight -from pygments.lexers import PythonLexer -from pygments.formatters import TerminalFormatter - -import time -import random -import json -import traceback -import pybullet as p -import IPython -from gensim.topdown_sim_runner import TopDownSimulationRunner -import hydra -from datetime import datetime - -from gensim.memory import Memory -from gensim.utils import set_gpt_model, clear_messages, format_finetune_prompt - -@hydra.main(config_path='../cliport/cfg', config_name='data', version_base="1.2") -def main(cfg): - # parser.add_argument("--task", type=str, default='build-car') - # parser.add_argument("--model", type=str, default='davinci:ft-wang-lab:gensim-2023-08-04-18-28-34') - - task = cfg.target_task - model = cfg.target_model - prompt = format_finetune_prompt(task) - - openai.api_key = cfg['openai_key'] - model_time = datetime.now().strftime("%d_%m_%Y_%H:%M:%S") - cfg['model_output_dir'] = os.path.join(cfg['output_folder'], cfg['prompt_folder'] + "_" + cfg.target_model) - if 'seed' in cfg: - cfg['model_output_dir'] = cfg['model_output_dir'] + f"_{cfg['seed']}" - - set_gpt_model(cfg['gpt_model']) - memory = Memory(cfg) - simulation_runner = TopDownSimulationRunner(cfg, memory) - - for trial_i in range(cfg['trials']): - if 'new_finetuned_model' in cfg or 'gpt-3.5-turbo' in cfg.target_model: - # the chat completion version - response = openai.ChatCompletion.create( - model=model, - messages=[{"role": "system", "content": "You are an AI in robot simulation code and task design."}, - {"role": "user", "content": prompt}], - temperature=0.01, - max_tokens=1000, - n=1, - stop=["\n```\n"]) - res = response["choices"][0]["message"]["content"] - else: - response = openai.Completion.create( - model=model, - prompt=prompt, - temperature=0, - max_tokens=1800, - stop=["\n```\n"]) - res = response["choices"][0]["text"] - - simulation_runner.task_creation(res) - simulation_runner.simulate_task() - simulation_runner.print_current_stats() - - simulation_runner.save_stats() - - - - -# load few shot prompts - - -if __name__ == "__main__": - main() diff --git a/spaces/GeorgeOrville/bingo/src/app/loading.css b/spaces/GeorgeOrville/bingo/src/app/loading.css deleted file mode 100644 index eaaab6a86a228334c4eca3c5368ae6f0f593d405..0000000000000000000000000000000000000000 --- a/spaces/GeorgeOrville/bingo/src/app/loading.css +++ /dev/null @@ -1,68 +0,0 @@ -::-webkit-scrollbar { 
- width: 10px; - height: 10px; - display: none; -} - -::-webkit-scrollbar-button:start:decrement, -::-webkit-scrollbar-button:end:increment { - height: 30px; - background-color: transparent; -} - -::-webkit-scrollbar-track-piece { - background-color: #3b3b3b; - -webkit-border-radius: 16px; -} - -::-webkit-scrollbar-thumb:vertical { - height: 50px; - background-color: #666; - border: 1px solid #eee; - -webkit-border-radius: 6px; -} - -/* loading start */ -.loading-spinner { - display: flex; - justify-content: center; - align-items: center; - height: 100vh; - opacity: 1; - transition: opacity .8s ease-out; -} - -.loading-spinner.hidden { - opacity: 0; -} - -.loading-spinner>div { - width: 30px; - height: 30px; - background: linear-gradient(90deg, #2870EA 10.79%, #1B4AEF 87.08%); - - border-radius: 100%; - display: inline-block; - animation: sk-bouncedelay 1.4s infinite ease-in-out both; -} - -.loading-spinner .bounce1 { - animation-delay: -0.32s; -} - -.loading-spinner .bounce2 { - animation-delay: -0.16s; -} - -@keyframes sk-bouncedelay { - - 0%, - 80%, - 100% { - transform: scale(0); - } - - 40% { - transform: scale(1.0); - } -} diff --git a/spaces/Gradio-Blocks/anime-colorization/pixel_guide_diffusion/train_util.py b/spaces/Gradio-Blocks/anime-colorization/pixel_guide_diffusion/train_util.py deleted file mode 100644 index 1867604145736352dc51ab05b6caae8b541a6ebb..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/anime-colorization/pixel_guide_diffusion/train_util.py +++ /dev/null @@ -1,356 +0,0 @@ -import copy -import functools -import os - -import blobfile as bf -import numpy as np -import torch as th -import torch.distributed as dist -from torch.nn.parallel.distributed import DistributedDataParallel as DDP -from torch.optim import AdamW - -from . import dist_util, logger -from .fp16_util import ( - make_master_params, - master_params_to_model_params, - model_grads_to_master_grads, - unflatten_master_params, - zero_grad, -) -from .nn import update_ema -from .resample import LossAwareSampler, UniformSampler - -# For ImageNet experiments, this was a good default value. -# We found that the lg_loss_scale quickly climbed to -# 20-21 within the first ~1K steps of training. 
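# The scale is tracked in log2 form: optimize_fp16() lowers lg_loss_scale by 1 whenever
# a gradient turns non-finite and raises it by fp16_scale_growth after each successful
# step, so training hovers just below the overflow threshold.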
-INITIAL_LOG_LOSS_SCALE = 20.0 - - -class TrainLoop: - def __init__( - self, - *, - model, - diffusion, - data, - batch_size, - microbatch, - lr, - ema_rate, - log_interval, - save_interval, - resume_checkpoint, - use_fp16=False, - fp16_scale_growth=1e-3, - schedule_sampler=None, - weight_decay=0.0, - lr_anneal_steps=0, - ): - self.model = model - self.diffusion = diffusion - self.data = data - self.batch_size = batch_size - self.microbatch = microbatch if microbatch > 0 else batch_size - self.lr = lr - self.ema_rate = ( - [ema_rate] - if isinstance(ema_rate, float) - else [float(x) for x in ema_rate.split(",")] - ) - self.log_interval = log_interval - self.save_interval = save_interval - self.resume_checkpoint = resume_checkpoint - self.use_fp16 = use_fp16 - self.fp16_scale_growth = fp16_scale_growth - self.schedule_sampler = schedule_sampler or UniformSampler(diffusion) - self.weight_decay = weight_decay - self.lr_anneal_steps = lr_anneal_steps - - self.step = 0 - self.resume_step = 0 - self.global_batch = self.batch_size * dist.get_world_size() - - self.model_params = list(self.model.parameters()) - self.master_params = self.model_params - self.lg_loss_scale = INITIAL_LOG_LOSS_SCALE - self.sync_cuda = th.cuda.is_available() - - self._load_and_sync_parameters() - if self.use_fp16: - self._setup_fp16() - - self.opt = AdamW(self.master_params, lr=self.lr, weight_decay=self.weight_decay) - if self.resume_step: - self._load_optimizer_state() - # Model was resumed, either due to a restart or a checkpoint - # being specified at the command line. - self.ema_params = [ - self._load_ema_parameters(rate) for rate in self.ema_rate - ] - else: - self.ema_params = [ - copy.deepcopy(self.master_params) for _ in range(len(self.ema_rate)) - ] - - if th.cuda.is_available(): - self.use_ddp = True - self.ddp_model = DDP( - self.model, - device_ids=[dist_util.dev()], - output_device=dist_util.dev(), - broadcast_buffers=False, - bucket_cap_mb=128, - find_unused_parameters=False, - ) - else: - if dist.get_world_size() > 1: - logger.warn( - "Distributed training requires CUDA. " - "Gradients will not be synchronized properly!" 
- ) - self.use_ddp = False - self.ddp_model = self.model - - def _load_and_sync_parameters(self): - resume_checkpoint = find_resume_checkpoint() or self.resume_checkpoint - - if resume_checkpoint: - self.resume_step = parse_resume_step_from_filename(resume_checkpoint) - if dist.get_rank() == 0: - logger.log(f"loading model from checkpoint: {resume_checkpoint}...") - self.model.load_state_dict( - dist_util.load_state_dict( - resume_checkpoint, map_location=dist_util.dev() - ) - ) - - dist_util.sync_params(self.model.parameters()) - - def _load_ema_parameters(self, rate): - ema_params = copy.deepcopy(self.master_params) - - main_checkpoint = find_resume_checkpoint() or self.resume_checkpoint - ema_checkpoint = find_ema_checkpoint(main_checkpoint, self.resume_step, rate) - if ema_checkpoint: - if dist.get_rank() == 0: - logger.log(f"loading EMA from checkpoint: {ema_checkpoint}...") - state_dict = dist_util.load_state_dict( - ema_checkpoint, map_location=dist_util.dev() - ) - ema_params = self._state_dict_to_master_params(state_dict) - - dist_util.sync_params(ema_params) - return ema_params - - def _load_optimizer_state(self): - main_checkpoint = find_resume_checkpoint() or self.resume_checkpoint - opt_checkpoint = bf.join( - bf.dirname(main_checkpoint), f"opt{self.resume_step:06}.pt" - ) - if bf.exists(opt_checkpoint): - logger.log(f"loading optimizer state from checkpoint: {opt_checkpoint}") - state_dict = dist_util.load_state_dict( - opt_checkpoint, map_location=dist_util.dev() - ) - self.opt.load_state_dict(state_dict) - - def _setup_fp16(self): - self.master_params = make_master_params(self.model_params) - self.model.convert_to_fp16() - - def run_loop(self): - while ( - not self.lr_anneal_steps - or self.step + self.resume_step < self.lr_anneal_steps - ): - batch, cond = next(self.data) - self.run_step(batch, cond) - if self.step % self.log_interval == 0: - logger.dumpkvs() - if self.step % self.save_interval == 0: - self.save() - # Run for a finite amount of time in integration tests. - if os.environ.get("DIFFUSION_TRAINING_TEST", "") and self.step > 0: - return - self.step += 1 - # Save the last checkpoint if it wasn't already saved. 
- if (self.step - 1) % self.save_interval != 0: - self.save() - - def run_step(self, batch, cond): - self.forward_backward(batch, cond) - if self.use_fp16: - self.optimize_fp16() - else: - self.optimize_normal() - self.log_step() - - def forward_backward(self, batch, cond): - zero_grad(self.model_params) - for i in range(0, batch.shape[0], self.microbatch): - micro = batch[i : i + self.microbatch].to(dist_util.dev()) - micro_cond = { - k: v[i : i + self.microbatch].to(dist_util.dev()) - for k, v in cond.items() - } - last_batch = (i + self.microbatch) >= batch.shape[0] - t, weights = self.schedule_sampler.sample(micro.shape[0], dist_util.dev()) - - compute_losses = functools.partial( - self.diffusion.training_losses, - self.ddp_model, - micro, - t, - model_kwargs=micro_cond, - ) - - if last_batch or not self.use_ddp: - losses = compute_losses() - else: - with self.ddp_model.no_sync(): - losses = compute_losses() - - if isinstance(self.schedule_sampler, LossAwareSampler): - self.schedule_sampler.update_with_local_losses( - t, losses["loss"].detach() - ) - - loss = (losses["loss"] * weights).mean() - log_loss_dict( - self.diffusion, t, {k: v * weights for k, v in losses.items()} - ) - if self.use_fp16: - loss_scale = 2 ** self.lg_loss_scale - (loss * loss_scale).backward() - else: - loss.backward() - - def optimize_fp16(self): - if any(not th.isfinite(p.grad).all() for p in self.model_params): - self.lg_loss_scale -= 1 - logger.log(f"Found NaN, decreased lg_loss_scale to {self.lg_loss_scale}") - return - - model_grads_to_master_grads(self.model_params, self.master_params) - self.master_params[0].grad.mul_(1.0 / (2 ** self.lg_loss_scale)) - self._log_grad_norm() - self._anneal_lr() - self.opt.step() - for rate, params in zip(self.ema_rate, self.ema_params): - update_ema(params, self.master_params, rate=rate) - master_params_to_model_params(self.model_params, self.master_params) - self.lg_loss_scale += self.fp16_scale_growth - - def optimize_normal(self): - self._log_grad_norm() - self._anneal_lr() - self.opt.step() - for rate, params in zip(self.ema_rate, self.ema_params): - update_ema(params, self.master_params, rate=rate) - - def _log_grad_norm(self): - sqsum = 0.0 - for p in self.master_params: - sqsum += (p.grad ** 2).sum().item() - logger.logkv_mean("grad_norm", np.sqrt(sqsum)) - - def _anneal_lr(self): - if not self.lr_anneal_steps: - return - frac_done = (self.step + self.resume_step) / self.lr_anneal_steps - lr = self.lr * (1 - frac_done) - for param_group in self.opt.param_groups: - param_group["lr"] = lr - - def log_step(self): - logger.logkv("step", self.step + self.resume_step) - logger.logkv("samples", (self.step + self.resume_step + 1) * self.global_batch) - if self.use_fp16: - logger.logkv("lg_loss_scale", self.lg_loss_scale) - - def save(self): - def save_checkpoint(rate, params): - state_dict = self._master_params_to_state_dict(params) - if dist.get_rank() == 0: - logger.log(f"saving model {rate}...") - if not rate: - filename = f"model{(self.step+self.resume_step):06d}.pt" - else: - filename = f"ema_{rate}_{(self.step+self.resume_step):06d}.pt" - with bf.BlobFile(bf.join(get_blob_logdir(), filename), "wb") as f: - th.save(state_dict, f) - - save_checkpoint(0, self.master_params) - for rate, params in zip(self.ema_rate, self.ema_params): - save_checkpoint(rate, params) - - if dist.get_rank() == 0: - with bf.BlobFile( - bf.join(get_blob_logdir(), f"opt{(self.step+self.resume_step):06d}.pt"), - "wb", - ) as f: - th.save(self.opt.state_dict(), f) - - dist.barrier() - - def 
_master_params_to_state_dict(self, master_params): - if self.use_fp16: - master_params = unflatten_master_params( - self.model.parameters(), master_params - ) - state_dict = self.model.state_dict() - for i, (name, _value) in enumerate(self.model.named_parameters()): - assert name in state_dict - state_dict[name] = master_params[i] - return state_dict - - def _state_dict_to_master_params(self, state_dict): - params = [state_dict[name] for name, _ in self.model.named_parameters()] - if self.use_fp16: - return make_master_params(params) - else: - return params - - -def parse_resume_step_from_filename(filename): - """ - Parse filenames of the form path/to/modelNNNNNN.pt, where NNNNNN is the - checkpoint's number of steps. - """ - split = filename.split("model") - if len(split) < 2: - return 0 - split1 = split[-1].split(".")[0] - try: - return int(split1) - except ValueError: - return 0 - - -def get_blob_logdir(): - return os.environ.get("DIFFUSION_BLOB_LOGDIR", logger.get_dir()) - - -def find_resume_checkpoint(): - # On your infrastructure, you may want to override this to automatically - # discover the latest checkpoint on your blob storage, etc. - return None - - -def find_ema_checkpoint(main_checkpoint, step, rate): - if main_checkpoint is None: - return None - filename = f"ema_{rate}_{(step):06d}.pt" - path = bf.join(bf.dirname(main_checkpoint), filename) - if bf.exists(path): - return path - return None - - -def log_loss_dict(diffusion, ts, losses): - for key, values in losses.items(): - logger.logkv_mean(key, values.mean().item()) - # Log the quantiles (four quartiles, in particular). - for sub_t, sub_loss in zip(ts.cpu().numpy(), values.detach().cpu().numpy()): - quartile = int(4 * sub_t / diffusion.num_timesteps) - logger.logkv_mean(f"{key}_q{quartile}", sub_loss) diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/instaboost/mask_rcnn_r101_fpn_instaboost_4x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/instaboost/mask_rcnn_r101_fpn_instaboost_4x_coco.py deleted file mode 100644 index c2819477abb070b724d0295ccf028025918b263a..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/instaboost/mask_rcnn_r101_fpn_instaboost_4x_coco.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './mask_rcnn_r50_fpn_instaboost_4x_coco.py' -model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) diff --git a/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/optim/cosine_lr_scheduler.py b/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/optim/cosine_lr_scheduler.py deleted file mode 100644 index 1e4f0bbf28f1ad893a301f1bfac1da8e97370337..0000000000000000000000000000000000000000 --- a/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/optim/cosine_lr_scheduler.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import math - -from torch.optim import Optimizer -from torch.optim.lr_scheduler import _LRScheduler - - -class CosineLRScheduler(_LRScheduler): - """Cosine LR scheduler. - - Args: - optimizer (Optimizer): Torch optimizer. - warmup_steps (int): Number of warmup steps. - total_steps (int): Total number of steps. - lr_min_ratio (float): Minimum learning rate. - cycle_length (float): Cycle length. 
- """ - def __init__(self, optimizer: Optimizer, total_steps: int, warmup_steps: int, - lr_min_ratio: float = 0.0, cycle_length: float = 1.0): - self.warmup_steps = warmup_steps - assert self.warmup_steps >= 0 - self.total_steps = total_steps - assert self.total_steps >= 0 - self.lr_min_ratio = lr_min_ratio - self.cycle_length = cycle_length - super().__init__(optimizer) - - def _get_sched_lr(self, lr: float, step: int): - if step < self.warmup_steps: - lr_ratio = step / self.warmup_steps - lr = lr_ratio * lr - elif step <= self.total_steps: - s = (step - self.warmup_steps) / (self.total_steps - self.warmup_steps) - lr_ratio = self.lr_min_ratio + 0.5 * (1 - self.lr_min_ratio) * \ - (1. + math.cos(math.pi * s / self.cycle_length)) - lr = lr_ratio * lr - else: - lr_ratio = self.lr_min_ratio - lr = lr_ratio * lr - return lr - - def get_lr(self): - return [self._get_sched_lr(lr, self.last_epoch) for lr in self.base_lrs] diff --git a/spaces/Hallucinate/demo/ldm/modules/encoders/__init__.py b/spaces/Hallucinate/demo/ldm/modules/encoders/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/simultaneous_translation/modules/monotonic_transformer_layer.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/simultaneous_translation/modules/monotonic_transformer_layer.py deleted file mode 100644 index 94bd71fb9c46a64a8b6e1960f47dfc43b78dda43..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/simultaneous_translation/modules/monotonic_transformer_layer.py +++ /dev/null @@ -1,182 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from fairseq.modules import TransformerDecoderLayer, TransformerEncoderLayer - -from . import build_monotonic_attention - -from typing import Dict, Optional, List - -from torch import Tensor -import torch - - -class TransformerMonotonicEncoderLayer(TransformerEncoderLayer): - def forward(self, x, encoder_padding_mask): - seq_len, _, _ = x.size() - attn_mask = x.new_ones([seq_len, seq_len]).triu(1) - attn_mask = attn_mask.masked_fill(attn_mask.bool(), float("-inf")) - return super().forward(x, encoder_padding_mask, attn_mask) - - -class TransformerMonotonicDecoderLayer(TransformerDecoderLayer): - def __init__(self, args): - super().__init__(args) - - assert args.simul_type is not None, "A --simul-type is needed." 
- self.encoder_attn = build_monotonic_attention(args) - - def prune_incremental_state( - self, - incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] - ): - input_buffer = self.self_attn._get_input_buffer(incremental_state) - for key in ["prev_key", "prev_value"]: - input_buffer_key = input_buffer[key] - assert input_buffer_key is not None - if input_buffer_key.size(2) > 1: - input_buffer[key] = input_buffer_key[:, :, :-1, :] - else: - typed_empty_dict: Dict[str, Optional[Tensor]] = {} - input_buffer = typed_empty_dict - break - assert incremental_state is not None - self.self_attn._set_input_buffer(incremental_state, input_buffer) - - def forward( - self, - x, - encoder_out: Optional[Tensor] = None, - encoder_padding_mask: Optional[Tensor] = None, - incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, - prev_self_attn_state: Optional[List[Tensor]] = None, - prev_attn_state: Optional[List[Tensor]] = None, - self_attn_mask: Optional[Tensor] = None, - self_attn_padding_mask: Optional[Tensor] = None, - need_attn: bool = False, - need_head_weights: bool = False, - ): - """ - Args: - x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` - encoder_padding_mask (ByteTensor, optional): binary - ByteTensor of shape `(batch, src_len)` where padding - elements are indicated by ``1``. - need_attn (bool, optional): return attention weights - need_head_weights (bool, optional): return attention weights - for each head (default: return average over heads). - - Returns: - encoded output of shape `(seq_len, batch, embed_dim)` - """ - if need_head_weights: - need_attn = True - - residual = x - if self.normalize_before: - x = self.self_attn_layer_norm(x) - if prev_self_attn_state is not None: - prev_key, prev_value = prev_self_attn_state[:2] - saved_state: Dict[str, Optional[Tensor]] = { - "prev_key": prev_key, - "prev_value": prev_value, - } - if len(prev_self_attn_state) >= 3: - saved_state["prev_key_padding_mask"] = prev_self_attn_state[2] - assert incremental_state is not None - self.self_attn._set_input_buffer(incremental_state, saved_state) - _self_attn_input_buffer = self.self_attn._get_input_buffer(incremental_state) - if self.cross_self_attention and not ( - incremental_state is not None - and _self_attn_input_buffer is not None - and "prev_key" in _self_attn_input_buffer - ): - if self_attn_mask is not None: - assert encoder_out is not None - self_attn_mask = torch.cat( - (x.new_zeros(x.size(0), encoder_out.size(0)), self_attn_mask), dim=1 - ) - if self_attn_padding_mask is not None: - if encoder_padding_mask is None: - assert encoder_out is not None - encoder_padding_mask = self_attn_padding_mask.new_zeros( - encoder_out.size(1), encoder_out.size(0) - ) - self_attn_padding_mask = torch.cat( - (encoder_padding_mask, self_attn_padding_mask), dim=1 - ) - assert encoder_out is not None - y = torch.cat((encoder_out, x), dim=0) - else: - y = x - - x, attn = self.self_attn( - query=x, - key=y, - value=y, - key_padding_mask=self_attn_padding_mask, - incremental_state=incremental_state, - need_weights=False, - attn_mask=self_attn_mask, - ) - x = self.dropout_module(x) - x = self.residual_connection(x, residual) - if not self.normalize_before: - x = self.self_attn_layer_norm(x) - - assert self.encoder_attn is not None - residual = x - if self.normalize_before: - x = self.encoder_attn_layer_norm(x) - if prev_attn_state is not None: - prev_key, prev_value = prev_attn_state[:2] - saved_state: Dict[str, Optional[Tensor]] = { - "prev_key": prev_key, - 
"prev_value": prev_value, - } - if len(prev_attn_state) >= 3: - saved_state["prev_key_padding_mask"] = prev_attn_state[2] - assert incremental_state is not None - self.encoder_attn._set_input_buffer(incremental_state, saved_state) - - x, attn = self.encoder_attn( - query=x, - key=encoder_out, - value=encoder_out, - key_padding_mask=encoder_padding_mask, - incremental_state=incremental_state, - static_kv=True, - need_weights=need_attn or (not self.training and self.need_attn), - need_head_weights=need_head_weights, - ) - x = self.dropout_module(x) - x = self.residual_connection(x, residual) - if not self.normalize_before: - x = self.encoder_attn_layer_norm(x) - - residual = x - if self.normalize_before: - x = self.final_layer_norm(x) - - x = self.activation_fn(self.fc1(x)) - x = self.activation_dropout_module(x) - x = self.fc2(x) - x = self.dropout_module(x) - x = self.residual_connection(x, residual) - if not self.normalize_before: - x = self.final_layer_norm(x) - if self.onnx_trace and incremental_state is not None: - saved_state = self.self_attn._get_input_buffer(incremental_state) - assert saved_state is not None - if self_attn_padding_mask is not None: - self_attn_state = [ - saved_state["prev_key"], - saved_state["prev_value"], - saved_state["prev_key_padding_mask"], - ] - else: - self_attn_state = [saved_state["prev_key"], saved_state["prev_value"]] - return x, attn, self_attn_state - return x, attn, None diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/modules/quantization/pq/pq.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/modules/quantization/pq/pq.py deleted file mode 100644 index eddc2eb34602403f10979f54cd23a45bc2f104d5..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/modules/quantization/pq/pq.py +++ /dev/null @@ -1,128 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from .em import EM, EmptyClusterResolveError - - -class PQ(EM): - """ - Quantizes the layer weights W with the standard Product Quantization - technique. This learns a codebook of codewords or centroids of size - block_size from W. For further reference on using PQ to quantize - neural networks, see "And the Bit Goes Down: Revisiting the Quantization - of Neural Networks", Stock et al., ICLR 2020. - - PQ is performed in two steps: - (1) The matrix W (weights or fully-connected or convolutional layer) - is reshaped to (block_size, -1). - - If W is fully-connected (2D), its columns are split into - blocks of size block_size. - - If W is convolutional (4D), its filters are split along the - spatial dimension. - (2) We apply the standard EM/k-means algorithm to the resulting reshaped matrix. 
- - Args: - - W: weight matrix to quantize of size (in_features x out_features) - - block_size: size of the blocks (subvectors) - - n_centroids: number of centroids - - n_iter: number of k-means iterations - - eps: for cluster reassignment when an empty cluster is found - - max_tentatives for cluster reassignment when an empty cluster is found - - verbose: print information after each iteration - - Remarks: - - block_size be compatible with the shape of W - """ - - def __init__( - self, - W, - block_size, - n_centroids=256, - n_iter=20, - eps=1e-6, - max_tentatives=30, - verbose=True, - ): - self.block_size = block_size - W_reshaped = self._reshape(W) - super(PQ, self).__init__( - W_reshaped, - n_centroids=n_centroids, - n_iter=n_iter, - eps=eps, - max_tentatives=max_tentatives, - verbose=verbose, - ) - - def _reshape(self, W): - """ - Reshapes the matrix W as expained in step (1). - """ - - # fully connected: by convention the weight has size out_features x in_features - if len(W.size()) == 2: - self.out_features, self.in_features = W.size() - assert ( - self.in_features % self.block_size == 0 - ), "Linear: n_blocks must be a multiple of in_features" - return ( - W.reshape(self.out_features, -1, self.block_size) - .permute(2, 1, 0) - .flatten(1, 2) - ) - - # convolutional: we reshape along the spatial dimension - elif len(W.size()) == 4: - self.out_channels, self.in_channels, self.k_h, self.k_w = W.size() - assert ( - self.in_channels * self.k_h * self.k_w - ) % self.block_size == 0, ( - "Conv2d: n_blocks must be a multiple of in_channels * k_h * k_w" - ) - return ( - W.reshape(self.out_channels, -1, self.block_size) - .permute(2, 1, 0) - .flatten(1, 2) - ) - # not implemented - else: - raise NotImplementedError(W.size()) - - def encode(self): - """ - Performs self.n_iter EM steps. - """ - - self.initialize_centroids() - for i in range(self.n_iter): - try: - self.step(i) - except EmptyClusterResolveError: - break - - def decode(self): - """ - Returns the encoded full weight matrix. Must be called after - the encode function. - """ - - # fully connected case - if "k_h" not in self.__dict__: - return ( - self.centroids[self.assignments] - .reshape(-1, self.out_features, self.block_size) - .permute(1, 0, 2) - .flatten(1, 2) - ) - - # convolutional case - else: - return ( - self.centroids[self.assignments] - .reshape(-1, self.out_channels, self.block_size) - .permute(1, 0, 2) - .reshape(self.out_channels, self.in_channels, self.k_h, self.k_w) - ) diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/tasks/multilingual_masked_lm.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/tasks/multilingual_masked_lm.py deleted file mode 100644 index 9e6ce4b8a2f77ed889a6e1451321a8e3ac21dc67..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/tasks/multilingual_masked_lm.py +++ /dev/null @@ -1,338 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
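For reference, the `PQ` class in the deleted `pq.py` above reshapes a fully-connected weight of shape `(out_features, in_features)` into column subvectors of length `block_size` before clustering them, and `decode()` simply inverts that reshape after replacing each subvector with its centroid. The snippet below is an illustrative sketch of that round trip only (no k-means step); it is not code from either deleted file.

```python
import torch

O, I, b = 4, 6, 3                     # out_features, in_features, block_size
W = torch.randn(O, I)

# step (1): split the columns of W into subvectors of length block_size
cols = W.reshape(O, -1, b).permute(2, 1, 0).flatten(1, 2)   # shape (b, (I // b) * O)

# step (2) would run EM/k-means over the columns of `cols`; decoding replaces each
# column with its centroid (here the columns are reused unchanged, standing in for
# `centroids[assignments]`) and undoes the reshape
W_rec = cols.t().reshape(-1, O, b).permute(1, 0, 2).flatten(1, 2)

assert torch.allclose(W, W_rec)       # lossless round trip when nothing is quantized
```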
- -import logging -import os - -import numpy as np -import torch -from fairseq import utils -from fairseq.data import ( - ConcatDataset, - Dictionary, - IdDataset, - MaskTokensDataset, - NestedDictionaryDataset, - NumelDataset, - NumSamplesDataset, - PadDataset, - PrependTokenDataset, - RawLabelDataset, - ResamplingDataset, - SortDataset, - TokenBlockDataset, - data_utils, - encoders, -) -from fairseq.tasks import LegacyFairseqTask, register_task - - -logger = logging.getLogger(__name__) - - -@register_task("multilingual_masked_lm") -class MultiLingualMaskedLMTask(LegacyFairseqTask): - """Task for training masked language models (e.g., BERT, RoBERTa).""" - - @staticmethod - def add_args(parser): - """Add task-specific arguments to the parser.""" - parser.add_argument( - "data", - help="colon separated path to data directories list, \ - will be iterated upon during epochs in round-robin manner", - ) - parser.add_argument( - "--sample-break-mode", - default="complete", - choices=["none", "complete", "complete_doc", "eos"], - help='If omitted or "none", fills each sample with tokens-per-sample ' - 'tokens. If set to "complete", splits samples only at the end ' - "of sentence, but may include multiple sentences per sample. " - '"complete_doc" is similar but respects doc boundaries. ' - 'If set to "eos", includes only one sentence per sample.', - ) - parser.add_argument( - "--tokens-per-sample", - default=512, - type=int, - help="max number of total tokens over all segments " - "per sample for BERT dataset", - ) - parser.add_argument( - "--mask-prob", - default=0.15, - type=float, - help="probability of replacing a token with mask", - ) - parser.add_argument( - "--leave-unmasked-prob", - default=0.1, - type=float, - help="probability that a masked token is unmasked", - ) - parser.add_argument( - "--random-token-prob", - default=0.1, - type=float, - help="probability of replacing a token with a random token", - ) - parser.add_argument( - "--freq-weighted-replacement", - action="store_true", - help="sample random replacement words based on word frequencies", - ) - parser.add_argument( - "--mask-whole-words", - default=False, - action="store_true", - help="mask whole words; you may also want to set --bpe", - ) - parser.add_argument( - "--multilang-sampling-alpha", - type=float, - default=1.0, - help="smoothing alpha for sample rations across multiple datasets", - ) - - def __init__(self, args, dictionary): - super().__init__(args) - self.dictionary = dictionary - self.seed = args.seed - - # add mask token - self.mask_idx = dictionary.add_symbol("") - - @classmethod - def setup_task(cls, args, **kwargs): - paths = utils.split_paths(args.data) - assert len(paths) > 0 - dictionary = Dictionary.load(os.path.join(paths[0], "dict.txt")) - logger.info("dictionary: {} types".format(len(dictionary))) - return cls(args, dictionary) - - def _get_whole_word_mask(self): - # create masked input and targets - if self.args.mask_whole_words: - bpe = encoders.build_bpe(self.args) - if bpe is not None: - - def is_beginning_of_word(i): - if i < self.source_dictionary.nspecial: - # special elements are always considered beginnings - return True - tok = self.source_dictionary[i] - if tok.startswith("madeupword"): - return True - try: - return bpe.is_beginning_of_word(tok) - except ValueError: - return True - - mask_whole_words = torch.ByteTensor( - list(map(is_beginning_of_word, range(len(self.source_dictionary)))) - ) - else: - mask_whole_words = None - return mask_whole_words - - def _get_sample_prob(self, 
dataset_lens): - """ - Get smoothed sampling porbability by languages. This helps low resource - languages by upsampling them. - """ - prob = dataset_lens / dataset_lens.sum() - smoothed_prob = prob ** self.args.multilang_sampling_alpha - smoothed_prob = smoothed_prob / smoothed_prob.sum() - return smoothed_prob - - def load_dataset(self, split, epoch=1, combine=False, **kwargs): - """Load a given dataset split. - - Args: - split (str): name of the split (e.g., train, valid, test) - """ - paths = utils.split_paths(self.args.data) - assert len(paths) > 0 - data_path = paths[(epoch - 1) % len(paths)] - - languages = sorted( - name - for name in os.listdir(data_path) - if os.path.isdir(os.path.join(data_path, name)) - ) - - logger.info("Training on {0} languages: {1}".format(len(languages), languages)) - logger.info( - "Language to id mapping: ", {lang: id for id, lang in enumerate(languages)} - ) - - mask_whole_words = self._get_whole_word_mask() - lang_datasets = [] - for lang_id, language in enumerate(languages): - split_path = os.path.join(data_path, language, split) - - dataset = data_utils.load_indexed_dataset( - split_path, - self.source_dictionary, - self.args.dataset_impl, - combine=combine, - ) - if dataset is None: - raise FileNotFoundError( - "Dataset not found: {} ({})".format(split, split_path) - ) - - # create continuous blocks of tokens - dataset = TokenBlockDataset( - dataset, - dataset.sizes, - self.args.tokens_per_sample - 1, # one less for - pad=self.source_dictionary.pad(), - eos=self.source_dictionary.eos(), - break_mode=self.args.sample_break_mode, - ) - logger.info("loaded {} blocks from: {}".format(len(dataset), split_path)) - - # prepend beginning-of-sentence token (, equiv. to [CLS] in BERT) - dataset = PrependTokenDataset(dataset, self.source_dictionary.bos()) - - src_dataset, tgt_dataset = MaskTokensDataset.apply_mask( - dataset, - self.source_dictionary, - pad_idx=self.source_dictionary.pad(), - mask_idx=self.mask_idx, - seed=self.args.seed, - mask_prob=self.args.mask_prob, - leave_unmasked_prob=self.args.leave_unmasked_prob, - random_token_prob=self.args.random_token_prob, - freq_weighted_replacement=self.args.freq_weighted_replacement, - mask_whole_words=mask_whole_words, - ) - - lang_dataset = NestedDictionaryDataset( - { - "net_input": { - "src_tokens": PadDataset( - src_dataset, - pad_idx=self.source_dictionary.pad(), - left_pad=False, - ), - "src_lengths": NumelDataset(src_dataset, reduce=False), - }, - "target": PadDataset( - tgt_dataset, - pad_idx=self.source_dictionary.pad(), - left_pad=False, - ), - "nsentences": NumSamplesDataset(), - "ntokens": NumelDataset(src_dataset, reduce=True), - "lang_id": RawLabelDataset([lang_id] * src_dataset.sizes.shape[0]), - }, - sizes=[src_dataset.sizes], - ) - lang_datasets.append(lang_dataset) - - dataset_lengths = np.array( - [len(d) for d in lang_datasets], - dtype=float, - ) - logger.info( - "loaded total {} blocks for all languages".format( - dataset_lengths.sum(), - ) - ) - if split == self.args.train_subset: - # For train subset, additionally up or down sample languages. 
- sample_probs = self._get_sample_prob(dataset_lengths) - logger.info( - "Sample probability by language: ", - { - lang: "{0:.4f}".format(sample_probs[id]) - for id, lang in enumerate(languages) - }, - ) - size_ratio = (sample_probs * dataset_lengths.sum()) / dataset_lengths - logger.info( - "Up/Down Sampling ratio by language: ", - { - lang: "{0:.2f}".format(size_ratio[id]) - for id, lang in enumerate(languages) - }, - ) - - resampled_lang_datasets = [ - ResamplingDataset( - lang_datasets[i], - size_ratio=size_ratio[i], - seed=self.args.seed, - epoch=epoch, - replace=size_ratio[i] >= 1.0, - ) - for i, d in enumerate(lang_datasets) - ] - dataset = ConcatDataset(resampled_lang_datasets) - else: - dataset = ConcatDataset(lang_datasets) - lang_splits = [split] - for lang_id, lang_dataset in enumerate(lang_datasets): - split_name = split + "_" + languages[lang_id] - lang_splits.append(split_name) - self.datasets[split_name] = lang_dataset - - # [TODO]: This is hacky for now to print validation ppl for each - # language individually. Maybe need task API changes to allow it - # in more generic ways. - if split in self.args.valid_subset: - self.args.valid_subset = self.args.valid_subset.replace( - split, ",".join(lang_splits) - ) - - with data_utils.numpy_seed(self.args.seed + epoch): - shuffle = np.random.permutation(len(dataset)) - - self.datasets[split] = SortDataset( - dataset, - sort_order=[ - shuffle, - dataset.sizes, - ], - ) - - def build_dataset_for_inference(self, src_tokens, src_lengths, sort=True): - src_dataset = PadDataset( - TokenBlockDataset( - src_tokens, - src_lengths, - self.args.tokens_per_sample - 1, # one less for - pad=self.source_dictionary.pad(), - eos=self.source_dictionary.eos(), - break_mode="eos", - ), - pad_idx=self.source_dictionary.pad(), - left_pad=False, - ) - src_dataset = PrependTokenDataset(src_dataset, self.source_dictionary.bos()) - src_dataset = NestedDictionaryDataset( - { - "id": IdDataset(), - "net_input": { - "src_tokens": src_dataset, - "src_lengths": NumelDataset(src_dataset, reduce=False), - }, - }, - sizes=src_lengths, - ) - if sort: - src_dataset = SortDataset(src_dataset, sort_order=[src_lengths]) - return src_dataset - - @property - def source_dictionary(self): - return self.dictionary - - @property - def target_dictionary(self): - return self.dictionary diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/tasks/simultaneous_translation.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/tasks/simultaneous_translation.py deleted file mode 100644 index 11c7dc1ea966a54f8915ef164377e40f90e851a1..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/tasks/simultaneous_translation.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import logging -from fairseq.tasks import register_task -from fairseq.tasks.speech_to_text import SpeechToTextTask -from fairseq.tasks.translation import ( - TranslationTask, TranslationConfig -) - -try: - import examples.simultaneous_translation # noqa - import_successful = True -except BaseException: - import_successful = False - - -logger = logging.getLogger(__name__) - - -def check_import(flag): - if not flag: - raise ImportError( - "'examples.simultaneous_translation' is not correctly imported. " - "Please considering `pip install -e $FAIRSEQ_DIR`." 
- ) - - -@register_task("simul_speech_to_text") -class SimulSpeechToTextTask(SpeechToTextTask): - def __init__(self, args, tgt_dict): - check_import(import_successful) - super().__init__(args, tgt_dict) - - -@register_task("simul_text_to_text", dataclass=TranslationConfig) -class SimulTextToTextTask(TranslationTask): - def __init__(self, cfg, src_dict, tgt_dict): - check_import(import_successful) - super().__init__(cfg, src_dict, tgt_dict) diff --git a/spaces/Harveenchadha/Vakyansh-Malayalam-TTS/ttsv/src/glow_tts/monotonic_align/setup.py b/spaces/Harveenchadha/Vakyansh-Malayalam-TTS/ttsv/src/glow_tts/monotonic_align/setup.py deleted file mode 100644 index 3a3892f92e3fbb866e3111199a9a4cf1f88e3959..0000000000000000000000000000000000000000 --- a/spaces/Harveenchadha/Vakyansh-Malayalam-TTS/ttsv/src/glow_tts/monotonic_align/setup.py +++ /dev/null @@ -1,23 +0,0 @@ -import numpy -from setuptools import Extension, find_packages -from distutils.core import setup -from Cython.Build import cythonize - - -_VERSION = "1.1" - - -ext_modules = cythonize( - "monotonic_align/core.pyx", - compiler_directives={"language_level": "3"}, -) - -setup( - name="monotonic_align", - ext_modules=ext_modules, - include_dirs=[numpy.get_include(), "monotonic_align"], - packages=find_packages(), - setup_requires=["numpy", "cython"], - install_requires=["numpy"], - version=_VERSION, -) diff --git a/spaces/HawkingChen/LangFlow/Dockerfile b/spaces/HawkingChen/LangFlow/Dockerfile deleted file mode 100644 index 5507d40d099b33aeaeb69447359d62714cc505f2..0000000000000000000000000000000000000000 --- a/spaces/HawkingChen/LangFlow/Dockerfile +++ /dev/null @@ -1,14 +0,0 @@ -FROM python:3.10-slim - -RUN apt-get update && apt-get install gcc g++ git make -y -RUN useradd -m -u 1000 user -USER user -ENV HOME=/home/user \ - PATH=/home/user/.local/bin:$PATH - -WORKDIR $HOME/app - -COPY --chown=user . $HOME/app - -RUN pip install langflow>==0.1.7 -U --user -CMD ["python", "-m", "langflow", "--host", "0.0.0.0", "--port", "7860", "--remove-api-keys" , "--database-url", "sqlite:////home/langflow/langflow.db"] \ No newline at end of file diff --git a/spaces/ICML2022/OFA/fairseq/examples/simultaneous_translation/docs/enja-waitk.md b/spaces/ICML2022/OFA/fairseq/examples/simultaneous_translation/docs/enja-waitk.md deleted file mode 100644 index fb9d82576f80b4405564a99774fc98ac2fe6ad3b..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/examples/simultaneous_translation/docs/enja-waitk.md +++ /dev/null @@ -1,106 +0,0 @@ -# An example of English to Japaneses Simultaneous Translation System - -This is an example of training and evaluating a transformer *wait-k* English to Japanese simultaneous text-to-text translation model. - -## Data Preparation -This section introduces the data preparation for training and evaluation. -If you only want to evaluate the model, please jump to [Inference & Evaluation](#inference-&-evaluation) - -For illustration, we only use the following subsets of the available data from [WMT20 news translation task](http://www.statmt.org/wmt20/translation-task.html), which results in 7,815,391 sentence pairs. -- News Commentary v16 -- Wiki Titles v3 -- WikiMatrix V1 -- Japanese-English Subtitle Corpus -- The Kyoto Free Translation Task Corpus - -We use WMT20 development data as development set. Training `transformer_vaswani_wmt_en_de_big` model on such amount of data will result in 17.3 BLEU with greedy search and 19.7 with beam (10) search. 
Notice that a better performance can be achieved with the full WMT training data. - -We use [sentencepiece](https://github.com/google/sentencepiece) toolkit to tokenize the data with a vocabulary size of 32000. -Additionally, we filtered out the sentences longer than 200 words after tokenization. -Assuming the tokenized text data is saved at `${DATA_DIR}`, -we prepare the data binary with the following command. - -```bash -fairseq-preprocess \ - --source-lang en --target-lang ja \ - --trainpref ${DATA_DIR}/train \ - --validpref ${DATA_DIR}/dev \ - --testpref ${DATA_DIR}/test \ - --destdir ${WMT20_ENJA_DATA_BIN} \ - --nwordstgt 32000 --nwordssrc 32000 \ - --workers 20 -``` - -## Simultaneous Translation Model Training -To train a wait-k `(k=10)` model. -```bash -fairseq-train ${WMT20_ENJA_DATA_BIN} \ - --save-dir ${SAVEDIR} - --simul-type waitk \ - --waitk-lagging 10 \ - --max-epoch 70 \ - --arch transformer_monotonic_vaswani_wmt_en_de_big \ - --optimizer adam \ - --adam-betas '(0.9, 0.98)' \ - --lr-scheduler inverse_sqrt \ - --warmup-init-lr 1e-07 \ - --warmup-updates 4000 \ - --lr 0.0005 \ - --stop-min-lr 1e-09 \ - --clip-norm 10.0 \ - --dropout 0.3 \ - --weight-decay 0.0 \ - --criterion label_smoothed_cross_entropy \ - --label-smoothing 0.1 \ - --max-tokens 3584 -``` -This command is for training on 8 GPUs. Equivalently, the model can be trained on one GPU with `--update-freq 8`. - -## Inference & Evaluation -First of all, install [SimulEval](https://github.com/facebookresearch/SimulEval) for evaluation. - -```bash -git clone https://github.com/facebookresearch/SimulEval.git -cd SimulEval -pip install -e . -``` - -The following command is for the evaluation. -Assuming the source and reference files are `${SRC_FILE}` and `${REF_FILE}`, the sentencepiece model file for English is saved at `${SRC_SPM_PATH}` - - -```bash -simuleval \ - --source ${SRC_FILE} \ - --target ${TGT_FILE} \ - --data-bin ${WMT20_ENJA_DATA_BIN} \ - --sacrebleu-tokenizer ja-mecab \ - --eval-latency-unit char \ - --no-space \ - --src-splitter-type sentencepiecemodel \ - --src-splitter-path ${SRC_SPM_PATH} \ - --agent ${FAIRSEQ}/examples/simultaneous_translation/agents/simul_trans_text_agent_enja.py \ - --model-path ${SAVE_DIR}/${CHECKPOINT_FILENAME} \ - --output ${OUTPUT} \ - --scores -``` - -The `--data-bin` should be the same in previous sections if you prepare the data from the scratch. -If only for evaluation, a prepared data directory can be found [here](https://dl.fbaipublicfiles.com/simultaneous_translation/wmt20_enja_medium_databin.tgz) and a pretrained checkpoint (wait-k=10 model) can be downloaded from [here](https://dl.fbaipublicfiles.com/simultaneous_translation/wmt20_enja_medium_wait10_ckpt.pt). - -The output should look like this: -```bash -{ - "Quality": { - "BLEU": 11.442253287568398 - }, - "Latency": { - "AL": 8.6587861866951, - "AP": 0.7863304776251316, - "DAL": 9.477850951194764 - } -} -``` -The latency is evaluated by characters (`--eval-latency-unit`) on the target side. The latency is evaluated with `sacrebleu` with `MeCab` tokenizer `--sacrebleu-tokenizer ja-mecab`. `--no-space` indicates that do not add space when merging the predicted words. - -If `--output ${OUTPUT}` option is used, the detailed log and scores will be stored under the `${OUTPUT}` directory. 
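For intuition about the latency numbers above: a wait-k policy first reads k source tokens and then alternates a single read with a single write, and AL measures how far this schedule lags, on average, behind an ideal policy that needs no extra context. The sketch below is a minimal token-level illustration of that relationship and is not part of SimulEval or fairseq (SimulEval here measures latency over target characters, per `--eval-latency-unit char`); the helper names are hypothetical.

```python
from typing import List

def waitk_delays(k: int, src_len: int, tgt_len: int) -> List[int]:
    """g(t): number of source tokens read before emitting target token t (1-indexed)."""
    return [min(k + t - 1, src_len) for t in range(1, tgt_len + 1)]

def average_lagging(delays: List[int], src_len: int) -> float:
    """AL (Ma et al., 2019): mean lag behind the ideal diagonal, accumulated up to
    the first target step that has read the entire source."""
    gamma = len(delays) / src_len  # target-to-source length ratio
    tau = next((t for t, g in enumerate(delays, start=1) if g >= src_len), len(delays))
    return sum(delays[t - 1] - (t - 1) / gamma for t in range(1, tau + 1)) / tau

delays = waitk_delays(k=10, src_len=25, tgt_len=30)
print(average_lagging(delays, src_len=25))  # close to the chosen k
```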
diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/modules/quantization/scalar/utils.py b/spaces/ICML2022/OFA/fairseq/fairseq/modules/quantization/scalar/utils.py deleted file mode 100644 index 2ec6af3fcb09ccaf853be15a84ed8181f9e2f546..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/fairseq/modules/quantization/scalar/utils.py +++ /dev/null @@ -1,78 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import logging -from operator import attrgetter - -import torch.distributed as dist -import torch.nn as nn - -from ..pq.utils import attrsetter, get_layers -from .modules import ActivationQuantizer, IntConv2d, IntEmbedding, IntLinear - - -MAPPING = {nn.Linear: IntLinear, nn.Embedding: IntEmbedding, nn.Conv2d: IntConv2d} - - -def quantize_model_(model, p=0.2, bits=8, update_step=3000, method="histogram", remove_weights=False): - """ - Replaces all modules with their scalar quantized counterpart and - registers hooks to quantize the post-ativations of those modules. - - Args: - - model: a nn.Module - - p: amount of noise (0 for no noise, 1 to quantize all the weights/activations) - - bits: number of bits - - update_step: update quantization parameters every update_step steps - """ - # quantize all layers - # remove weights indicates whether the weights extension should be removed, in addition to - # weight_orig and weight extension on names - quantized_layers = get_layers(model, "(.*?)", remove_weights=remove_weights) - - for layer in quantized_layers: - - # book-keeping - is_master_process = (not dist.is_initialized()) or ( - dist.is_initialized() and dist.get_rank() == 0 - ) - - # recover module - module = attrgetter(layer)(model) - if is_master_process: - logging.info( - f"Quantizing layer {layer} with bits={bits} and QuantNoise={p}" - ) - - # quantization params - q_params = { - "p": p, - "update_step": update_step, - "bits": bits, - "method": method, - "counter": 0, - } - - # instantiate the quantized counterpart - if isinstance(module, tuple(MAPPING.keys())): - QuantizedModule = MAPPING[module.__class__] - quantized_module = QuantizedModule.__new__(QuantizedModule) - params = module.__dict__ - params.update(q_params) - quantized_module.__dict__.update(params) - - else: - if is_master_process: - logging.info(f"Module {module} not yet supported for quantization") - continue - - # activation quantization - a_q = ActivationQuantizer(quantized_module, p=0, bits=bits, method=method) - - # replace layer by its quantized counterpart - attrsetter(layer)(model, quantized_module) - - # return name of quantized layers - return quantized_layers diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/ngram_repeat_block.py b/spaces/ICML2022/OFA/fairseq/fairseq/ngram_repeat_block.py deleted file mode 100644 index 854125149448a2d37ad2773cd1e6d614e73e0e79..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/fairseq/ngram_repeat_block.py +++ /dev/null @@ -1,150 +0,0 @@ -# Originally from Microsoft Corporation. -# Licensed under the MIT License. 
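For intuition, the deleted `quantize_model_` helper above follows the Quant-Noise idea: during training only a random fraction `p` of each weight tensor is passed through fake int-`bits` quantization, while the remaining entries stay in full precision (gradients would flow through a straight-through estimator). The sketch below is an assumption-level illustration of that idea, not the fairseq implementation, which also swaps in `IntLinear`/`IntEmbedding`/`IntConv2d` modules and quantizes activations via hooks.

```python
import torch

def fake_scalar_quant(w: torch.Tensor, p: float = 0.2, bits: int = 8) -> torch.Tensor:
    """Quantize/dequantize a random fraction p of w onto a bits-wide uniform grid."""
    scale = (w.max() - w.min()) / (2 ** bits - 1)         # single scale per tensor
    zero = w.min()
    w_q = torch.round((w - zero) / scale) * scale + zero  # fake quantization
    mask = torch.rand_like(w) < p                         # which entries receive noise
    return torch.where(mask, w_q, w)                      # the rest stay full precision

w = torch.randn(16, 32)
print((fake_scalar_quant(w, p=0.5) - w).abs().max())      # small quantization error
```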
- -""" Wrapper for ngram_repeat_block cuda extension """ -import torch -from torch import nn - -import math -from typing import Dict, List, Optional -import warnings - -try: - from fairseq import ngram_repeat_block_cuda - - EXTENSION_BUILT = True -except ImportError: - EXTENSION_BUILT = False - - -def is_cuda_extension_usable() -> bool: - """Check whether ngram_repeat_block_cuda is built properly""" - if not EXTENSION_BUILT or not torch.cuda.is_available(): - return False - bsz = 2 - tokens = torch.tensor([[4, 4, 3, 2], [1, 2, 3, 4]], dtype=torch.long, device="cuda") - lprobs = torch.rand((8, 12), device="cuda") - try: - outputs = ngram_repeat_block_cuda.forward(tokens, lprobs, bsz, 3, 4, 3) - outputs = outputs + 4 # This line breaks if the extension is built incorrectly. - return True - except RuntimeError: - warnings.warn( - "NGramRepeatBlock extension must be rebuilt." - 'Run TORCH_CUDA_ARCH_LIST="6.0;6.1;7.0" python setup.py build_ext --inplace' - ) - return False - - -class NGramRepeatBlock(nn.Module): - """ Wrapper class for calling ngram_repeat_block cuda extension """ - - def __init__(self, no_repeat_ngram_size: int, use_extension: bool = True): - super().__init__() - self.use_extension = is_cuda_extension_usable() if use_extension else False - self.no_repeat_ngram_size = no_repeat_ngram_size - - def reset_parameters(self): - pass - - @torch.jit.unused - def call_cuda_extension( - self, - tokens, - lprobs, - bsz: int, - beam_size: int, - step: int, - ): - return ngram_repeat_block_cuda.forward( - tokens, lprobs, bsz, step, beam_size, self.no_repeat_ngram_size - ) - - def forward( - self, - tokens, - lprobs, - bsz: int, - beam_size: int, - step: int, - ): - """ - Args: - tokens(Tensor): Input tokens(Bsz*beam, seq_len) - lprobs(Tensor): likelihood probability, - Expected to be updated in place.(Bsz*beam, vocab_size) - bsz(int): batch size - step(int): current step - beam_size(int): beam size - no_repeat_ngram_size(int): Ngram size - """ - msg = f"expected {bsz *beam_size} got" - assert tokens.size(0) == bsz * beam_size, f"{msg} {tokens.size(0)}" - assert lprobs.size(0) == bsz * beam_size, f"{msg} {lprobs.size(0)}" - if self.use_extension: - return self.call_cuda_extension(tokens, lprobs, bsz, beam_size, step) - - else: - return self._no_repeat_ngram( - tokens, - lprobs, - bsz, - beam_size, - step, - ) - - def _no_repeat_ngram(self, tokens, lprobs, bsz: int, beam_size: int, step: int): - """For each hypothesis generate a list of previous ngrams and set associated lprobs to -inf""" - gen_ngrams: List[Dict[str, List[int]]] = [ - torch.jit.annotate(Dict[str, List[int]], {}) - for bbsz_idx in range(bsz * beam_size) - ] - cpu_tokens = tokens.cpu() - for bbsz_idx in range(bsz * beam_size): - gen_tokens: List[int] = cpu_tokens[bbsz_idx].tolist() - for ngram in self.transpose_list( - [gen_tokens[i:] for i in range(self.no_repeat_ngram_size)] - ): - key = ",".join([str(x) for x in ngram[:-1]]) - gen_ngrams[bbsz_idx][key] = gen_ngrams[bbsz_idx].get( - key, torch.jit.annotate(List[int], []) - ) + [ngram[-1]] - if step + 2 - self.no_repeat_ngram_size >= 0: - # no banned tokens if we haven't generated no_repeat_ngram_size tokens yet - banned_tokens = [ - self.calculate_banned_tokens( - tokens, step, gen_ngrams, self.no_repeat_ngram_size, bbsz_idx - ) - for bbsz_idx in range(bsz * beam_size) - ] - else: - banned_tokens = [ - torch.jit.annotate(List[int], []) for bbsz_idx in range(bsz * beam_size) - ] - for bbsz_idx in range(bsz * beam_size): - lprobs[bbsz_idx][ - 
torch.tensor(banned_tokens[bbsz_idx], dtype=torch.int64) - ] = torch.tensor(-math.inf).to(lprobs) - return lprobs - - @staticmethod - def calculate_banned_tokens( - tokens, - step: int, - gen_ngrams: List[Dict[str, List[int]]], - no_repeat_ngram_size: int, - bbsz_idx: int, - ): - tokens_list: List[int] = tokens[ - bbsz_idx, step + 2 - no_repeat_ngram_size : step + 1 - ].tolist() - # before decoding the next token, prevent decoding of ngrams that have already appeared - ngram_index = ",".join([str(x) for x in tokens_list]) - return gen_ngrams[bbsz_idx].get(ngram_index, torch.jit.annotate(List[int], [])) - - @staticmethod - def transpose_list(l: List[List[int]]): - # GeneratorExp aren't supported in TS so ignoring the lint - min_len = min([len(x) for x in l]) # noqa - l2 = [[row[i] for row in l] for i in range(min_len)] - return l2 diff --git a/spaces/IDEA-Research/Grounded-SAM/GroundingDINO/groundingdino/config/GroundingDINO_SwinB.cfg.py b/spaces/IDEA-Research/Grounded-SAM/GroundingDINO/groundingdino/config/GroundingDINO_SwinB.cfg.py deleted file mode 100644 index f490c4bbd598a35de43d36ceafcbd769e7ff21bf..0000000000000000000000000000000000000000 --- a/spaces/IDEA-Research/Grounded-SAM/GroundingDINO/groundingdino/config/GroundingDINO_SwinB.cfg.py +++ /dev/null @@ -1,43 +0,0 @@ -batch_size = 1 -modelname = "groundingdino" -backbone = "swin_B_384_22k" -position_embedding = "sine" -pe_temperatureH = 20 -pe_temperatureW = 20 -return_interm_indices = [1, 2, 3] -backbone_freeze_keywords = None -enc_layers = 6 -dec_layers = 6 -pre_norm = False -dim_feedforward = 2048 -hidden_dim = 256 -dropout = 0.0 -nheads = 8 -num_queries = 900 -query_dim = 4 -num_patterns = 0 -num_feature_levels = 4 -enc_n_points = 4 -dec_n_points = 4 -two_stage_type = "standard" -two_stage_bbox_embed_share = False -two_stage_class_embed_share = False -transformer_activation = "relu" -dec_pred_bbox_embed_share = True -dn_box_noise_scale = 1.0 -dn_label_noise_ratio = 0.5 -dn_label_coef = 1.0 -dn_bbox_coef = 1.0 -embed_init_tgt = True -dn_labelbook_size = 2000 -max_text_len = 256 -text_encoder_type = "bert-base-uncased" -use_text_enhancer = True -use_fusion_layer = True -use_checkpoint = True -use_transformer_ckpt = True -use_text_cross_attention = True -text_dropout = 0.0 -fusion_dropout = 0.0 -fusion_droppath = 0.1 -sub_sentence_present = True diff --git a/spaces/IPN/demo_/app.py b/spaces/IPN/demo_/app.py deleted file mode 100644 index 60bf76f875a2cd9840d27d747f4371b754dae4c2..0000000000000000000000000000000000000000 --- a/spaces/IPN/demo_/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("huggingface/google/vit-base-patch16-224").launch(); diff --git a/spaces/Iceclear/StableSR/StableSR/basicsr/archs/vgg_arch.py b/spaces/Iceclear/StableSR/StableSR/basicsr/archs/vgg_arch.py deleted file mode 100644 index 05200334e477e59feefd1e4a0b5e94204e4eb2fa..0000000000000000000000000000000000000000 --- a/spaces/Iceclear/StableSR/StableSR/basicsr/archs/vgg_arch.py +++ /dev/null @@ -1,161 +0,0 @@ -import os -import torch -from collections import OrderedDict -from torch import nn as nn -from torchvision.models import vgg as vgg - -from basicsr.utils.registry import ARCH_REGISTRY - -VGG_PRETRAIN_PATH = 'experiments/pretrained_models/vgg19-dcbb9e9d.pth' -NAMES = { - 'vgg11': [ - 'conv1_1', 'relu1_1', 'pool1', 'conv2_1', 'relu2_1', 'pool2', 'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', - 'pool3', 'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'pool4', 'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', - 'pool5' - ], - 'vgg13': [ - 
'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1', 'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2', - 'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'pool3', 'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'pool4', - 'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'pool5' - ], - 'vgg16': [ - 'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1', 'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2', - 'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'pool3', 'conv4_1', 'relu4_1', 'conv4_2', - 'relu4_2', 'conv4_3', 'relu4_3', 'pool4', 'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3', - 'pool5' - ], - 'vgg19': [ - 'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1', 'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2', - 'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'conv3_4', 'relu3_4', 'pool3', 'conv4_1', - 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3', 'relu4_3', 'conv4_4', 'relu4_4', 'pool4', 'conv5_1', 'relu5_1', - 'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3', 'conv5_4', 'relu5_4', 'pool5' - ] -} - - -def insert_bn(names): - """Insert bn layer after each conv. - - Args: - names (list): The list of layer names. - - Returns: - list: The list of layer names with bn layers. - """ - names_bn = [] - for name in names: - names_bn.append(name) - if 'conv' in name: - position = name.replace('conv', '') - names_bn.append('bn' + position) - return names_bn - - -@ARCH_REGISTRY.register() -class VGGFeatureExtractor(nn.Module): - """VGG network for feature extraction. - - In this implementation, we allow users to choose whether use normalization - in the input feature and the type of vgg network. Note that the pretrained - path must fit the vgg type. - - Args: - layer_name_list (list[str]): Forward function returns the corresponding - features according to the layer_name_list. - Example: {'relu1_1', 'relu2_1', 'relu3_1'}. - vgg_type (str): Set the type of vgg network. Default: 'vgg19'. - use_input_norm (bool): If True, normalize the input image. Importantly, - the input feature must in the range [0, 1]. Default: True. - range_norm (bool): If True, norm images with range [-1, 1] to [0, 1]. - Default: False. - requires_grad (bool): If true, the parameters of VGG network will be - optimized. Default: False. - remove_pooling (bool): If true, the max pooling operations in VGG net - will be removed. Default: False. - pooling_stride (int): The stride of max pooling operation. Default: 2. 
- """ - - def __init__(self, - layer_name_list, - vgg_type='vgg19', - use_input_norm=True, - range_norm=False, - requires_grad=False, - remove_pooling=False, - pooling_stride=2): - super(VGGFeatureExtractor, self).__init__() - - self.layer_name_list = layer_name_list - self.use_input_norm = use_input_norm - self.range_norm = range_norm - - self.names = NAMES[vgg_type.replace('_bn', '')] - if 'bn' in vgg_type: - self.names = insert_bn(self.names) - - # only borrow layers that will be used to avoid unused params - max_idx = 0 - for v in layer_name_list: - idx = self.names.index(v) - if idx > max_idx: - max_idx = idx - - if os.path.exists(VGG_PRETRAIN_PATH): - vgg_net = getattr(vgg, vgg_type)(pretrained=False) - state_dict = torch.load(VGG_PRETRAIN_PATH, map_location=lambda storage, loc: storage) - vgg_net.load_state_dict(state_dict) - else: - vgg_net = getattr(vgg, vgg_type)(pretrained=True) - - features = vgg_net.features[:max_idx + 1] - - modified_net = OrderedDict() - for k, v in zip(self.names, features): - if 'pool' in k: - # if remove_pooling is true, pooling operation will be removed - if remove_pooling: - continue - else: - # in some cases, we may want to change the default stride - modified_net[k] = nn.MaxPool2d(kernel_size=2, stride=pooling_stride) - else: - modified_net[k] = v - - self.vgg_net = nn.Sequential(modified_net) - - if not requires_grad: - self.vgg_net.eval() - for param in self.parameters(): - param.requires_grad = False - else: - self.vgg_net.train() - for param in self.parameters(): - param.requires_grad = True - - if self.use_input_norm: - # the mean is for image with range [0, 1] - self.register_buffer('mean', torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1)) - # the std is for image with range [0, 1] - self.register_buffer('std', torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)) - - def forward(self, x): - """Forward function. - - Args: - x (Tensor): Input tensor with shape (n, c, h, w). - - Returns: - Tensor: Forward results. - """ - if self.range_norm: - x = (x + 1) / 2 - if self.use_input_norm: - x = (x - self.mean) / self.std - - output = {} - for key, layer in self.vgg_net._modules.items(): - x = layer(x) - if key in self.layer_name_list: - output[key] = x.clone() - - return output diff --git a/spaces/Iceclear/StableSR/StableSR/basicsr/models/lr_scheduler.py b/spaces/Iceclear/StableSR/StableSR/basicsr/models/lr_scheduler.py deleted file mode 100644 index 11e1c6c7a74f5233accda52370f92681d3d3cecf..0000000000000000000000000000000000000000 --- a/spaces/Iceclear/StableSR/StableSR/basicsr/models/lr_scheduler.py +++ /dev/null @@ -1,96 +0,0 @@ -import math -from collections import Counter -from torch.optim.lr_scheduler import _LRScheduler - - -class MultiStepRestartLR(_LRScheduler): - """ MultiStep with restarts learning rate scheme. - - Args: - optimizer (torch.nn.optimizer): Torch optimizer. - milestones (list): Iterations that will decrease learning rate. - gamma (float): Decrease ratio. Default: 0.1. - restarts (list): Restart iterations. Default: [0]. - restart_weights (list): Restart weights at each restart iteration. - Default: [1]. - last_epoch (int): Used in _LRScheduler. Default: -1. - """ - - def __init__(self, optimizer, milestones, gamma=0.1, restarts=(0, ), restart_weights=(1, ), last_epoch=-1): - self.milestones = Counter(milestones) - self.gamma = gamma - self.restarts = restarts - self.restart_weights = restart_weights - assert len(self.restarts) == len(self.restart_weights), 'restarts and their weights do not match.' 
- super(MultiStepRestartLR, self).__init__(optimizer, last_epoch) - - def get_lr(self): - if self.last_epoch in self.restarts: - weight = self.restart_weights[self.restarts.index(self.last_epoch)] - return [group['initial_lr'] * weight for group in self.optimizer.param_groups] - if self.last_epoch not in self.milestones: - return [group['lr'] for group in self.optimizer.param_groups] - return [group['lr'] * self.gamma**self.milestones[self.last_epoch] for group in self.optimizer.param_groups] - - -def get_position_from_periods(iteration, cumulative_period): - """Get the position from a period list. - - It will return the index of the right-closest number in the period list. - For example, the cumulative_period = [100, 200, 300, 400], - if iteration == 50, return 0; - if iteration == 210, return 2; - if iteration == 300, return 2. - - Args: - iteration (int): Current iteration. - cumulative_period (list[int]): Cumulative period list. - - Returns: - int: The position of the right-closest number in the period list. - """ - for i, period in enumerate(cumulative_period): - if iteration <= period: - return i - - -class CosineAnnealingRestartLR(_LRScheduler): - """ Cosine annealing with restarts learning rate scheme. - - An example of config: - periods = [10, 10, 10, 10] - restart_weights = [1, 0.5, 0.5, 0.5] - eta_min=1e-7 - - It has four cycles, each has 10 iterations. At 10th, 20th, 30th, the - scheduler will restart with the weights in restart_weights. - - Args: - optimizer (torch.nn.optimizer): Torch optimizer. - periods (list): Period for each cosine anneling cycle. - restart_weights (list): Restart weights at each restart iteration. - Default: [1]. - eta_min (float): The minimum lr. Default: 0. - last_epoch (int): Used in _LRScheduler. Default: -1. - """ - - def __init__(self, optimizer, periods, restart_weights=(1, ), eta_min=0, last_epoch=-1): - self.periods = periods - self.restart_weights = restart_weights - self.eta_min = eta_min - assert (len(self.periods) == len( - self.restart_weights)), 'periods and restart_weights should have the same length.' - self.cumulative_period = [sum(self.periods[0:i + 1]) for i in range(0, len(self.periods))] - super(CosineAnnealingRestartLR, self).__init__(optimizer, last_epoch) - - def get_lr(self): - idx = get_position_from_periods(self.last_epoch, self.cumulative_period) - current_weight = self.restart_weights[idx] - nearest_restart = 0 if idx == 0 else self.cumulative_period[idx - 1] - current_period = self.periods[idx] - - return [ - self.eta_min + current_weight * 0.5 * (base_lr - self.eta_min) * - (1 + math.cos(math.pi * ((self.last_epoch - nearest_restart) / current_period))) - for base_lr in self.base_lrs - ] diff --git a/spaces/Ikaros521/moe-tts/text/english.py b/spaces/Ikaros521/moe-tts/text/english.py deleted file mode 100644 index 6817392ba8a9eb830351de89fb7afc5ad72f5e42..0000000000000000000000000000000000000000 --- a/spaces/Ikaros521/moe-tts/text/english.py +++ /dev/null @@ -1,188 +0,0 @@ -""" from https://github.com/keithito/tacotron """ - -''' -Cleaners are transformations that run over the input text at both training and eval time. - -Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners" -hyperparameter. Some cleaners are English-specific. You'll typically want to use: - 1. "english_cleaners" for English text - 2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using - the Unidecode library (https://pypi.python.org/pypi/Unidecode) - 3. 
"basic_cleaners" if you do not want to transliterate (in this case, you should also update - the symbols in symbols.py to match your data). -''' - - -# Regular expression matching whitespace: - - -import re -import inflect -from unidecode import unidecode -import eng_to_ipa as ipa -_inflect = inflect.engine() -_comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])') -_decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)') -_pounds_re = re.compile(r'£([0-9\,]*[0-9]+)') -_dollars_re = re.compile(r'\$([0-9\.\,]*[0-9]+)') -_ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)') -_number_re = re.compile(r'[0-9]+') - -# List of (regular expression, replacement) pairs for abbreviations: -_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [ - ('mrs', 'misess'), - ('mr', 'mister'), - ('dr', 'doctor'), - ('st', 'saint'), - ('co', 'company'), - ('jr', 'junior'), - ('maj', 'major'), - ('gen', 'general'), - ('drs', 'doctors'), - ('rev', 'reverend'), - ('lt', 'lieutenant'), - ('hon', 'honorable'), - ('sgt', 'sergeant'), - ('capt', 'captain'), - ('esq', 'esquire'), - ('ltd', 'limited'), - ('col', 'colonel'), - ('ft', 'fort'), -]] - - -# List of (ipa, lazy ipa) pairs: -_lazy_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('r', 'ɹ'), - ('æ', 'e'), - ('ɑ', 'a'), - ('ɔ', 'o'), - ('ð', 'z'), - ('θ', 's'), - ('ɛ', 'e'), - ('ɪ', 'i'), - ('ʊ', 'u'), - ('ʒ', 'ʥ'), - ('ʤ', 'ʥ'), - ('ˈ', '↓'), -]] - -# List of (ipa, lazy ipa2) pairs: -_lazy_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('r', 'ɹ'), - ('ð', 'z'), - ('θ', 's'), - ('ʒ', 'ʑ'), - ('ʤ', 'dʑ'), - ('ˈ', '↓'), -]] - -# List of (ipa, ipa2) pairs -_ipa_to_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('r', 'ɹ'), - ('ʤ', 'dʒ'), - ('ʧ', 'tʃ') -]] - - -def expand_abbreviations(text): - for regex, replacement in _abbreviations: - text = re.sub(regex, replacement, text) - return text - - -def collapse_whitespace(text): - return re.sub(r'\s+', ' ', text) - - -def _remove_commas(m): - return m.group(1).replace(',', '') - - -def _expand_decimal_point(m): - return m.group(1).replace('.', ' point ') - - -def _expand_dollars(m): - match = m.group(1) - parts = match.split('.') - if len(parts) > 2: - return match + ' dollars' # Unexpected format - dollars = int(parts[0]) if parts[0] else 0 - cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0 - if dollars and cents: - dollar_unit = 'dollar' if dollars == 1 else 'dollars' - cent_unit = 'cent' if cents == 1 else 'cents' - return '%s %s, %s %s' % (dollars, dollar_unit, cents, cent_unit) - elif dollars: - dollar_unit = 'dollar' if dollars == 1 else 'dollars' - return '%s %s' % (dollars, dollar_unit) - elif cents: - cent_unit = 'cent' if cents == 1 else 'cents' - return '%s %s' % (cents, cent_unit) - else: - return 'zero dollars' - - -def _expand_ordinal(m): - return _inflect.number_to_words(m.group(0)) - - -def _expand_number(m): - num = int(m.group(0)) - if num > 1000 and num < 3000: - if num == 2000: - return 'two thousand' - elif num > 2000 and num < 2010: - return 'two thousand ' + _inflect.number_to_words(num % 100) - elif num % 100 == 0: - return _inflect.number_to_words(num // 100) + ' hundred' - else: - return _inflect.number_to_words(num, andword='', zero='oh', group=2).replace(', ', ' ') - else: - return _inflect.number_to_words(num, andword='') - - -def normalize_numbers(text): - text = re.sub(_comma_number_re, _remove_commas, text) - text = re.sub(_pounds_re, r'\1 pounds', text) - text = re.sub(_dollars_re, _expand_dollars, text) - text = re.sub(_decimal_number_re, 
_expand_decimal_point, text) - text = re.sub(_ordinal_re, _expand_ordinal, text) - text = re.sub(_number_re, _expand_number, text) - return text - - -def mark_dark_l(text): - return re.sub(r'l([^aeiouæɑɔəɛɪʊ ]*(?: |$))', lambda x: 'ɫ'+x.group(1), text) - - -def english_to_ipa(text): - text = unidecode(text).lower() - text = expand_abbreviations(text) - text = normalize_numbers(text) - phonemes = ipa.convert(text) - phonemes = collapse_whitespace(phonemes) - return phonemes - - -def english_to_lazy_ipa(text): - text = english_to_ipa(text) - for regex, replacement in _lazy_ipa: - text = re.sub(regex, replacement, text) - return text - - -def english_to_ipa2(text): - text = english_to_ipa(text) - text = mark_dark_l(text) - for regex, replacement in _ipa_to_ipa2: - text = re.sub(regex, replacement, text) - return text.replace('...', '…') - - -def english_to_lazy_ipa2(text): - text = english_to_ipa(text) - for regex, replacement in _lazy_ipa2: - text = re.sub(regex, replacement, text) - return text diff --git a/spaces/Irnkvezz/SIC98-GPT2-python-code-generator/README.md b/spaces/Irnkvezz/SIC98-GPT2-python-code-generator/README.md deleted file mode 100644 index fa71574e81452b4c99e4eb227b75faad9bf0a2ca..0000000000000000000000000000000000000000 --- a/spaces/Irnkvezz/SIC98-GPT2-python-code-generator/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: SIC98 GPT2 Python Code Generator -emoji: 🌍 -colorFrom: blue -colorTo: gray -sdk: gradio -sdk_version: 3.18.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/JLD/docker-hello-world/Dockerfile b/spaces/JLD/docker-hello-world/Dockerfile deleted file mode 100644 index 4a5a821629c9a08569f0e83004405a13032cd177..0000000000000000000000000000000000000000 --- a/spaces/JLD/docker-hello-world/Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -FROM python:3.9 - -WORKDIR /code - -COPY ./requirements.txt /code/requirements.txt - -RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt - -COPY . . 
- -CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"] diff --git a/spaces/JUNGU/face-swap/utils/utils.py b/spaces/JUNGU/face-swap/utils/utils.py deleted file mode 100644 index beccaf08edc411529a66d4c11498cd6b43423d0d..0000000000000000000000000000000000000000 --- a/spaces/JUNGU/face-swap/utils/utils.py +++ /dev/null @@ -1,377 +0,0 @@ -import json -from tensorflow.keras.models import model_from_json -from networks.layers import AdaIN, AdaptiveAttention -import tensorflow as tf - -import numpy as np -import cv2 -import math -from skimage import transform as trans -from scipy.signal import convolve2d -from skimage.color import rgb2yuv, yuv2rgb - -from PIL import Image - - -def save_model_internal(model, path, name, num): - json_model = model.to_json() - with open(path + name + '.json', "w") as json_file: - json_file.write(json_model) - - model.save_weights(path + name + '_' + str(num) + '.h5') - - -def load_model_internal(path, name, num): - with open(path + name + '.json', 'r') as json_file: - model_dict = json_file.read() - - mod = model_from_json(model_dict, custom_objects={'AdaIN': AdaIN, 'AdaptiveAttention': AdaptiveAttention}) - mod.load_weights(path + name + '_' + str(num) + '.h5') - - return mod - - -def save_training_meta(state_dict, path, num): - with open(path + str(num) + '.json', 'w') as json_file: - json.dump(state_dict, json_file, indent=2) - - -def load_training_meta(path, num): - with open(path + str(num) + '.json', 'r') as json_file: - state_dict = json.load(json_file) - return state_dict - - -def log_info(sw, results_dict, iteration): - with sw.as_default(): - for key in results_dict.keys(): - tf.summary.scalar(key, results_dict[key], step=iteration) - - -src1 = np.array([[51.642, 50.115], [57.617, 49.990], [35.740, 69.007], - [51.157, 89.050], [57.025, 89.702]], - dtype=np.float32) -# <--left -src2 = np.array([[45.031, 50.118], [65.568, 50.872], [39.677, 68.111], - [45.177, 86.190], [64.246, 86.758]], - dtype=np.float32) - -# ---frontal -src3 = np.array([[39.730, 51.138], [72.270, 51.138], [56.000, 68.493], - [42.463, 87.010], [69.537, 87.010]], - dtype=np.float32) - -# -->right -src4 = np.array([[46.845, 50.872], [67.382, 50.118], [72.737, 68.111], - [48.167, 86.758], [67.236, 86.190]], - dtype=np.float32) - -# -->right profile -src5 = np.array([[54.796, 49.990], [60.771, 50.115], [76.673, 69.007], - [55.388, 89.702], [61.257, 89.050]], - dtype=np.float32) - -src = np.array([src1, src2, src3, src4, src5]) -src_map = {112: src, 224: src * 2} - -# Left eye, right eye, nose, left mouth, right mouth -arcface_src = np.array( - [[38.2946, 51.6963], [73.5318, 51.5014], [56.0252, 71.7366], - [41.5493, 92.3655], [70.7299, 92.2041]], - dtype=np.float32) - -arcface_src = np.expand_dims(arcface_src, axis=0) - - -def extract_face(img, bb, absolute_center, mode='arcface', extention_rate=0.05, debug=False): - """Extract face from image given a bounding box""" - # bbox - x1, y1, x2, y2 = bb + 60 - adjusted_absolute_center = (absolute_center[0] + 60, absolute_center[1] + 60) - if debug: - print(bb + 60) - x1, y1, x2, y2 = bb - cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 3) - cv2.circle(img, absolute_center, 1, (255, 0, 255), 2) - Image.fromarray(img).show() - x1, y1, x2, y2 = bb + 60 - # Pad image in case face is out of frame - padded_img = np.zeros(shape=(248, 248, 3), dtype=np.uint8) - padded_img[60:-60, 60:-60, :] = img - - if debug: - cv2.rectangle(padded_img, (x1, y1), (x2, y2), (0, 255, 255), 3) - cv2.circle(padded_img, adjusted_absolute_center, 1, (255, 255, 
255), 2) - Image.fromarray(padded_img).show() - - y_len = abs(y1 - y2) - x_len = abs(x1 - x2) - - new_len = (y_len + x_len) // 2 - - extension = int(new_len * extention_rate) - - x_adjust = (x_len - new_len) // 2 - y_adjust = (y_len - new_len) // 2 - - x_1_adjusted = x1 + x_adjust - extension - x_2_adjusted = x2 - x_adjust + extension - - if mode == 'arcface': - y_1_adjusted = y1 - extension - y_2_adjusted = y2 - 2 * y_adjust + extension - else: - y_1_adjusted = y1 + 2 * y_adjust - extension - y_2_adjusted = y2 + extension - - move_x = adjusted_absolute_center[0] - (x_1_adjusted + x_2_adjusted) // 2 - move_y = adjusted_absolute_center[1] - (y_1_adjusted + y_2_adjusted) // 2 - - x_1_adjusted = x_1_adjusted + move_x - x_2_adjusted = x_2_adjusted + move_x - y_1_adjusted = y_1_adjusted + move_y - y_2_adjusted = y_2_adjusted + move_y - - # print(y_1_adjusted, y_2_adjusted, x_1_adjusted, x_2_adjusted) - - return padded_img[y_1_adjusted:y_2_adjusted, x_1_adjusted:x_2_adjusted] - - -def distance(a, b): - return np.sqrt((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2) - - -def euclidean_distance(a, b): - x1 = a[0]; y1 = a[1] - x2 = b[0]; y2 = b[1] - return np.sqrt(((x2 - x1) * (x2 - x1)) + ((y2 - y1) * (y2 - y1))) - - -def align_face(img, landmarks, debug=False): - nose, right_eye, left_eye = landmarks - - left_eye_x = left_eye[0] - left_eye_y = left_eye[1] - - right_eye_x = right_eye[0] - right_eye_y = right_eye[1] - - center_eye = ((left_eye[0] + right_eye[0]) // 2, (left_eye[1] + right_eye[1]) // 2) - - if left_eye_y < right_eye_y: - point_3rd = (right_eye_x, left_eye_y) - direction = -1 - else: - point_3rd = (left_eye_x, right_eye_y) - direction = 1 - - if debug: - cv2.circle(img, point_3rd, 1, (255, 0, 0), 1) - cv2.circle(img, center_eye, 1, (255, 0, 0), 1) - - cv2.line(img, right_eye, left_eye, (0, 0, 0), 1) - cv2.line(img, left_eye, point_3rd, (0, 0, 0), 1) - cv2.line(img, right_eye, point_3rd, (0, 0, 0), 1) - - a = euclidean_distance(left_eye, point_3rd) - b = euclidean_distance(right_eye, left_eye) - c = euclidean_distance(right_eye, point_3rd) - - cos_a = (b * b + c * c - a * a) / (2 * b * c) - - angle = np.arccos(cos_a) - - angle = (angle * 180) / np.pi - - if direction == -1: - angle = 90 - angle - ang = math.radians(direction * angle) - else: - ang = math.radians(direction * angle) - angle = 0 - angle - - M = cv2.getRotationMatrix2D((64, 64), angle, 1) - new_img = cv2.warpAffine(img, M, (128, 128), - flags=cv2.INTER_CUBIC) - - rotated_nose = (int((nose[0] - 64) * np.cos(ang) - (nose[1] - 64) * np.sin(ang) + 64), - int((nose[0] - 64) * np.sin(ang) + (nose[1] - 64) * np.cos(ang) + 64)) - - rotated_center_eye = (int((center_eye[0] - 64) * np.cos(ang) - (center_eye[1] - 64) * np.sin(ang) + 64), - int((center_eye[0] - 64) * np.sin(ang) + (center_eye[1] - 64) * np.cos(ang) + 64)) - - abolute_center = (rotated_center_eye[0], (rotated_nose[1] + rotated_center_eye[1]) // 2) - - if debug: - cv2.circle(new_img, rotated_nose, 1, (0, 0, 255), 1) - cv2.circle(new_img, rotated_center_eye, 1, (0, 0, 255), 1) - cv2.circle(new_img, abolute_center, 1, (0, 0, 255), 1) - - return new_img, abolute_center - - -def estimate_norm(lmk, image_size=112, mode='arcface', shrink_factor=1.0): - assert lmk.shape == (5, 2) - tform = trans.SimilarityTransform() - lmk_tran = np.insert(lmk, 2, values=np.ones(5), axis=1) - min_M = [] - min_index = [] - min_error = float('inf') - src_factor = image_size / 112 - if mode == 'arcface': - src = arcface_src * shrink_factor + (1 - shrink_factor) * 56 - src = src * src_factor - else: - 
src = src_map[image_size] * src_factor - for i in np.arange(src.shape[0]): - tform.estimate(lmk, src[i]) - M = tform.params[0:2, :] - results = np.dot(M, lmk_tran.T) - results = results.T - error = np.sum(np.sqrt(np.sum((results - src[i])**2, axis=1))) - # print(error) - if error < min_error: - min_error = error - min_M = M - min_index = i - return min_M, min_index - - -def inverse_estimate_norm(lmk, t_lmk, image_size=112, mode='arcface', shrink_factor=1.0): - assert lmk.shape == (5, 2) - tform = trans.SimilarityTransform() - lmk_tran = np.insert(lmk, 2, values=np.ones(5), axis=1) - min_M = [] - min_index = [] - min_error = float('inf') - src_factor = image_size / 112 - if mode == 'arcface': - src = arcface_src * shrink_factor + (1 - shrink_factor) * 56 - src = src * src_factor - else: - src = src_map[image_size] * src_factor - for i in np.arange(src.shape[0]): - tform.estimate(t_lmk, lmk) - M = tform.params[0:2, :] - results = np.dot(M, lmk_tran.T) - results = results.T - error = np.sum(np.sqrt(np.sum((results - src[i])**2, axis=1))) - # print(error) - if error < min_error: - min_error = error - min_M = M - min_index = i - return min_M, min_index - - -def norm_crop(img, landmark, image_size=112, mode='arcface', shrink_factor=1.0): - """ - Align and crop the image based of the facial landmarks in the image. The alignment is done with - a similarity transformation based of source coordinates. - :param img: Image to transform. - :param landmark: Five landmark coordinates in the image. - :param image_size: Desired output size after transformation. - :param mode: 'arcface' aligns the face for the use of Arcface facial recognition model. Useful for - both facial recognition tasks and face swapping tasks. - :param shrink_factor: Shrink factor that shrinks the source landmark coordinates. This will include more border - information around the face. Useful when you want to include more background information when performing face swaps. - The lower the shrink factor the more of the face is included. Default value 1.0 will align the image to be ready - for the Arcface recognition model, but usually omits part of the chin. Value of 0.0 would transform all source points - to the middle of the image, probably rendering the alignment procedure useless. - - If you process the image with a shrink factor of 0.85 and then want to extract the identity embedding with arcface, - you simply do a central crop of factor 0.85 to yield same cropped result as using shrink factor 1.0. This will - reduce the resolution, the recommendation is to processed images to output resolutions higher than 112 is using - Arcface. This will make sure no information is lost by resampling the image after central crop. - :return: Returns the transformed image. 
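    Illustrative usage (editorial sketch, not from the original file; img and lmk
    are hypothetical inputs, a BGR image and a (5, 2) landmark array):

        # Align with extra border context, then approximate the arcface-style
        # crop by keeping the central 0.85 fraction of the aligned image.
        aligned = norm_crop(img, lmk, image_size=256, mode='arcface', shrink_factor=0.85)
        h, w = aligned.shape[:2]
        mh, mw = int(h * (1 - 0.85) / 2), int(w * (1 - 0.85) / 2)
        arcface_like = aligned[mh:h - mh, mw:w - mw]  # roughly the shrink_factor=1.0 crop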
- """ - M, pose_index = estimate_norm(landmark, image_size, mode, shrink_factor=shrink_factor) - warped = cv2.warpAffine(img, M, (image_size, image_size), borderValue=0.0) - return warped - - -def transform_landmark_points(M, points): - lmk_tran = np.insert(points, 2, values=np.ones(5), axis=1) - transformed_lmk = np.dot(M, lmk_tran.T) - transformed_lmk = transformed_lmk.T - - return transformed_lmk - - -def multi_convolver(image, kernel, iterations): - if kernel == "Sharpen": - kernel = np.array([[0, -1, 0], - [-1, 5, -1], - [0, -1, 0]]) - elif kernel == "Unsharp_mask": - kernel = np.array([[1, 4, 6, 4, 1], - [4, 16, 24, 16, 1], - [6, 24, -476, 24, 1], - [4, 16, 24, 16, 1], - [1, 4, 6, 4, 1]]) * (-1 / 256) - elif kernel == "Blur": - kernel = (1 / 16.0) * np.array([[1., 2., 1.], - [2., 4., 2.], - [1., 2., 1.]]) - for i in range(iterations): - image = convolve2d(image, kernel, 'same', boundary='fill', fillvalue = 0) - return image - - -def convolve_rgb(image, kernel, iterations=1): - img_yuv = rgb2yuv(image) - img_yuv[:, :, 0] = multi_convolver(img_yuv[:, :, 0], kernel, - iterations) - final_image = yuv2rgb(img_yuv) - - return final_image.astype('float32') - - -def generate_mask_from_landmarks(lms, im_size): - blend_mask_lm = np.zeros(shape=(im_size, im_size, 3), dtype='float32') - - # EYES - blend_mask_lm = cv2.circle(blend_mask_lm, - (int(lms[0][0]), int(lms[0][1])), 12, (255, 255, 255), 30) - blend_mask_lm = cv2.circle(blend_mask_lm, - (int(lms[1][0]), int(lms[1][1])), 12, (255, 255, 255), 30) - blend_mask_lm = cv2.circle(blend_mask_lm, - (int((lms[0][0] + lms[1][0]) / 2), int((lms[0][1] + lms[1][1]) / 2)), - 16, (255, 255, 255), 65) - - # NOSE - blend_mask_lm = cv2.circle(blend_mask_lm, - (int(lms[2][0]), int(lms[2][1])), 5, (255, 255, 255), 5) - blend_mask_lm = cv2.circle(blend_mask_lm, - (int((lms[0][0] + lms[1][0]) / 2), int(lms[2][1])), 16, (255, 255, 255), 100) - - # MOUTH - blend_mask_lm = cv2.circle(blend_mask_lm, - (int(lms[3][0]), int(lms[3][1])), 6, (255, 255, 255), 30) - blend_mask_lm = cv2.circle(blend_mask_lm, - (int(lms[4][0]), int(lms[4][1])), 6, (255, 255, 255), 30) - - blend_mask_lm = cv2.circle(blend_mask_lm, - (int((lms[3][0] + lms[4][0]) / 2), int((lms[3][1] + lms[4][1]) / 2)), - 16, (255, 255, 255), 40) - return blend_mask_lm - - -def display_distance_text(im, distance, lms, im_w, im_h, scale=2): - blended_insert = cv2.putText(im, str(distance)[:4], - (int(lms[4] * im_w * 0.5), int(lms[5] * im_h * 0.8)), - cv2.FONT_HERSHEY_SIMPLEX, scale * 0.5, (0.08, 0.16, 0.08), int(scale * 2)) - blended_insert = cv2.putText(blended_insert, str(distance)[:4], - (int(lms[4] * im_w * 0.5), int(lms[5] * im_h * 0.8)), - cv2.FONT_HERSHEY_SIMPLEX, scale* 0.5, (0.3, 0.7, 0.32), int(scale * 1)) - return blended_insert - - -def get_lm(annotation, im_w, im_h): - lm_align = np.array([[annotation[4] * im_w, annotation[5] * im_h], - [annotation[6] * im_w, annotation[7] * im_h], - [annotation[8] * im_w, annotation[9] * im_h], - [annotation[10] * im_w, annotation[11] * im_h], - [annotation[12] * im_w, annotation[13] * im_h]], - dtype=np.float32) - return lm_align diff --git a/spaces/JammyMachina/the-jam-machine-app/playback.py b/spaces/JammyMachina/the-jam-machine-app/playback.py deleted file mode 100644 index b11503a5292a84bb33614f3e02cb24e4ffdc435a..0000000000000000000000000000000000000000 --- a/spaces/JammyMachina/the-jam-machine-app/playback.py +++ /dev/null @@ -1,36 +0,0 @@ -import matplotlib.pyplot as plt -import librosa.display -from pretty_midi import PrettyMIDI - - -# Note: these 
functions are meant to be played within an interactive Python shell -# Please refer to the synth.ipynb for an example of how to use them - - -def get_music(midi_file): - """ - Load a midi file and return the PrettyMIDI object and the audio signal - """ - print(f"Getting MIDI music from: {midi_file}") - music = PrettyMIDI(midi_file=midi_file) - waveform = music.fluidsynth() - return music, waveform - - -def show_piano_roll(music_notes, fs=100): - """ - Show the piano roll of a music piece, with all instruments squashed onto a single 128xN matrix - :param music_notes: PrettyMIDI object - :param fs: sampling frequency - """ - # get the piano roll - piano_roll = music_notes.get_piano_roll(fs) - print("Piano roll shape: {}".format(piano_roll.shape)) - - # plot the piano roll - plt.figure(figsize=(12, 4)) - librosa.display.specshow(piano_roll, sr=100, x_axis="time", y_axis="cqt_note") - plt.colorbar() - plt.title("Piano roll") - plt.tight_layout() - plt.show() diff --git a/spaces/Jeff2323/ai-comic-factory/src/components/ui/avatar.tsx b/spaces/Jeff2323/ai-comic-factory/src/components/ui/avatar.tsx deleted file mode 100644 index 88aeea9d9368f2bd7385f0a0885829bf6d789492..0000000000000000000000000000000000000000 --- a/spaces/Jeff2323/ai-comic-factory/src/components/ui/avatar.tsx +++ /dev/null @@ -1,50 +0,0 @@ -"use client" - -import * as React from "react" -import * as AvatarPrimitive from "@radix-ui/react-avatar" - -import { cn } from "@/lib/utils" - -const Avatar = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -Avatar.displayName = AvatarPrimitive.Root.displayName - -const AvatarImage = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -AvatarImage.displayName = AvatarPrimitive.Image.displayName - -const AvatarFallback = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -AvatarFallback.displayName = AvatarPrimitive.Fallback.displayName - -export { Avatar, AvatarImage, AvatarFallback } diff --git a/spaces/JeffJing/ZookChatBot/OpenAIAuth/OpenAIAuth.py b/spaces/JeffJing/ZookChatBot/OpenAIAuth/OpenAIAuth.py deleted file mode 100644 index c86e5d0fb3103c458b9b5df3aad69ef249117f18..0000000000000000000000000000000000000000 --- a/spaces/JeffJing/ZookChatBot/OpenAIAuth/OpenAIAuth.py +++ /dev/null @@ -1,359 +0,0 @@ -# Credits to github.com/rawandahmad698/PyChatGPT -import re -import urllib - -import tls_client - - -class Debugger: - def __init__(self, debug: bool = False): - if debug: - print("Debugger enabled on OpenAIAuth") - self.debug = debug - - def set_debug(self, debug: bool): - self.debug = debug - - def log(self, message: str, end: str = "\n"): - if self.debug: - print(message, end=end) - - -class OpenAIAuth: - def __init__( - self, - email_address: str, - password: str, - proxy: str = None, - debug: bool = False, - ): - self.session_token = None - self.email_address = email_address - self.password = password - self.proxy = proxy - self.session = tls_client.Session( - client_identifier="chrome_109", - ) - self.access_token: str = None - self.debugger = Debugger(debug) - self.user_agent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36" - - @staticmethod - def url_encode(string: str) -> str: - """ - URL encode a string - :param string: - :return: - """ - return urllib.parse.quote(string) - - def begin(self) -> None: - """ - Begin the auth process - 
""" - self.debugger.log("Beginning auth process") - if not self.email_address or not self.password: - return - - if self.proxy: - proxies = { - "http": self.proxy, - "https": self.proxy, - } - self.session.proxies = proxies - - # First, make a request to https://explorer.api.openai.com/auth/login - url = "https://explorer.api.openai.com/" - headers = { - "Host": "ask.openai.com", - "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", - "User-Agent": self.user_agent, - "Accept-Language": "en-GB,en-US;q=0.9,en;q=0.8", - "Accept-Encoding": "gzip, deflate, br", - "Connection": "keep-alive", - } - - response = self.session.get( - url=url, - headers=headers, - ) - if response.status_code == 200: - self.__part_two() - else: - self.debugger.log("Error in part one") - self.debugger.log("Response: ", end="") - self.debugger.log(response.text) - self.debugger.log("Status code: ", end="") - self.debugger.log(response.status_code) - raise Exception("API error") - - def __part_two(self) -> None: - """ - In part two, We make a request to https://explorer.api.openai.com/api/auth/csrf and grab a fresh csrf token - """ - self.debugger.log("Beginning part two") - - url = "https://explorer.api.openai.com/api/auth/csrf" - headers = { - "Host": "ask.openai.com", - "Accept": "*/*", - "Connection": "keep-alive", - "User-Agent": self.user_agent, - "Accept-Language": "en-GB,en-US;q=0.9,en;q=0.8", - "Referer": "https://explorer.api.openai.com/auth/login", - "Accept-Encoding": "gzip, deflate, br", - } - response = self.session.get( - url=url, - headers=headers, - ) - if response.status_code == 200 and "json" in response.headers["Content-Type"]: - csrf_token = response.json()["csrfToken"] - self.__part_three(token=csrf_token) - else: - self.debugger.log("Error in part two") - self.debugger.log("Response: ", end="") - self.debugger.log(response.text) - self.debugger.log("Status code: ", end="") - self.debugger.log(response.status_code) - raise Exception("Error logging in") - - def __part_three(self, token: str) -> None: - """ - We reuse the token from part to make a request to /api/auth/signin/auth0?prompt=login - """ - self.debugger.log("Beginning part three") - url = "https://explorer.api.openai.com/api/auth/signin/auth0?prompt=login" - payload = f"callbackUrl=%2F&csrfToken={token}&json=true" - headers = { - "Host": "explorer.api.openai.com", - "User-Agent": self.user_agent, - "Content-Type": "application/x-www-form-urlencoded", - "Accept": "*/*", - "Sec-Gpc": "1", - "Accept-Language": "en-US,en;q=0.8", - "Origin": "https://explorer.api.openai.com", - "Sec-Fetch-Site": "same-origin", - "Sec-Fetch-Mode": "cors", - "Sec-Fetch-Dest": "empty", - "Referer": "https://explorer.api.openai.com/auth/login", - "Accept-Encoding": "gzip, deflate", - } - self.debugger.log("Payload: " + payload) - self.debugger.log("Payload length: " + str(len(payload))) - response = self.session.post(url=url, headers=headers, data=payload) - if response.status_code == 200 and "json" in response.headers["Content-Type"]: - url = response.json()["url"] - if ( - url - == "https://explorer.api.openai.com/api/auth/error?error=OAuthSignin" - or "error" in url - ): - self.debugger.log("You have been rate limited") - raise Exception("You have been rate limited.") - self.__part_four(url=url) - else: - self.debugger.log("Error in part three") - self.debugger.log("Response: ", end="") - self.debugger.log("Status code: ", end="") - self.debugger.log(response.status_code) - self.debugger.log(response.headers) - 
self.debugger.log(self.session.cookies.get_dict()) - raise Exception("Unknown error") - - def __part_four(self, url: str) -> None: - """ - We make a GET request to url - :param url: - :return: - """ - self.debugger.log("Beginning part four") - headers = { - "Host": "auth0.openai.com", - "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", - "Connection": "keep-alive", - "User-Agent": self.user_agent, - "Accept-Language": "en-US,en;q=0.9", - "Referer": "https://explorer.api.openai.com/", - } - response = self.session.get( - url=url, - headers=headers, - ) - if response.status_code == 302: - try: - state = re.findall(r"state=(.*)", response.text)[0] - state = state.split('"')[0] - self.__part_five(state=state) - except IndexError as exc: - self.debugger.log("Error in part four") - self.debugger.log("Status code: ", end="") - self.debugger.log(response.status_code) - self.debugger.log("Rate limit hit") - self.debugger.log("Response: " + str(response.text)) - raise Exception("Rate limit hit") from exc - else: - self.debugger.log("Error in part four") - self.debugger.log("Response: ", end="") - self.debugger.log(response.text) - self.debugger.log("Status code: ", end="") - self.debugger.log(response.status_code) - self.debugger.log("Wrong response code") - raise Exception("Unknown error") - - def __part_five(self, state: str) -> None: - """ - We use the state to get the login page & check for a captcha - """ - self.debugger.log("Beginning part five") - url = f"https://auth0.openai.com/u/login/identifier?state={state}" - - headers = { - "Host": "auth0.openai.com", - "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", - "Connection": "keep-alive", - "User-Agent": self.user_agent, - "Accept-Language": "en-US,en;q=0.9", - "Referer": "https://explorer.api.openai.com/", - } - response = self.session.get(url, headers=headers) - if response.status_code == 200: - self.__part_six(state=state) - else: - self.debugger.log("Error in part five") - self.debugger.log("Response: ", end="") - self.debugger.log(response.text) - self.debugger.log("Status code: ", end="") - self.debugger.log(response.status_code) - raise ValueError("Invalid response code") - - def __part_six(self, state: str) -> None: - """ - We make a POST request to the login page with the captcha, email - :param state: - :return: - """ - self.debugger.log("Beginning part six") - url = f"https://auth0.openai.com/u/login/identifier?state={state}" - email_url_encoded = self.url_encode(self.email_address) - - payload = ( - f"state={state}&username={email_url_encoded}&js-available=false&webauthn-available=true&is" - f"-brave=false&webauthn-platform-available=true&action=default " - ) - - headers = { - "Host": "auth0.openai.com", - "Origin": "https://auth0.openai.com", - "Connection": "keep-alive", - "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", - "User-Agent": self.user_agent, - "Referer": f"https://auth0.openai.com/u/login/identifier?state={state}", - "Accept-Language": "en-US,en;q=0.9", - "Content-Type": "application/x-www-form-urlencoded", - } - response = self.session.post( - url, - headers=headers, - data=payload, - ) - if response.status_code == 302: - self.__part_seven(state=state) - else: - self.debugger.log("Error in part six") - self.debugger.log("Response: ", end="") - self.debugger.log(response.text) - self.debugger.log("Status code: ", end="") - self.debugger.log(response.status_code) - raise Exception("Unknown error") - - def __part_seven(self, state: str) 
-> None: - """ - We enter the password - :param state: - :return: - """ - url = f"https://auth0.openai.com/u/login/password?state={state}" - self.debugger.log("Beginning part seven") - email_url_encoded = self.url_encode(self.email_address) - password_url_encoded = self.url_encode(self.password) - payload = f"state={state}&username={email_url_encoded}&password={password_url_encoded}&action=default" - headers = { - "Host": "auth0.openai.com", - "Origin": "https://auth0.openai.com", - "Connection": "keep-alive", - "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", - "User-Agent": self.user_agent, - "Referer": f"https://auth0.openai.com/u/login/password?state={state}", - "Accept-Language": "en-US,en;q=0.9", - "Content-Type": "application/x-www-form-urlencoded", - } - try: - response = self.session.post( - url, - headers=headers, - data=payload, - ) - self.debugger.log("Request went through") - except Exception as exc: - self.debugger.log("Error in part seven") - self.debugger.log("Exception: ", end="") - self.debugger.log(exc) - raise Exception("Could not get response") from exc - if response.status_code == 302: - self.debugger.log("Response code is 302") - try: - new_state = re.findall(r"state=(.*)", response.text)[0] - new_state = new_state.split('"')[0] - self.debugger.log("New state found") - self.__part_eight(old_state=state, new_state=new_state) - except Exception as exc: - raise Exception("Could not find new state") from exc - else: - self.debugger.log("Error in part seven") - self.debugger.log("Status code: ", end="") - self.debugger.log(response.status_code) - raise Exception("Wrong status code") - - def __part_eight(self, old_state: str, new_state) -> None: - self.debugger.log("Beginning part eight") - url = f"https://auth0.openai.com/authorize/resume?state={new_state}" - headers = { - "Host": "auth0.openai.com", - "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", - "Connection": "keep-alive", - "User-Agent": self.user_agent, - "Accept-Language": "en-GB,en-US;q=0.9,en;q=0.8", - "Referer": f"https://auth0.openai.com/u/login/password?state={old_state}", - } - response = self.session.get( - url, - headers=headers, - allow_redirects=True, - ) - if response.status_code == 200: - self.session_token = response.cookies.get_dict()[ - "__Secure-next-auth.session-token" - ] - self.get_access_token() - - def get_access_token(self): - """ - Gets access token - """ - self.session.cookies.set( - "__Secure-next-auth.session-token", - self.session_token, - ) - response = self.session.get( - "https://explorer.api.openai.com/api/auth/session", - ) - if response.status_code == 200: - self.access_token = response.json()["accessToken"] - self.debugger.log("Access token found") - return self.access_token - else: - self.debugger.log("Error in part nine") - self.debugger.log("Status code: ", end="") - self.debugger.log(response.status_code) - raise Exception("Wrong status code") diff --git a/spaces/Kangarroar/ApplioRVC-Inference/diffq/__init__.py b/spaces/Kangarroar/ApplioRVC-Inference/diffq/__init__.py deleted file mode 100644 index 2b997ee4ed99a90cc43db7812383927e6fe1a3e8..0000000000000000000000000000000000000000 --- a/spaces/Kangarroar/ApplioRVC-Inference/diffq/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
- -# flake8: noqa -""" -This package implements different quantization strategies: - -- `diffq.uniform.UniformQuantizer`: classic uniform quantization over n bits. -- `diffq.diffq.DiffQuantizer`: differentiable quantizer based on scaled noise injection. - -Also, do check `diffq.base.BaseQuantizer` for the common methods of all Quantizers. -""" - -from .uniform import UniformQuantizer -from .diffq import DiffQuantizer diff --git a/spaces/Kangarroar/ApplioRVC-Inference/train/data_utils.py b/spaces/Kangarroar/ApplioRVC-Inference/train/data_utils.py deleted file mode 100644 index 71c0eff1815469a52399dc90a093a2f8a29223eb..0000000000000000000000000000000000000000 --- a/spaces/Kangarroar/ApplioRVC-Inference/train/data_utils.py +++ /dev/null @@ -1,512 +0,0 @@ -import os, traceback -import numpy as np -import torch -import torch.utils.data - -from mel_processing import spectrogram_torch -from utils import load_wav_to_torch, load_filepaths_and_text - - -class TextAudioLoaderMultiNSFsid(torch.utils.data.Dataset): - """ - 1) loads audio, text pairs - 2) normalizes text and converts them to sequences of integers - 3) computes spectrograms from audio files. - """ - - def __init__(self, audiopaths_and_text, hparams): - self.audiopaths_and_text = load_filepaths_and_text(audiopaths_and_text) - self.max_wav_value = hparams.max_wav_value - self.sampling_rate = hparams.sampling_rate - self.filter_length = hparams.filter_length - self.hop_length = hparams.hop_length - self.win_length = hparams.win_length - self.sampling_rate = hparams.sampling_rate - self.min_text_len = getattr(hparams, "min_text_len", 1) - self.max_text_len = getattr(hparams, "max_text_len", 5000) - self._filter() - - def _filter(self): - """ - Filter text & store spec lengths - """ - # Store spectrogram lengths for Bucketing - # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2) - # spec_length = wav_length // hop_length - audiopaths_and_text_new = [] - lengths = [] - for audiopath, text, pitch, pitchf, dv in self.audiopaths_and_text: - if self.min_text_len <= len(text) and len(text) <= self.max_text_len: - audiopaths_and_text_new.append([audiopath, text, pitch, pitchf, dv]) - lengths.append(os.path.getsize(audiopath) // (3 * self.hop_length)) - self.audiopaths_and_text = audiopaths_and_text_new - self.lengths = lengths - - def get_sid(self, sid): - sid = torch.LongTensor([int(sid)]) - return sid - - def get_audio_text_pair(self, audiopath_and_text): - # separate filename and text - file = audiopath_and_text[0] - phone = audiopath_and_text[1] - pitch = audiopath_and_text[2] - pitchf = audiopath_and_text[3] - dv = audiopath_and_text[4] - - phone, pitch, pitchf = self.get_labels(phone, pitch, pitchf) - spec, wav = self.get_audio(file) - dv = self.get_sid(dv) - - len_phone = phone.size()[0] - len_spec = spec.size()[-1] - # print(123,phone.shape,pitch.shape,spec.shape) - if len_phone != len_spec: - len_min = min(len_phone, len_spec) - # amor - len_wav = len_min * self.hop_length - - spec = spec[:, :len_min] - wav = wav[:, :len_wav] - - phone = phone[:len_min, :] - pitch = pitch[:len_min] - pitchf = pitchf[:len_min] - - return (spec, wav, phone, pitch, pitchf, dv) - - def get_labels(self, phone, pitch, pitchf): - phone = np.load(phone) - phone = np.repeat(phone, 2, axis=0) - pitch = np.load(pitch) - pitchf = np.load(pitchf) - n_num = min(phone.shape[0], 900) # DistributedBucketSampler - # print(234,phone.shape,pitch.shape) - phone = phone[:n_num, :] - pitch = pitch[:n_num] - pitchf = pitchf[:n_num] - phone = 
torch.FloatTensor(phone) - pitch = torch.LongTensor(pitch) - pitchf = torch.FloatTensor(pitchf) - return phone, pitch, pitchf - - def get_audio(self, filename): - audio, sampling_rate = load_wav_to_torch(filename) - if sampling_rate != self.sampling_rate: - raise ValueError( - "{} SR doesn't match target {} SR".format( - sampling_rate, self.sampling_rate - ) - ) - audio_norm = audio - # audio_norm = audio / self.max_wav_value - # audio_norm = audio / np.abs(audio).max() - - audio_norm = audio_norm.unsqueeze(0) - spec_filename = filename.replace(".wav", ".spec.pt") - if os.path.exists(spec_filename): - try: - spec = torch.load(spec_filename) - except: - print(spec_filename, traceback.format_exc()) - spec = spectrogram_torch( - audio_norm, - self.filter_length, - self.sampling_rate, - self.hop_length, - self.win_length, - center=False, - ) - spec = torch.squeeze(spec, 0) - torch.save(spec, spec_filename, _use_new_zipfile_serialization=False) - else: - spec = spectrogram_torch( - audio_norm, - self.filter_length, - self.sampling_rate, - self.hop_length, - self.win_length, - center=False, - ) - spec = torch.squeeze(spec, 0) - torch.save(spec, spec_filename, _use_new_zipfile_serialization=False) - return spec, audio_norm - - def __getitem__(self, index): - return self.get_audio_text_pair(self.audiopaths_and_text[index]) - - def __len__(self): - return len(self.audiopaths_and_text) - - -class TextAudioCollateMultiNSFsid: - """Zero-pads model inputs and targets""" - - def __init__(self, return_ids=False): - self.return_ids = return_ids - - def __call__(self, batch): - """Collate's training batch from normalized text and aduio - PARAMS - ------ - batch: [text_normalized, spec_normalized, wav_normalized] - """ - # Right zero-pad all one-hot text sequences to max input length - _, ids_sorted_decreasing = torch.sort( - torch.LongTensor([x[0].size(1) for x in batch]), dim=0, descending=True - ) - - max_spec_len = max([x[0].size(1) for x in batch]) - max_wave_len = max([x[1].size(1) for x in batch]) - spec_lengths = torch.LongTensor(len(batch)) - wave_lengths = torch.LongTensor(len(batch)) - spec_padded = torch.FloatTensor(len(batch), batch[0][0].size(0), max_spec_len) - wave_padded = torch.FloatTensor(len(batch), 1, max_wave_len) - spec_padded.zero_() - wave_padded.zero_() - - max_phone_len = max([x[2].size(0) for x in batch]) - phone_lengths = torch.LongTensor(len(batch)) - phone_padded = torch.FloatTensor( - len(batch), max_phone_len, batch[0][2].shape[1] - ) # (spec, wav, phone, pitch) - pitch_padded = torch.LongTensor(len(batch), max_phone_len) - pitchf_padded = torch.FloatTensor(len(batch), max_phone_len) - phone_padded.zero_() - pitch_padded.zero_() - pitchf_padded.zero_() - # dv = torch.FloatTensor(len(batch), 256)#gin=256 - sid = torch.LongTensor(len(batch)) - - for i in range(len(ids_sorted_decreasing)): - row = batch[ids_sorted_decreasing[i]] - - spec = row[0] - spec_padded[i, :, : spec.size(1)] = spec - spec_lengths[i] = spec.size(1) - - wave = row[1] - wave_padded[i, :, : wave.size(1)] = wave - wave_lengths[i] = wave.size(1) - - phone = row[2] - phone_padded[i, : phone.size(0), :] = phone - phone_lengths[i] = phone.size(0) - - pitch = row[3] - pitch_padded[i, : pitch.size(0)] = pitch - pitchf = row[4] - pitchf_padded[i, : pitchf.size(0)] = pitchf - - # dv[i] = row[5] - sid[i] = row[5] - - return ( - phone_padded, - phone_lengths, - pitch_padded, - pitchf_padded, - spec_padded, - spec_lengths, - wave_padded, - wave_lengths, - # dv - sid, - ) - - -class 
TextAudioLoader(torch.utils.data.Dataset): - """ - 1) loads audio, text pairs - 2) normalizes text and converts them to sequences of integers - 3) computes spectrograms from audio files. - """ - - def __init__(self, audiopaths_and_text, hparams): - self.audiopaths_and_text = load_filepaths_and_text(audiopaths_and_text) - self.max_wav_value = hparams.max_wav_value - self.sampling_rate = hparams.sampling_rate - self.filter_length = hparams.filter_length - self.hop_length = hparams.hop_length - self.win_length = hparams.win_length - self.sampling_rate = hparams.sampling_rate - self.min_text_len = getattr(hparams, "min_text_len", 1) - self.max_text_len = getattr(hparams, "max_text_len", 5000) - self._filter() - - def _filter(self): - """ - Filter text & store spec lengths - """ - # Store spectrogram lengths for Bucketing - # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2) - # spec_length = wav_length // hop_length - audiopaths_and_text_new = [] - lengths = [] - for audiopath, text, dv in self.audiopaths_and_text: - if self.min_text_len <= len(text) and len(text) <= self.max_text_len: - audiopaths_and_text_new.append([audiopath, text, dv]) - lengths.append(os.path.getsize(audiopath) // (3 * self.hop_length)) - self.audiopaths_and_text = audiopaths_and_text_new - self.lengths = lengths - - def get_sid(self, sid): - sid = torch.LongTensor([int(sid)]) - return sid - - def get_audio_text_pair(self, audiopath_and_text): - # separate filename and text - file = audiopath_and_text[0] - phone = audiopath_and_text[1] - dv = audiopath_and_text[2] - - phone = self.get_labels(phone) - spec, wav = self.get_audio(file) - dv = self.get_sid(dv) - - len_phone = phone.size()[0] - len_spec = spec.size()[-1] - if len_phone != len_spec: - len_min = min(len_phone, len_spec) - len_wav = len_min * self.hop_length - spec = spec[:, :len_min] - wav = wav[:, :len_wav] - phone = phone[:len_min, :] - return (spec, wav, phone, dv) - - def get_labels(self, phone): - phone = np.load(phone) - phone = np.repeat(phone, 2, axis=0) - n_num = min(phone.shape[0], 900) # DistributedBucketSampler - phone = phone[:n_num, :] - phone = torch.FloatTensor(phone) - return phone - - def get_audio(self, filename): - audio, sampling_rate = load_wav_to_torch(filename) - if sampling_rate != self.sampling_rate: - raise ValueError( - "{} SR doesn't match target {} SR".format( - sampling_rate, self.sampling_rate - ) - ) - audio_norm = audio - # audio_norm = audio / self.max_wav_value - # audio_norm = audio / np.abs(audio).max() - - audio_norm = audio_norm.unsqueeze(0) - spec_filename = filename.replace(".wav", ".spec.pt") - if os.path.exists(spec_filename): - try: - spec = torch.load(spec_filename) - except: - print(spec_filename, traceback.format_exc()) - spec = spectrogram_torch( - audio_norm, - self.filter_length, - self.sampling_rate, - self.hop_length, - self.win_length, - center=False, - ) - spec = torch.squeeze(spec, 0) - torch.save(spec, spec_filename, _use_new_zipfile_serialization=False) - else: - spec = spectrogram_torch( - audio_norm, - self.filter_length, - self.sampling_rate, - self.hop_length, - self.win_length, - center=False, - ) - spec = torch.squeeze(spec, 0) - torch.save(spec, spec_filename, _use_new_zipfile_serialization=False) - return spec, audio_norm - - def __getitem__(self, index): - return self.get_audio_text_pair(self.audiopaths_and_text[index]) - - def __len__(self): - return len(self.audiopaths_and_text) - - -class TextAudioCollate: - """Zero-pads model inputs and targets""" - - def 
__init__(self, return_ids=False): - self.return_ids = return_ids - - def __call__(self, batch): - """Collate's training batch from normalized text and aduio - PARAMS - ------ - batch: [text_normalized, spec_normalized, wav_normalized] - """ - # Right zero-pad all one-hot text sequences to max input length - _, ids_sorted_decreasing = torch.sort( - torch.LongTensor([x[0].size(1) for x in batch]), dim=0, descending=True - ) - - max_spec_len = max([x[0].size(1) for x in batch]) - max_wave_len = max([x[1].size(1) for x in batch]) - spec_lengths = torch.LongTensor(len(batch)) - wave_lengths = torch.LongTensor(len(batch)) - spec_padded = torch.FloatTensor(len(batch), batch[0][0].size(0), max_spec_len) - wave_padded = torch.FloatTensor(len(batch), 1, max_wave_len) - spec_padded.zero_() - wave_padded.zero_() - - max_phone_len = max([x[2].size(0) for x in batch]) - phone_lengths = torch.LongTensor(len(batch)) - phone_padded = torch.FloatTensor( - len(batch), max_phone_len, batch[0][2].shape[1] - ) - phone_padded.zero_() - sid = torch.LongTensor(len(batch)) - - for i in range(len(ids_sorted_decreasing)): - row = batch[ids_sorted_decreasing[i]] - - spec = row[0] - spec_padded[i, :, : spec.size(1)] = spec - spec_lengths[i] = spec.size(1) - - wave = row[1] - wave_padded[i, :, : wave.size(1)] = wave - wave_lengths[i] = wave.size(1) - - phone = row[2] - phone_padded[i, : phone.size(0), :] = phone - phone_lengths[i] = phone.size(0) - - sid[i] = row[3] - - return ( - phone_padded, - phone_lengths, - spec_padded, - spec_lengths, - wave_padded, - wave_lengths, - sid, - ) - - -class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler): - """ - Maintain similar input lengths in a batch. - Length groups are specified by boundaries. - Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}. - - It removes samples which are not included in the boundaries. - Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 are discarded. 
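    Worked example (editorial note, not part of the original file): with
    boundaries = [0, 100, 200], a sample of length 80 falls in the first bucket
    (0 < 80 <= 100), a sample of length 150 falls in the second bucket
    (100 < 150 <= 200), and a sample of length 250 is discarded. The lookup
    mirrors the _bisect helper defined below:

        def bucket_index(length, boundaries):
            # Return the bucket index for a sample length, or -1 if discarded.
            for i in range(len(boundaries) - 1):
                if boundaries[i] < length <= boundaries[i + 1]:
                    return i
            return -1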
- """ - - def __init__( - self, - dataset, - batch_size, - boundaries, - num_replicas=None, - rank=None, - shuffle=True, - ): - super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle) - self.lengths = dataset.lengths - self.batch_size = batch_size - self.boundaries = boundaries - - self.buckets, self.num_samples_per_bucket = self._create_buckets() - self.total_size = sum(self.num_samples_per_bucket) - self.num_samples = self.total_size // self.num_replicas - - def _create_buckets(self): - buckets = [[] for _ in range(len(self.boundaries) - 1)] - for i in range(len(self.lengths)): - length = self.lengths[i] - idx_bucket = self._bisect(length) - if idx_bucket != -1: - buckets[idx_bucket].append(i) - - for i in range(len(buckets) - 1, -1, -1): # - if len(buckets[i]) == 0: - buckets.pop(i) - self.boundaries.pop(i + 1) - - num_samples_per_bucket = [] - for i in range(len(buckets)): - len_bucket = len(buckets[i]) - total_batch_size = self.num_replicas * self.batch_size - rem = ( - total_batch_size - (len_bucket % total_batch_size) - ) % total_batch_size - num_samples_per_bucket.append(len_bucket + rem) - return buckets, num_samples_per_bucket - - def __iter__(self): - # deterministically shuffle based on epoch - g = torch.Generator() - g.manual_seed(self.epoch) - - indices = [] - if self.shuffle: - for bucket in self.buckets: - indices.append(torch.randperm(len(bucket), generator=g).tolist()) - else: - for bucket in self.buckets: - indices.append(list(range(len(bucket)))) - - batches = [] - for i in range(len(self.buckets)): - bucket = self.buckets[i] - len_bucket = len(bucket) - ids_bucket = indices[i] - num_samples_bucket = self.num_samples_per_bucket[i] - - # add extra samples to make it evenly divisible - rem = num_samples_bucket - len_bucket - ids_bucket = ( - ids_bucket - + ids_bucket * (rem // len_bucket) - + ids_bucket[: (rem % len_bucket)] - ) - - # subsample - ids_bucket = ids_bucket[self.rank :: self.num_replicas] - - # batching - for j in range(len(ids_bucket) // self.batch_size): - batch = [ - bucket[idx] - for idx in ids_bucket[ - j * self.batch_size : (j + 1) * self.batch_size - ] - ] - batches.append(batch) - - if self.shuffle: - batch_ids = torch.randperm(len(batches), generator=g).tolist() - batches = [batches[i] for i in batch_ids] - self.batches = batches - - assert len(self.batches) * self.batch_size == self.num_samples - return iter(self.batches) - - def _bisect(self, x, lo=0, hi=None): - if hi is None: - hi = len(self.boundaries) - 1 - - if hi > lo: - mid = (hi + lo) // 2 - if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]: - return mid - elif x <= self.boundaries[mid]: - return self._bisect(x, lo, mid) - else: - return self._bisect(x, mid + 1, hi) - else: - return -1 - - def __len__(self): - return self.num_samples // self.batch_size diff --git a/spaces/Kevin676/AutoGPT/CODE_OF_CONDUCT.md b/spaces/Kevin676/AutoGPT/CODE_OF_CONDUCT.md deleted file mode 100644 index d2331b4c60b9fb27f06953273355dcf53b8d4321..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/AutoGPT/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,40 +0,0 @@ -# Code of Conduct for auto-gpt - -## 1. Purpose - -The purpose of this Code of Conduct is to provide guidelines for contributors to the auto-gpt project on GitHub. We aim to create a positive and inclusive environment where all participants can contribute and collaborate effectively. By participating in this project, you agree to abide by this Code of Conduct. - -## 2. 
Scope - -This Code of Conduct applies to all contributors, maintainers, and users of the auto-gpt project. It extends to all project spaces, including but not limited to issues, pull requests, code reviews, comments, and other forms of communication within the project. - -## 3. Our Standards - -We encourage the following behavior: - -* Being respectful and considerate to others -* Actively seeking diverse perspectives -* Providing constructive feedback and assistance -* Demonstrating empathy and understanding - -We discourage the following behavior: - -* Harassment or discrimination of any kind -* Disrespectful, offensive, or inappropriate language or content -* Personal attacks or insults -* Unwarranted criticism or negativity - -## 4. Reporting and Enforcement - -If you witness or experience any violations of this Code of Conduct, please report them to the project maintainers by email or other appropriate means. The maintainers will investigate and take appropriate action, which may include warnings, temporary or permanent bans, or other measures as necessary. - -Maintainers are responsible for ensuring compliance with this Code of Conduct and may take action to address any violations. - -## 5. Acknowledgements - -This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org/version/2/0/code_of_conduct.html). - -## 6. Contact - -If you have any questions or concerns, please contact the project maintainers. - diff --git a/spaces/KyanChen/FunSR/models/rdn.py b/spaces/KyanChen/FunSR/models/rdn.py deleted file mode 100644 index 39a95054857d485edf08310fed1ac665a2db196a..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/FunSR/models/rdn.py +++ /dev/null @@ -1,128 +0,0 @@ -# Residual Dense Network for Image Super-Resolution -# https://arxiv.org/abs/1802.08797 -# modified from: https://github.com/thstkdgus35/EDSR-PyTorch - -from argparse import Namespace - -import torch -import torch.nn as nn - -from models import register - - -class RDB_Conv(nn.Module): - def __init__(self, inChannels, growRate, kSize=3): - super(RDB_Conv, self).__init__() - Cin = inChannels - G = growRate - self.conv = nn.Sequential(*[ - nn.Conv2d(Cin, G, kSize, padding=(kSize-1)//2, stride=1), - nn.ReLU() - ]) - - def forward(self, x): - out = self.conv(x) - return torch.cat((x, out), 1) - -class RDB(nn.Module): - def __init__(self, growRate0, growRate, nConvLayers, kSize=3): - super(RDB, self).__init__() - G0 = growRate0 - G = growRate - C = nConvLayers - - convs = [] - for c in range(C): - convs.append(RDB_Conv(G0 + c*G, G)) - self.convs = nn.Sequential(*convs) - - # Local Feature Fusion - self.LFF = nn.Conv2d(G0 + C*G, G0, 1, padding=0, stride=1) - - def forward(self, x): - return self.LFF(self.convs(x)) + x - -class RDN(nn.Module): - def __init__(self, args): - super(RDN, self).__init__() - self.args = args - r = args.scale[0] - G0 = args.G0 - kSize = args.RDNkSize - - # number of RDB blocks, conv layers, out channels - self.D, C, G = { - 'A': (20, 6, 32), - 'B': (16, 8, 64), - }[args.RDNconfig] - - # Shallow feature extraction net - self.SFENet1 = nn.Conv2d(args.n_colors, G0, kSize, padding=(kSize-1)//2, stride=1) - self.SFENet2 = nn.Conv2d(G0, G0, kSize, padding=(kSize-1)//2, stride=1) - - # Redidual dense blocks and dense feature fusion - self.RDBs = nn.ModuleList() - for i in range(self.D): - self.RDBs.append( - RDB(growRate0 = G0, growRate = G, nConvLayers = C) - ) - - # Global Feature Fusion - self.GFF = nn.Sequential(*[ - nn.Conv2d(self.D * G0, G0, 1, padding=0, 
stride=1), - nn.Conv2d(G0, G0, kSize, padding=(kSize-1)//2, stride=1) - ]) - - if args.no_upsampling: - self.out_dim = G0 - else: - self.out_dim = args.n_colors - # Up-sampling net - if r == 2 or r == 3: - self.UPNet = nn.Sequential(*[ - nn.Conv2d(G0, G * r * r, kSize, padding=(kSize-1)//2, stride=1), - nn.PixelShuffle(r), - nn.Conv2d(G, args.n_colors, kSize, padding=(kSize-1)//2, stride=1) - ]) - elif r == 4: - self.UPNet = nn.Sequential(*[ - nn.Conv2d(G0, G * 4, kSize, padding=(kSize-1)//2, stride=1), - nn.PixelShuffle(2), - nn.Conv2d(G, G * 4, kSize, padding=(kSize-1)//2, stride=1), - nn.PixelShuffle(2), - nn.Conv2d(G, args.n_colors, kSize, padding=(kSize-1)//2, stride=1) - ]) - else: - raise ValueError("scale must be 2 or 3 or 4.") - - def forward(self, x): - f__1 = self.SFENet1(x) - x = self.SFENet2(f__1) - - RDBs_out = [] - for i in range(self.D): - x = self.RDBs[i](x) - RDBs_out.append(x) - - x = self.GFF(torch.cat(RDBs_out,1)) - x += f__1 - - if self.args.no_upsampling: - return x - else: - return self.UPNet(x) - - -@register('rdn') -def make_rdn(G0=64, RDNkSize=3, RDNconfig='B', - scale=2, no_upsampling=False): - args = Namespace() - args.G0 = G0 - args.RDNkSize = RDNkSize - args.RDNconfig = RDNconfig - - args.scale = [scale] - args.no_upsampling = no_upsampling - - args.n_colors = 3 - return RDN(args) diff --git a/spaces/KyanChen/RSPrompter/configs/rsprompter/samseg_mask2former_whu_config.py b/spaces/KyanChen/RSPrompter/configs/rsprompter/samseg_mask2former_whu_config.py deleted file mode 100644 index 09aabea151d06a6fc91eda4b3e35139ab43cff7c..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/configs/rsprompter/samseg_mask2former_whu_config.py +++ /dev/null @@ -1,349 +0,0 @@ -custom_imports = dict(imports=['mmseg.datasets', 'mmseg.models'], allow_failed_imports=False) - -sub_model_train = [ - 'panoptic_head', - 'sam_neck', - 'data_preprocessor' -] - -sub_model_optim = { - 'sam_neck': {'lr_mult': 1}, - 'panoptic_head': {'lr_mult': 1}, -} - -max_epochs = 400 - -optimizer = dict( - type='AdamW', - sub_model=sub_model_optim, - lr=0.0005, - weight_decay=1e-3 -) - -param_scheduler = [ - # warm up learning rate scheduler - dict( - type='LinearLR', - start_factor=5e-4, - by_epoch=True, - begin=0, - end=1, - # update by iter - convert_to_iter_based=True), - # main learning rate scheduler - dict( - type='CosineAnnealingLR', - T_max=max_epochs, - by_epoch=True, - begin=1, - end=max_epochs, - ), -] - -param_scheduler_callback = dict( - type='ParamSchedulerHook' -) - -evaluator_ = dict( - type='CocoPLMetric', - metric=['bbox', 'segm'], - proposal_nums=[1, 10, 100] -) - -evaluator = dict( - # train_evaluator=evaluator_, - val_evaluator=evaluator_, -) - - -image_size = (1024, 1024) - -data_preprocessor = dict( - type='mmdet.DetDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True, - pad_size_divisor=32, - pad_mask=True, - mask_pad_value=0, -) - -num_things_classes = 1 -num_stuff_classes = 0 -num_classes = num_things_classes + num_stuff_classes - -num_queries = 100 -model_cfg = dict( - type='SegSAMPLer', - hyperparameters=dict( - optimizer=optimizer, - param_scheduler=param_scheduler, - evaluator=evaluator, - ), - need_train_names=sub_model_train, - data_preprocessor=data_preprocessor, - backbone=dict( - type='vit_h', - checkpoint='pretrain/sam/sam_vit_h_4b8939.pth', - # type='vit_b', - # checkpoint='pretrain/sam/sam_vit_b_01ec64.pth', - ), - sam_neck=dict( - type='SAMAggregatorNeck', - in_channels=[1280] * 32, - # 
in_channels=[768] * 12, - inner_channels=32, - selected_channels=range(4, 32, 2), - # selected_channels=range(4, 12, 2), - out_channels=256, - up_sample_scale=4, - ), - panoptic_head=dict( - type='mmdet.Mask2FormerHead', - in_channels=[256, 256, 256], # pass to pixel_decoder inside - strides=[8, 16, 32], - feat_channels=256, - out_channels=256, - num_things_classes=num_things_classes, - num_stuff_classes=num_stuff_classes, - num_queries=num_queries, - num_transformer_feat_level=3, - pixel_decoder=dict( - type='mmdet.MSDeformAttnPixelDecoder', - num_outs=3, - norm_cfg=dict(type='GN', num_groups=32), - act_cfg=dict(type='ReLU'), - encoder=dict( # DeformableDetrTransformerEncoder - # num_layers=6, - num_layers=2, - layer_cfg=dict( # DeformableDetrTransformerEncoderLayer - self_attn_cfg=dict( # MultiScaleDeformableAttention - embed_dims=256, - num_heads=8, - num_levels=3, - num_points=4, - dropout=0.1, - batch_first=True), - ffn_cfg=dict( - embed_dims=256, - feedforward_channels=1024, - num_fcs=2, - ffn_drop=0.1, - act_cfg=dict(type='ReLU', inplace=True)))), - positional_encoding=dict(num_feats=128, normalize=True)), - enforce_decoder_input_project=False, - positional_encoding=dict(num_feats=128, normalize=True), - transformer_decoder=dict( # Mask2FormerTransformerDecoder - return_intermediate=True, - # num_layers=9, - num_layers=3, - layer_cfg=dict( # Mask2FormerTransformerDecoderLayer - self_attn_cfg=dict( # MultiheadAttention - embed_dims=256, - num_heads=8, - dropout=0.1, - batch_first=True), - cross_attn_cfg=dict( # MultiheadAttention - embed_dims=256, - num_heads=8, - dropout=0.1, - batch_first=True), - ffn_cfg=dict( - embed_dims=256, - feedforward_channels=2048, - num_fcs=2, - ffn_drop=0.1, - act_cfg=dict(type='ReLU', inplace=True))), - init_cfg=None), - loss_cls=dict( - type='mmdet.CrossEntropyLoss', - use_sigmoid=False, - loss_weight=2.0, - reduction='mean', - class_weight=[1.0] * num_classes + [0.1]), - loss_mask=dict( - type='mmdet.CrossEntropyLoss', - use_sigmoid=True, - reduction='mean', - loss_weight=5.0), - loss_dice=dict( - type='mmdet.DiceLoss', - use_sigmoid=True, - activate=True, - reduction='mean', - naive_dice=True, - eps=1.0, - loss_weight=5.0)), - panoptic_fusion_head=dict( - type='mmdet.MaskFormerFusionHead', - num_things_classes=num_things_classes, - num_stuff_classes=num_stuff_classes, - loss_panoptic=None, - init_cfg=None), - train_cfg=dict( - num_points=12544, - oversample_ratio=3.0, - importance_sample_ratio=0.75, - assigner=dict( - type='mmdet.HungarianAssigner', - match_costs=[ - dict(type='mmdet.ClassificationCost', weight=2.0), - dict( - type='mmdet.CrossEntropyLossCost', weight=5.0, use_sigmoid=True), - dict(type='mmdet.DiceCost', weight=5.0, pred_act=True, eps=1.0) - ]), - sampler=dict(type='mmdet.MaskPseudoSampler')), - test_cfg=dict( - panoptic_on=False, - # For now, the dataset does not support - # evaluating semantic segmentation metric. - semantic_on=False, - instance_on=True, - # max_per_image is for instance segmentation. - max_per_image=num_queries, - iou_thr=0.8, - # In Mask2Former's panoptic postprocessing, - # it will filter mask area where score is less than 0.5 . 
- filter_low_score=True), - init_cfg=None) - -task_name = 'whu_ins' -exp_name = 'E20230531_2' -logger = dict( - type='WandbLogger', - project=task_name, - group='samcls-mask2former', - name=exp_name -) -# logger = None - -callbacks = [ - param_scheduler_callback, - dict( - type='ModelCheckpoint', - dirpath=f'results/{task_name}/{exp_name}/checkpoints', - save_last=True, - mode='max', - monitor='valsegm_map_0', - save_top_k=2, - filename='epoch_{epoch}-map_{valsegm_map_0:.4f}' - ), - dict( - type='LearningRateMonitor', - logging_interval='step' - ) -] - - -trainer_cfg = dict( - compiled_model=False, - accelerator="auto", - strategy="auto", - # strategy="ddp", - # strategy='ddp_find_unused_parameters_true', - # precision='32', - # precision='16-mixed', - devices=8, - default_root_dir=f'results/{task_name}/{exp_name}', - # default_root_dir='results/tmp', - max_epochs=max_epochs, - logger=logger, - callbacks=callbacks, - log_every_n_steps=20, - check_val_every_n_epoch=5, - benchmark=True, - # sync_batchnorm=True, - # fast_dev_run=True, - - # limit_train_batches=1, - # limit_val_batches=0, - # limit_test_batches=None, - # limit_predict_batches=None, - # overfit_batches=0.0, - - # val_check_interval=None, - # num_sanity_val_steps=0, - # enable_checkpointing=None, - # enable_progress_bar=None, - # enable_model_summary=None, - # accumulate_grad_batches=32, - # gradient_clip_val=15, - # gradient_clip_algorithm='norm', - # deterministic=None, - # inference_mode: bool=True, - use_distributed_sampler=True, - # profiler="simple", - # detect_anomaly=False, - # barebones=False, - # plugins=None, - # reload_dataloaders_every_n_epochs=0, -) - - -backend_args = None -train_pipeline = [ - dict(type='mmdet.LoadImageFromFile'), - dict(type='mmdet.LoadAnnotations', with_bbox=True, with_mask=True), - dict(type='mmdet.Resize', scale=image_size), - dict(type='mmdet.RandomFlip', prob=0.5), - dict(type='mmdet.PackDetInputs') -] - -test_pipeline = [ - dict(type='mmdet.LoadImageFromFile', backend_args=backend_args), - dict(type='mmdet.Resize', scale=image_size), - # If you don't have a gt annotation, delete the pipeline - dict(type='mmdet.LoadAnnotations', with_bbox=True, with_mask=True), - dict( - type='mmdet.PackDetInputs', - meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', - 'scale_factor')) -] - - -train_batch_size_per_gpu = 6 -train_num_workers = 4 -test_batch_size_per_gpu = 6 -test_num_workers = 4 -persistent_workers = True - -data_parent = '/mnt/search01/dataset/cky_data/WHU' -train_data_prefix = 'train/' -val_data_prefix = 'test/' -dataset_type = 'WHUInsSegDataset' - -val_loader = dict( - batch_size=test_batch_size_per_gpu, - num_workers=test_num_workers, - persistent_workers=persistent_workers, - pin_memory=True, - dataset=dict( - type=dataset_type, - data_root=data_parent, - ann_file='annotations/WHU_building_test.json', - data_prefix=dict(img_path=val_data_prefix + '/image', seg_path=val_data_prefix + '/label'), - test_mode=True, - filter_cfg=dict(filter_empty_gt=True, min_size=32), - pipeline=test_pipeline, - backend_args=backend_args)) - -datamodule_cfg = dict( - type='PLDataModule', - train_loader=dict( - batch_size=train_batch_size_per_gpu, - num_workers=train_num_workers, - persistent_workers=persistent_workers, - pin_memory=True, - dataset=dict( - type=dataset_type, - data_root=data_parent, - ann_file='annotations/WHU_building_train.json', - data_prefix=dict(img_path=train_data_prefix + '/image', seg_path=train_data_prefix + '/label'), - filter_cfg=dict(filter_empty_gt=True, min_size=32), - 
pipeline=train_pipeline, - backend_args=backend_args) - ), - val_loader=val_loader, - # test_loader=val_loader - predict_loader=val_loader -) \ No newline at end of file diff --git a/spaces/KyanChen/RSPrompter/mmdet/datasets/samplers/class_aware_sampler.py b/spaces/KyanChen/RSPrompter/mmdet/datasets/samplers/class_aware_sampler.py deleted file mode 100644 index 6ca2f9b3ffb7c780ab25cc3704b67589763259e0..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmdet/datasets/samplers/class_aware_sampler.py +++ /dev/null @@ -1,192 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import math -from typing import Dict, Iterator, Optional, Union - -import numpy as np -import torch -from mmengine.dataset import BaseDataset -from mmengine.dist import get_dist_info, sync_random_seed -from torch.utils.data import Sampler - -from mmdet.registry import DATA_SAMPLERS - - -@DATA_SAMPLERS.register_module() -class ClassAwareSampler(Sampler): - r"""Sampler that restricts data loading to the label of the dataset. - - A class-aware sampling strategy to effectively tackle the - non-uniform class distribution. The length of the training data is - consistent with source data. Simple improvements based on `Relay - Backpropagation for Effective Learning of Deep Convolutional - Neural Networks `_ - - The implementation logic is referred to - https://github.com/Sense-X/TSD/blob/master/mmdet/datasets/samplers/distributed_classaware_sampler.py - - Args: - dataset: Dataset used for sampling. - seed (int, optional): random seed used to shuffle the sampler. - This number should be identical across all - processes in the distributed group. Defaults to None. - num_sample_class (int): The number of samples taken from each - per-label list. Defaults to 1. - """ - - def __init__(self, - dataset: BaseDataset, - seed: Optional[int] = None, - num_sample_class: int = 1) -> None: - rank, world_size = get_dist_info() - self.rank = rank - self.world_size = world_size - - self.dataset = dataset - self.epoch = 0 - # Must be the same across all workers. If None, will use a - # random seed shared among workers - # (require synchronization among all workers) - if seed is None: - seed = sync_random_seed() - self.seed = seed - - # The number of samples taken from each per-label list - assert num_sample_class > 0 and isinstance(num_sample_class, int) - self.num_sample_class = num_sample_class - # Get per-label image list from dataset - self.cat_dict = self.get_cat2imgs() - - self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / world_size)) - self.total_size = self.num_samples * self.world_size - - # get number of images containing each category - self.num_cat_imgs = [len(x) for x in self.cat_dict.values()] - # filter labels without images - self.valid_cat_inds = [ - i for i, length in enumerate(self.num_cat_imgs) if length != 0 - ] - self.num_classes = len(self.valid_cat_inds) - - def get_cat2imgs(self) -> Dict[int, list]: - """Get a dict with class as key and img_ids as values. - - Returns: - dict[int, list]: A dict of per-label image list, - the item of the dict indicates a label index, - corresponds to the image index that contains the label. 
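        Example of the returned mapping (editorial illustration with hypothetical
        indices): for a 3-class dataset where image 0 contains categories {0, 2}
        and image 1 contains category {2}, the result is

            {0: [0], 1: [], 2: [0, 1]}

        i.e. every class index appears as a key, even when no image contains it.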
- """ - classes = self.dataset.metainfo.get('classes', None) - if classes is None: - raise ValueError('dataset metainfo must contain `classes`') - # sort the label index - cat2imgs = {i: [] for i in range(len(classes))} - for i in range(len(self.dataset)): - cat_ids = set(self.dataset.get_cat_ids(i)) - for cat in cat_ids: - cat2imgs[cat].append(i) - return cat2imgs - - def __iter__(self) -> Iterator[int]: - # deterministically shuffle based on epoch - g = torch.Generator() - g.manual_seed(self.epoch + self.seed) - - # initialize label list - label_iter_list = RandomCycleIter(self.valid_cat_inds, generator=g) - # initialize each per-label image list - data_iter_dict = dict() - for i in self.valid_cat_inds: - data_iter_dict[i] = RandomCycleIter(self.cat_dict[i], generator=g) - - def gen_cat_img_inds(cls_list, data_dict, num_sample_cls): - """Traverse the categories and extract `num_sample_cls` image - indexes of the corresponding categories one by one.""" - id_indices = [] - for _ in range(len(cls_list)): - cls_idx = next(cls_list) - for _ in range(num_sample_cls): - id = next(data_dict[cls_idx]) - id_indices.append(id) - return id_indices - - # deterministically shuffle based on epoch - num_bins = int( - math.ceil(self.total_size * 1.0 / self.num_classes / - self.num_sample_class)) - indices = [] - for i in range(num_bins): - indices += gen_cat_img_inds(label_iter_list, data_iter_dict, - self.num_sample_class) - - # fix extra samples to make it evenly divisible - if len(indices) >= self.total_size: - indices = indices[:self.total_size] - else: - indices += indices[:(self.total_size - len(indices))] - assert len(indices) == self.total_size - - # subsample - offset = self.num_samples * self.rank - indices = indices[offset:offset + self.num_samples] - assert len(indices) == self.num_samples - - return iter(indices) - - def __len__(self) -> int: - """The number of samples in this rank.""" - return self.num_samples - - def set_epoch(self, epoch: int) -> None: - """Sets the epoch for this sampler. - - When :attr:`shuffle=True`, this ensures all replicas use a different - random ordering for each epoch. Otherwise, the next iteration of this - sampler will yield the same ordering. - - Args: - epoch (int): Epoch number. - """ - self.epoch = epoch - - -class RandomCycleIter: - """Shuffle the list and do it again after the list have traversed. - - The implementation logic is referred to - https://github.com/wutong16/DistributionBalancedLoss/blob/master/mllt/datasets/loader/sampler.py - - Example: - >>> label_list = [0, 1, 2, 4, 5] - >>> g = torch.Generator() - >>> g.manual_seed(0) - >>> label_iter_list = RandomCycleIter(label_list, generator=g) - >>> index = next(label_iter_list) - Args: - data (list or ndarray): The data that needs to be shuffled. - generator: An torch.Generator object, which is used in setting the seed - for generating random numbers. 
- """ # noqa: W605 - - def __init__(self, - data: Union[list, np.ndarray], - generator: torch.Generator = None) -> None: - self.data = data - self.length = len(data) - self.index = torch.randperm(self.length, generator=generator).numpy() - self.i = 0 - self.generator = generator - - def __iter__(self) -> Iterator: - return self - - def __len__(self) -> int: - return len(self.data) - - def __next__(self): - if self.i == self.length: - self.index = torch.randperm( - self.length, generator=self.generator).numpy() - self.i = 0 - idx = self.data[self.index[self.i]] - self.i += 1 - return idx diff --git a/spaces/KyanChen/RSPrompter/mmdet/models/detectors/yolact.py b/spaces/KyanChen/RSPrompter/mmdet/models/detectors/yolact.py deleted file mode 100644 index f15fb7b70263b0c4018751067771b1365af96f67..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmdet/models/detectors/yolact.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from mmdet.registry import MODELS -from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig -from .single_stage_instance_seg import SingleStageInstanceSegmentor - - -@MODELS.register_module() -class YOLACT(SingleStageInstanceSegmentor): - """Implementation of `YOLACT `_""" - - def __init__(self, - backbone: ConfigType, - neck: ConfigType, - bbox_head: ConfigType, - mask_head: ConfigType, - train_cfg: OptConfigType = None, - test_cfg: OptConfigType = None, - data_preprocessor: OptConfigType = None, - init_cfg: OptMultiConfig = None) -> None: - super().__init__( - backbone=backbone, - neck=neck, - bbox_head=bbox_head, - mask_head=mask_head, - train_cfg=train_cfg, - test_cfg=test_cfg, - data_preprocessor=data_preprocessor, - init_cfg=init_cfg) diff --git a/spaces/KyanChen/RSPrompter/mmpretrain/apis/base.py b/spaces/KyanChen/RSPrompter/mmpretrain/apis/base.py deleted file mode 100644 index 7bff6bd18675a3a0996dcd09081a15728311657f..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmpretrain/apis/base.py +++ /dev/null @@ -1,390 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from abc import abstractmethod -from math import ceil -from typing import Callable, Iterable, List, Optional, Tuple, Union - -import numpy as np -import torch -from mmengine.config import Config -from mmengine.dataset import default_collate -from mmengine.fileio import get_file_backend -from mmengine.model import BaseModel -from mmengine.runner import load_checkpoint - -from mmpretrain.structures import DataSample -from mmpretrain.utils import track -from .model import get_model, list_models - -ModelType = Union[BaseModel, str, Config] -InputType = Union[str, np.ndarray, list] - - -class BaseInferencer: - """Base inferencer for various tasks. - - The BaseInferencer provides the standard workflow for inference as follows: - - 1. Preprocess the input data by :meth:`preprocess`. - 2. Forward the data to the model by :meth:`forward`. ``BaseInferencer`` - assumes the model inherits from :class:`mmengine.models.BaseModel` and - will call `model.test_step` in :meth:`forward` by default. - 3. Visualize the results by :meth:`visualize`. - 4. Postprocess and return the results by :meth:`postprocess`. - - When we call the subclasses inherited from BaseInferencer (not overriding - ``__call__``), the workflow will be executed in order. 
- - All subclasses of BaseInferencer could define the following class - attributes for customization: - - - ``preprocess_kwargs``: The keys of the kwargs that will be passed to - :meth:`preprocess`. - - ``forward_kwargs``: The keys of the kwargs that will be passed to - :meth:`forward` - - ``visualize_kwargs``: The keys of the kwargs that will be passed to - :meth:`visualize` - - ``postprocess_kwargs``: The keys of the kwargs that will be passed to - :meth:`postprocess` - - All attributes mentioned above should be a ``set`` of keys (strings), - and each key should not be duplicated. Actually, :meth:`__call__` will - dispatch all the arguments to the corresponding methods according to the - ``xxx_kwargs`` mentioned above. - - Subclasses inherited from ``BaseInferencer`` should implement - :meth:`_init_pipeline`, :meth:`visualize` and :meth:`postprocess`: - - - _init_pipeline: Return a callable object to preprocess the input data. - - visualize: Visualize the results returned by :meth:`forward`. - - postprocess: Postprocess the results returned by :meth:`forward` and - :meth:`visualize`. - - Args: - model (BaseModel | str | Config): A model name or a path to the config - file, or a :obj:`BaseModel` object. The model name can be found - by ``cls.list_models()`` and you can also query it in - :doc:`/modelzoo_statistics`. - pretrained (str, optional): Path to the checkpoint. If None, it will - try to find a pre-defined weight from the model you specified - (only work if the ``model`` is a model name). Defaults to None. - device (str | torch.device | None): Transfer the model to the target - device. Defaults to None. - device_map (str | dict | None): A map that specifies where each - submodule should go. It doesn't need to be refined to each - parameter/buffer name, once a given module name is inside, every - submodule of it will be sent to the same device. You can use - `device_map="auto"` to automatically generate the device map. - Defaults to None. - offload_folder (str | None): If the `device_map` contains any value - `"disk"`, the folder where we will offload weights. - **kwargs: Other keyword arguments to initialize the model (only work if - the ``model`` is a model name). - """ - - preprocess_kwargs: set = set() - forward_kwargs: set = set() - visualize_kwargs: set = set() - postprocess_kwargs: set = set() - - def __init__(self, - model: ModelType, - pretrained: Union[bool, str] = True, - device: Union[str, torch.device, None] = None, - device_map=None, - offload_folder=None, - **kwargs) -> None: - - if isinstance(model, BaseModel): - if isinstance(pretrained, str): - load_checkpoint(model, pretrained, map_location='cpu') - if device_map is not None: - from .utils import dispatch_model - model = dispatch_model( - model, - device_map=device_map, - offload_folder=offload_folder) - elif device is not None: - model.to(device) - else: - model = get_model( - model, - pretrained, - device=device, - device_map=device_map, - offload_folder=offload_folder, - **kwargs) - - model.eval() - - self.config = model._config - self.model = model - self.pipeline = self._init_pipeline(self.config) - self.visualizer = None - - def __call__( - self, - inputs, - return_datasamples: bool = False, - batch_size: int = 1, - **kwargs, - ) -> dict: - """Call the inferencer. - - Args: - inputs (InputsType): Inputs for the inferencer. - return_datasamples (bool): Whether to return results as - :obj:`BaseDataElement`. Defaults to False. - batch_size (int): Batch size. Defaults to 1. 
- **kwargs: Key words arguments passed to :meth:`preprocess`, - :meth:`forward`, :meth:`visualize` and :meth:`postprocess`. - Each key in kwargs should be in the corresponding set of - ``preprocess_kwargs``, ``forward_kwargs``, ``visualize_kwargs`` - and ``postprocess_kwargs``. - - Returns: - dict: Inference and visualization results. - """ - ( - preprocess_kwargs, - forward_kwargs, - visualize_kwargs, - postprocess_kwargs, - ) = self._dispatch_kwargs(**kwargs) - - ori_inputs = self._inputs_to_list(inputs) - inputs = self.preprocess( - ori_inputs, batch_size=batch_size, **preprocess_kwargs) - preds = [] - for data in track( - inputs, 'Inference', total=ceil(len(ori_inputs) / batch_size)): - preds.extend(self.forward(data, **forward_kwargs)) - visualization = self.visualize(ori_inputs, preds, **visualize_kwargs) - results = self.postprocess(preds, visualization, return_datasamples, - **postprocess_kwargs) - return results - - def _inputs_to_list(self, inputs: InputType) -> list: - """Preprocess the inputs to a list. - - Cast the input data to a list of data. - - - list or tuple: return inputs - - str: - - Directory path: return all files in the directory - - other cases: return a list containing the string. The string - could be a path to file, a url or other types of string according - to the task. - - other: return a list with one item. - - Args: - inputs (str | array | list): Inputs for the inferencer. - - Returns: - list: List of input for the :meth:`preprocess`. - """ - if isinstance(inputs, str): - backend = get_file_backend(inputs) - if hasattr(backend, 'isdir') and backend.isdir(inputs): - # Backends like HttpsBackend do not implement `isdir`, so only - # those backends that implement `isdir` could accept the inputs - # as a directory - file_list = backend.list_dir_or_file(inputs, list_dir=False) - inputs = [ - backend.join_path(inputs, file) for file in file_list - ] - - if not isinstance(inputs, (list, tuple)): - inputs = [inputs] - - return list(inputs) - - def preprocess(self, inputs: InputType, batch_size: int = 1, **kwargs): - """Process the inputs into a model-feedable format. - - Customize your preprocess by overriding this method. Preprocess should - return an iterable object, of which each item will be used as the - input of ``model.test_step``. - - ``BaseInferencer.preprocess`` will return an iterable chunked data, - which will be used in __call__ like this: - - .. code-block:: python - - def __call__(self, inputs, batch_size=1, **kwargs): - chunked_data = self.preprocess(inputs, batch_size, **kwargs) - for batch in chunked_data: - preds = self.forward(batch, **kwargs) - - Args: - inputs (InputsType): Inputs given by user. - batch_size (int): batch size. Defaults to 1. - - Yields: - Any: Data processed by the ``pipeline`` and ``default_collate``. - """ - chunked_data = self._get_chunk_data( - map(self.pipeline, inputs), batch_size) - yield from map(default_collate, chunked_data) - - @torch.no_grad() - def forward(self, inputs: Union[dict, tuple], **kwargs): - """Feed the inputs to the model.""" - return self.model.test_step(inputs) - - def visualize(self, - inputs: list, - preds: List[DataSample], - show: bool = False, - **kwargs) -> List[np.ndarray]: - """Visualize predictions. - - Customize your visualization by overriding this method. visualize - should return visualization results, which could be np.ndarray or any - other objects. - - Args: - inputs (list): Inputs preprocessed by :meth:`_inputs_to_list`. - preds (Any): Predictions of the model. 
- show (bool): Whether to display the image in a popup window. - Defaults to False. - - Returns: - List[np.ndarray]: Visualization results. - """ - if show: - raise NotImplementedError( - f'The `visualize` method of {self.__class__.__name__} ' - 'is not implemented.') - - @abstractmethod - def postprocess( - self, - preds: List[DataSample], - visualization: List[np.ndarray], - return_datasample=False, - **kwargs, - ) -> dict: - """Process the predictions and visualization results from ``forward`` - and ``visualize``. - - This method should be responsible for the following tasks: - - 1. Convert datasamples into a json-serializable dict if needed. - 2. Pack the predictions and visualization results and return them. - 3. Dump or log the predictions. - - Customize your postprocess by overriding this method. Make sure - ``postprocess`` will return a dict with visualization results and - inference results. - - Args: - preds (List[Dict]): Predictions of the model. - visualization (np.ndarray): Visualized predictions. - return_datasample (bool): Whether to return results as datasamples. - Defaults to False. - - Returns: - dict: Inference and visualization results with key ``predictions`` - and ``visualization`` - - - ``visualization (Any)``: Returned by :meth:`visualize` - - ``predictions`` (dict or DataSample): Returned by - :meth:`forward` and processed in :meth:`postprocess`. - If ``return_datasample=False``, it usually should be a - json-serializable dict containing only basic data elements such - as strings and numbers. - """ - - @abstractmethod - def _init_pipeline(self, cfg: Config) -> Callable: - """Initialize the test pipeline. - - Return a pipeline to handle various input data, such as ``str``, - ``np.ndarray``. It is an abstract method in BaseInferencer, and should - be implemented in subclasses. - - The returned pipeline will be used to process a single data. - It will be used in :meth:`preprocess` like this: - - .. code-block:: python - def preprocess(self, inputs, batch_size, **kwargs): - ... - dataset = map(self.pipeline, dataset) - ... - """ - - def _get_chunk_data(self, inputs: Iterable, chunk_size: int): - """Get batch data from dataset. - - Args: - inputs (Iterable): An iterable dataset. - chunk_size (int): Equivalent to batch size. - - Yields: - list: batch data. - """ - inputs_iter = iter(inputs) - while True: - try: - chunk_data = [] - for _ in range(chunk_size): - processed_data = next(inputs_iter) - chunk_data.append(processed_data) - yield chunk_data - except StopIteration: - if chunk_data: - yield chunk_data - break - - def _dispatch_kwargs(self, **kwargs) -> Tuple[dict, dict, dict, dict]: - """Dispatch kwargs to preprocess(), forward(), visualize() and - postprocess() according to the actual demands. - - Returns: - Tuple[Dict, Dict, Dict, Dict]: kwargs passed to preprocess, - forward, visualize and postprocess respectively. 
- """ - # Ensure each argument only matches one function - method_kwargs = self.preprocess_kwargs | self.forward_kwargs | \ - self.visualize_kwargs | self.postprocess_kwargs - - union_kwargs = method_kwargs | set(kwargs.keys()) - if union_kwargs != method_kwargs: - unknown_kwargs = union_kwargs - method_kwargs - raise ValueError( - f'unknown argument {unknown_kwargs} for `preprocess`, ' - '`forward`, `visualize` and `postprocess`') - - preprocess_kwargs = {} - forward_kwargs = {} - visualize_kwargs = {} - postprocess_kwargs = {} - - for key, value in kwargs.items(): - if key in self.preprocess_kwargs: - preprocess_kwargs[key] = value - if key in self.forward_kwargs: - forward_kwargs[key] = value - if key in self.visualize_kwargs: - visualize_kwargs[key] = value - if key in self.postprocess_kwargs: - postprocess_kwargs[key] = value - - return ( - preprocess_kwargs, - forward_kwargs, - visualize_kwargs, - postprocess_kwargs, - ) - - @staticmethod - def list_models(pattern: Optional[str] = None): - """List models defined in metafile of corresponding packages. - - Args: - pattern (str | None): A wildcard pattern to match model names. - - Returns: - List[str]: a list of model names. - """ - return list_models(pattern=pattern) diff --git "a/spaces/Liu-LAB/GPT-academic/crazy_functions/chatglm\345\276\256\350\260\203\345\267\245\345\205\267.py" "b/spaces/Liu-LAB/GPT-academic/crazy_functions/chatglm\345\276\256\350\260\203\345\267\245\345\205\267.py" deleted file mode 100644 index 336d7cfc85ac159841758123fa057bd20a0bbbec..0000000000000000000000000000000000000000 --- "a/spaces/Liu-LAB/GPT-academic/crazy_functions/chatglm\345\276\256\350\260\203\345\267\245\345\205\267.py" +++ /dev/null @@ -1,141 +0,0 @@ -from toolbox import CatchException, update_ui, promote_file_to_downloadzone -from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency -import datetime, json - -def fetch_items(list_of_items, batch_size): - for i in range(0, len(list_of_items), batch_size): - yield list_of_items[i:i + batch_size] - -def string_to_options(arguments): - import argparse - import shlex - - # Create an argparse.ArgumentParser instance - parser = argparse.ArgumentParser() - - # Add command-line arguments - parser.add_argument("--llm_to_learn", type=str, help="LLM model to learn", default="gpt-3.5-turbo") - parser.add_argument("--prompt_prefix", type=str, help="Prompt prefix", default='') - parser.add_argument("--system_prompt", type=str, help="System prompt", default='') - parser.add_argument("--batch", type=int, help="System prompt", default=50) - parser.add_argument("--pre_seq_len", type=int, help="pre_seq_len", default=50) - parser.add_argument("--learning_rate", type=float, help="learning_rate", default=2e-2) - parser.add_argument("--num_gpus", type=int, help="num_gpus", default=1) - parser.add_argument("--json_dataset", type=str, help="json_dataset", default="") - parser.add_argument("--ptuning_directory", type=str, help="ptuning_directory", default="") - - - - # Parse the arguments - args = parser.parse_args(shlex.split(arguments)) - - return args - -@CatchException -def 微调数据集生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - """ - txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径 - llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行 - plugin_kwargs 插件模型的参数 - chatbot 聊天显示框的句柄,用于显示给用户 - history 聊天历史,前情提要 - system_prompt 给gpt的静默提醒 - web_port 当前软件运行的端口号 - """ - history = [] # 清空历史,以免输入溢出 - chatbot.append(("这是什么功能?", "[Local Message] 微调数据集生成")) - if ("advanced_arg" in 
plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg") - args = plugin_kwargs.get("advanced_arg", None) - if args is None: - chatbot.append(("没给定指令", "退出")) - yield from update_ui(chatbot=chatbot, history=history); return - else: - arguments = string_to_options(arguments=args) - - dat = [] - with open(txt, 'r', encoding='utf8') as f: - for line in f.readlines(): - json_dat = json.loads(line) - dat.append(json_dat["content"]) - - llm_kwargs['llm_model'] = arguments.llm_to_learn - for batch in fetch_items(dat, arguments.batch): - res = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency( - inputs_array=[f"{arguments.prompt_prefix}\n\n{b}" for b in (batch)], - inputs_show_user_array=[f"Show Nothing" for _ in (batch)], - llm_kwargs=llm_kwargs, - chatbot=chatbot, - history_array=[[] for _ in (batch)], - sys_prompt_array=[arguments.system_prompt for _ in (batch)], - max_workers=10 # OpenAI所允许的最大并行过载 - ) - - with open(txt+'.generated.json', 'a+', encoding='utf8') as f: - for b, r in zip(batch, res[1::2]): - f.write(json.dumps({"content":b, "summary":r}, ensure_ascii=False)+'\n') - - promote_file_to_downloadzone(txt+'.generated.json', rename_file='generated.json', chatbot=chatbot) - return - - - -@CatchException -def 启动微调(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - """ - txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径 - llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行 - plugin_kwargs 插件模型的参数 - chatbot 聊天显示框的句柄,用于显示给用户 - history 聊天历史,前情提要 - system_prompt 给gpt的静默提醒 - web_port 当前软件运行的端口号 - """ - import subprocess - history = [] # 清空历史,以免输入溢出 - chatbot.append(("这是什么功能?", "[Local Message] 微调数据集生成")) - if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg") - args = plugin_kwargs.get("advanced_arg", None) - if args is None: - chatbot.append(("没给定指令", "退出")) - yield from update_ui(chatbot=chatbot, history=history); return - else: - arguments = string_to_options(arguments=args) - - - - pre_seq_len = arguments.pre_seq_len # 128 - learning_rate = arguments.learning_rate # 2e-2 - num_gpus = arguments.num_gpus # 1 - json_dataset = arguments.json_dataset # 't_code.json' - ptuning_directory = arguments.ptuning_directory # '/home/hmp/ChatGLM2-6B/ptuning' - - command = f"torchrun --standalone --nnodes=1 --nproc-per-node={num_gpus} main.py \ - --do_train \ - --train_file AdvertiseGen/{json_dataset} \ - --validation_file AdvertiseGen/{json_dataset} \ - --preprocessing_num_workers 20 \ - --prompt_column content \ - --response_column summary \ - --overwrite_cache \ - --model_name_or_path THUDM/chatglm2-6b \ - --output_dir output/clothgen-chatglm2-6b-pt-{pre_seq_len}-{learning_rate} \ - --overwrite_output_dir \ - --max_source_length 256 \ - --max_target_length 256 \ - --per_device_train_batch_size 1 \ - --per_device_eval_batch_size 1 \ - --gradient_accumulation_steps 16 \ - --predict_with_generate \ - --max_steps 100 \ - --logging_steps 10 \ - --save_steps 20 \ - --learning_rate {learning_rate} \ - --pre_seq_len {pre_seq_len} \ - --quantization_bit 4" - - process = subprocess.Popen(command, shell=True, cwd=ptuning_directory) - try: - process.communicate(timeout=3600*24) - except subprocess.TimeoutExpired: - process.kill() - return diff --git a/spaces/Mahiruoshi/MyGO_VIts-bert/text/__init__.py b/spaces/Mahiruoshi/MyGO_VIts-bert/text/__init__.py deleted file mode 100644 index 8dd10db04d90f336f96a2447555264a488913c02..0000000000000000000000000000000000000000 --- 
a/spaces/Mahiruoshi/MyGO_VIts-bert/text/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -from text.symbols import * - - -_symbol_to_id = {s: i for i, s in enumerate(symbols)} - - -def cleaned_text_to_sequence(cleaned_text, tones, language): - """Converts a string of text to a sequence of IDs corresponding to the symbols in the text. - Args: - text: string to convert to a sequence - Returns: - List of integers corresponding to the symbols in the text - """ - phones = [_symbol_to_id[symbol] for symbol in cleaned_text] - tone_start = language_tone_start_map[language] - tones = [i + tone_start for i in tones] - lang_id = language_id_map[language] - lang_ids = [lang_id for i in phones] - return phones, tones, lang_ids - - -def get_bert(norm_text, word2ph, language, device): - from .chinese_bert import get_bert_feature as zh_bert - from .english_bert_mock import get_bert_feature as en_bert - from .japanese_bert import get_bert_feature as jp_bert - - lang_bert_func_map = {"ZH": zh_bert, "EN": en_bert, "JP": jp_bert} - bert = lang_bert_func_map[language](norm_text, word2ph, device) - return bert diff --git a/spaces/Manjushri/Dall-E-Mini/style.css b/spaces/Manjushri/Dall-E-Mini/style.css deleted file mode 100644 index 114adf441e9032febb46bc056b2a8bb651075f0d..0000000000000000000000000000000000000000 --- a/spaces/Manjushri/Dall-E-Mini/style.css +++ /dev/null @@ -1,28 +0,0 @@ -body { - padding: 2rem; - font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif; -} - -h1 { - font-size: 16px; - margin-top: 0; -} - -p { - color: rgb(107, 114, 128); - font-size: 15px; - margin-bottom: 10px; - margin-top: 5px; -} - -.card { - max-width: 620px; - margin: 0 auto; - padding: 16px; - border: 1px solid lightgray; - border-radius: 16px; -} - -.card p:last-child { - margin-bottom: 0; -} diff --git a/spaces/Manjushri/MusicGen/app.py b/spaces/Manjushri/MusicGen/app.py deleted file mode 100644 index 247513bacaea7bcf797b936d7da52449a17be3a5..0000000000000000000000000000000000000000 --- a/spaces/Manjushri/MusicGen/app.py +++ /dev/null @@ -1,351 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -# Updated to account for UI changes from https://github.com/rkfg/audiocraft/blob/long/app.py -# also released under the MIT license. - -import argparse -from concurrent.futures import ProcessPoolExecutor -import os -import subprocess as sp -from tempfile import NamedTemporaryFile -import time -import warnings -import modin.pandas as pd -import torch -import gradio as gr - -from audiocraft.data.audio_utils import convert_audio -from audiocraft.data.audio import audio_write -from audiocraft.models import MusicGen - - -MODEL = None # Last used model -IS_BATCHED = "facebook/MusicGen" in os.environ.get('SPACE_ID', '') -MAX_BATCH_SIZE = 6 -BATCHED_DURATION = 15 -INTERRUPTING = False -# We have to wrap subprocess call to clean a bit the log when using gr.make_waveform -_old_call = sp.call - - -def _call_nostderr(*args, **kwargs): - # Avoid ffmpeg vomitting on the logs. - kwargs['stderr'] = sp.DEVNULL - kwargs['stdout'] = sp.DEVNULL - _old_call(*args, **kwargs) - - -sp.call = _call_nostderr -# Preallocating the pool of processes. -pool = ProcessPoolExecutor(3) -pool.__enter__() - - -def interrupt(): - global INTERRUPTING - INTERRUPTING = True - - -def make_waveform(*args, **kwargs): - # Further remove some warnings. 
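-    # wraps gr.make_waveform: warnings are silenced below and the render time is printed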
- be = time.time() - with warnings.catch_warnings(): - warnings.simplefilter('ignore') - out = gr.make_waveform(*args, **kwargs) - print("Make a video took", time.time() - be) - return out - - -def load_model(version='melody'): - global MODEL - print("Loading model", version) - if MODEL is None or MODEL.name != version: - MODEL = MusicGen.get_pretrained(version) - - -def _do_predictions(texts, melodies, duration, progress=False, **gen_kwargs): - MODEL.set_generation_params(duration=duration, **gen_kwargs) - print("new batch", len(texts), texts, [None if m is None else (m[0], m[1].shape) for m in melodies]) - be = time.time() - processed_melodies = [] - target_sr = 32000 - target_ac = 1 - for melody in melodies: - if melody is None: - processed_melodies.append(None) - else: - sr, melody = melody[0], torch.from_numpy(melody[1]).to(MODEL.device).float().t() - if melody.dim() == 1: - melody = melody[None] - melody = melody[..., :int(sr * duration)] - melody = convert_audio(melody, sr, target_sr, target_ac) - processed_melodies.append(melody) - - if any(m is not None for m in processed_melodies): - outputs = MODEL.generate_with_chroma( - descriptions=texts, - melody_wavs=processed_melodies, - melody_sample_rate=target_sr, - progress=progress, - ) - else: - outputs = MODEL.generate(texts, progress=progress) - - outputs = outputs.detach().cpu().float() - out_files = [] - for output in outputs: - with NamedTemporaryFile("wb", suffix=".wav", delete=False) as file: - audio_write( - file.name, output, MODEL.sample_rate, strategy="loudness", - loudness_headroom_db=16, loudness_compressor=True, add_suffix=False) - out_files.append(pool.submit(make_waveform, file.name)) - res = [out_file.result() for out_file in out_files] - print("batch finished", len(texts), time.time() - be) - return res - - -def predict_batched(texts, melodies): - max_text_length = 512 - texts = [text[:max_text_length] for text in texts] - load_model('melody') - res = _do_predictions(texts, melodies, BATCHED_DURATION) - return [res] - - -def predict_full(model, text, melody, duration, topk, topp, temperature, cfg_coef, progress=gr.Progress()): - global INTERRUPTING - INTERRUPTING = False - topk = int(topk) - load_model(model) - - def _progress(generated, to_generate): - progress((generated, to_generate)) - if INTERRUPTING: - raise gr.Error("Interrupted.") - MODEL.set_custom_progress_callback(_progress) - - outs = _do_predictions( - [text], [melody], duration, progress=True, - top_k=topk, top_p=topp, temperature=temperature, cfg_coef=cfg_coef) - return outs[0] - -def toggle_audio_src(choice): - if choice == "mic": - return gr.update(source="microphone", value=None, label="Microphone") - else: - return gr.update(source="upload", value=None, label="File") - - -def ui_full(launch_kwargs): - with gr.Blocks() as interface: - gr.Markdown( - """ - # MusicGen - This is a demo for [MusicGen](https://github.com/facebookresearch/audiocraft), a simple and controllable model for music generation - presented at: ["Simple and Controllable Music Generation"](https://huggingface.co/papers/2306.05284) - """ - ) - with gr.Row(): - with gr.Column(): - with gr.Row(): - text = gr.Text(label="Input Text", interactive=True) - with gr.Column(): - radio = gr.Radio(["file", "mic"], value="file", label="Condition on a melody (optional) File or Mic") - melody = gr.Audio(source="upload", type="numpy", label="Melody Condition (optional)", interactive=True) - with gr.Row(): - submit = gr.Button("Submit") - # Adapted from 
https://github.com/rkfg/audiocraft/blob/long/app.py, MIT license. - #_ = gr.Button("Interrupt").click(fn=interrupt, queue=False) - with gr.Row(): - model = gr.Radio(["melody", "medium", "small"], label="Model", value="melody", interactive=True) - with gr.Row(): - duration = gr.Slider(minimum=1, maximum=16, value=8, label="Duration", interactive=True) - with gr.Row(): - topk = gr.Number(label="Top-k", value=250, interactive=True) - topp = gr.Number(label="Top-p", value=0, interactive=True) - temperature = gr.Number(label="Temperature", value=1.0, interactive=True) - cfg_coef = gr.Number(label="Classifier Free Guidance", value=3.0, interactive=True) - with gr.Column(): - output = gr.Video(label="Generated Music") - submit.click(predict_full, inputs=[model, text, melody, duration, topk, topp, temperature, cfg_coef], outputs=[output]) - radio.change(toggle_audio_src, radio, [melody], queue=False, show_progress=False) - gr.Examples( - fn=predict_full, - examples=[ - [ - "An 80s driving pop song with heavy drums and synth pads in the background", - "./assets/bach.mp3", - "melody" - ], - [ - "A cheerful country song with acoustic guitars", - "./assets/bolero_ravel.mp3", - "melody" - ], - [ - "90s rock song with electric guitar and heavy drums", - None, - "medium" - ], - [ - "a light and cheerly EDM track, with syncopated drums, aery pads, and strong emotions", - "./assets/bach.mp3", - "melody" - ], - [ - "lofi slow bpm electro chill with organic samples", - None, - "medium", - ], - ], - inputs=[text, melody, model], - outputs=[output] - ) - gr.Markdown( - """ - ### More details - - The model will generate a short music extract based on the description you provided. - The model can generate up to 30 seconds of audio in one pass. It is now possible - to extend the generation by feeding back the end of the previous chunk of audio. - This can take a long time, and the model might lose consistency. The model might also - decide at arbitrary positions that the song ends. - - **WARNING:** Choosing long durations will take a long time to generate (2min might take ~10min). An overlap of 12 seconds - is kept with the previously generated chunk, and 18 "new" seconds are generated each time. - - We present 4 model variations: - 1. Melody -- a music generation model capable of generating music condition on text and melody inputs. **Note**, you can also use text only. - 2. Small -- a 300M transformer decoder conditioned on text only. - 3. Medium -- a 1.5B transformer decoder conditioned on text only. - 4. Large -- a 3.3B transformer decoder conditioned on text only (might OOM for the longest sequences.) - - When using `melody`, you can optionaly provide a reference audio from - which a broad melody will be extracted. The model will then try to follow both the description and melody provided. - - You can also use your own GPU or a Google Colab by following the instructions on our repo. - See [github.com/facebookresearch/audiocraft](https://github.com/facebookresearch/audiocraft) - for more details. - """ - ) - - interface.queue(max_size=2).launch(**launch_kwargs) - - -def ui_batched(launch_kwargs): - with gr.Blocks() as demo: - gr.Markdown( - """ - # MusicGen - This is the demo for [MusicGen](https://github.com/facebookresearch/audiocraft), a simple and controllable model for music generation - presented at: ["Simple and Controllable Music Generation"](https://huggingface.co/papers/2306.05284). -
- Duplicate Space
- for longer sequences, more control and no queue.
- """ - ) - with gr.Row(): - with gr.Column(): - with gr.Row(): - text = gr.Text(label="Describe your music", lines=2, interactive=True) - with gr.Column(): - radio = gr.Radio(["file", "mic"], value="file", label="Condition on a melody (optional) File or Mic") - melody = gr.Audio(source="upload", type="numpy", label="Melody Condition (optional)", interactive=True) - with gr.Row(): - submit = gr.Button("Generate") - with gr.Column(): - output = gr.Video(label="Generated Music") - submit.click(predict_batched, inputs=[text, melody], outputs=[output], batch=True, max_batch_size=MAX_BATCH_SIZE) - radio.change(toggle_audio_src, radio, [melody], queue=False, show_progress=False) - gr.Examples( - fn=predict_batched, - examples=[ - [ - "An 80s driving pop song with heavy drums and synth pads in the background", - "./assets/bach.mp3", - ], - [ - "A cheerful country song with acoustic guitars", - "./assets/bolero_ravel.mp3", - ], - [ - "90s rock song with electric guitar and heavy drums", - None, - ], - [ - "a light and cheerly EDM track, with syncopated drums, aery pads, and strong emotions bpm: 130", - "./assets/bach.mp3", - ], - [ - "lofi slow bpm electro chill with organic samples", - None, - ], - ], - inputs=[text, melody], - outputs=[output] - ) - gr.Markdown(""" - ### More details - The model will generate 12 seconds of audio based on the description you provided. - You can optionaly provide a reference audio from which a broad melody will be extracted. - The model will then try to follow both the description and melody provided. - All samples are generated with the `melody` model. - You can also use your own GPU or a Google Colab by following the instructions on our repo. - See [github.com/facebookresearch/audiocraft](https://github.com/facebookresearch/audiocraft) - for more details. 
- """) - - demo.queue(max_size=3).launch(**launch_kwargs) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument( - '--listen', - type=str, - default='0.0.0.0' if 'SPACE_ID' in os.environ else '127.0.0.1', - help='IP to listen on for connections to Gradio', - ) - parser.add_argument( - '--username', type=str, default='', help='Username for authentication' - ) - parser.add_argument( - '--password', type=str, default='', help='Password for authentication' - ) - parser.add_argument( - '--server_port', - type=int, - default=0, - help='Port to run the server listener on', - ) - parser.add_argument( - '--inbrowser', action='store_true', help='Open in browser' - ) - parser.add_argument( - '--share', action='store_true', help='Share the gradio UI' - ) - - args = parser.parse_args() - - launch_kwargs = {} - launch_kwargs['server_name'] = args.listen - - if args.username and args.password: - launch_kwargs['auth'] = (args.username, args.password) - if args.server_port: - launch_kwargs['server_port'] = args.server_port - if args.inbrowser: - launch_kwargs['inbrowser'] = args.inbrowser - if args.share: - launch_kwargs['share'] = args.share - - # Show the interface - if IS_BATCHED: - ui_batched(launch_kwargs) - else: - ui_full(launch_kwargs) \ No newline at end of file diff --git a/spaces/MarcusSu1216/XingTong/vdecoder/nsf_hifigan/utils.py b/spaces/MarcusSu1216/XingTong/vdecoder/nsf_hifigan/utils.py deleted file mode 100644 index 84bff024f4d2e2de194b2a88ee7bbe5f0d33f67c..0000000000000000000000000000000000000000 --- a/spaces/MarcusSu1216/XingTong/vdecoder/nsf_hifigan/utils.py +++ /dev/null @@ -1,68 +0,0 @@ -import glob -import os -import matplotlib -import torch -from torch.nn.utils import weight_norm -matplotlib.use("Agg") -import matplotlib.pylab as plt - - -def plot_spectrogram(spectrogram): - fig, ax = plt.subplots(figsize=(10, 2)) - im = ax.imshow(spectrogram, aspect="auto", origin="lower", - interpolation='none') - plt.colorbar(im, ax=ax) - - fig.canvas.draw() - plt.close() - - return fig - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def apply_weight_norm(m): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - weight_norm(m) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size*dilation - dilation)/2) - - -def load_checkpoint(filepath, device): - assert os.path.isfile(filepath) - print("Loading '{}'".format(filepath)) - checkpoint_dict = torch.load(filepath, map_location=device) - print("Complete.") - return checkpoint_dict - - -def save_checkpoint(filepath, obj): - print("Saving checkpoint to {}".format(filepath)) - torch.save(obj, filepath) - print("Complete.") - - -def del_old_checkpoints(cp_dir, prefix, n_models=2): - pattern = os.path.join(cp_dir, prefix + '????????') - cp_list = glob.glob(pattern) # get checkpoint paths - cp_list = sorted(cp_list)# sort by iter - if len(cp_list) > n_models: # if more than n_models models are found - for cp in cp_list[:-n_models]:# delete the oldest models other than lastest n_models - open(cp, 'w').close()# empty file contents - os.unlink(cp)# delete file (move to trash when using Colab) - - -def scan_checkpoint(cp_dir, prefix): - pattern = os.path.join(cp_dir, prefix + '????????') - cp_list = glob.glob(pattern) - if len(cp_list) == 0: - return None - return sorted(cp_list)[-1] - diff --git a/spaces/Marshalls/testmtd/training/options/train_options.py 
b/spaces/Marshalls/testmtd/training/options/train_options.py deleted file mode 100644 index aef495cd05dbb06c4285a34c69d44f283eb84f06..0000000000000000000000000000000000000000 --- a/spaces/Marshalls/testmtd/training/options/train_options.py +++ /dev/null @@ -1,23 +0,0 @@ -from .base_options import BaseOptions -from pytorch_lightning import Trainer - -class TrainOptions(BaseOptions): - - def __init__(self): - super(TrainOptions, self).__init__() - parser = self.parser - parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model') - parser.add_argument('--nepoch_decay', type=int, default=100, help='# of epochs to linearly decay learning rate to zero') - parser.add_argument('--epoch_count', type=int, default=1, help='the starting epoch count, we save the model by , +, ...') - parser.add_argument('--optimizer', type=str, default='adam', help='the optimizer to use') - parser.add_argument('-lr', '--learning_rate', default=1e-4, type=float, help="learning rate") - parser.add_argument('--momentum', default=0, type=float) - parser.add_argument('--weight_decay', default=0, type=float) - parser.add_argument('--lr_policy', type=str, default='lambda', help='learning rate policy: lambda|step|plateau|cosine') - parser.add_argument('--warmup_epochs', type=int, default=10, help='the number of warmup epochs when using lr policy LinearWarmupCosineAnnealing') - parser.add_argument('--lr_decay_iters', type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations') - parser.add_argument('--lr_decay_factor', default=0.1, type=float, help="decay factor to use with multiplicative learning rate schedulers") - parser.add_argument('--lr_decay_milestones', type=str, default='[500,1000]', help='the milestones at which to decay the learning rate, when using the multi step lr policy') - parser = Trainer.add_argparse_args(parser) - self.parser = parser - self.is_train = True diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/ops/masked_conv.py b/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/ops/masked_conv.py deleted file mode 100644 index cd514cc204c1d571ea5dc7e74b038c0f477a008b..0000000000000000000000000000000000000000 --- a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/ops/masked_conv.py +++ /dev/null @@ -1,111 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
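-# Masked 2-D convolution backed by the masked_im2col/masked_col2im extension ops: the forward pass is evaluated only where the binary mask is non-zero, and only stride 1 is supported.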
-import math - -import torch -import torch.nn as nn -from torch.autograd import Function -from torch.autograd.function import once_differentiable -from torch.nn.modules.utils import _pair - -from ..utils import ext_loader - -ext_module = ext_loader.load_ext( - '_ext', ['masked_im2col_forward', 'masked_col2im_forward']) - - -class MaskedConv2dFunction(Function): - - @staticmethod - def symbolic(g, features, mask, weight, bias, padding, stride): - return g.op( - 'mmcv::MMCVMaskedConv2d', - features, - mask, - weight, - bias, - padding_i=padding, - stride_i=stride) - - @staticmethod - def forward(ctx, features, mask, weight, bias, padding=0, stride=1): - assert mask.dim() == 3 and mask.size(0) == 1 - assert features.dim() == 4 and features.size(0) == 1 - assert features.size()[2:] == mask.size()[1:] - pad_h, pad_w = _pair(padding) - stride_h, stride_w = _pair(stride) - if stride_h != 1 or stride_w != 1: - raise ValueError( - 'Stride could not only be 1 in masked_conv2d currently.') - out_channel, in_channel, kernel_h, kernel_w = weight.size() - - batch_size = features.size(0) - out_h = int( - math.floor((features.size(2) + 2 * pad_h - - (kernel_h - 1) - 1) / stride_h + 1)) - out_w = int( - math.floor((features.size(3) + 2 * pad_w - - (kernel_h - 1) - 1) / stride_w + 1)) - mask_inds = torch.nonzero(mask[0] > 0, as_tuple=False) - output = features.new_zeros(batch_size, out_channel, out_h, out_w) - if mask_inds.numel() > 0: - mask_h_idx = mask_inds[:, 0].contiguous() - mask_w_idx = mask_inds[:, 1].contiguous() - data_col = features.new_zeros(in_channel * kernel_h * kernel_w, - mask_inds.size(0)) - ext_module.masked_im2col_forward( - features, - mask_h_idx, - mask_w_idx, - data_col, - kernel_h=kernel_h, - kernel_w=kernel_w, - pad_h=pad_h, - pad_w=pad_w) - - masked_output = torch.addmm(1, bias[:, None], 1, - weight.view(out_channel, -1), data_col) - ext_module.masked_col2im_forward( - masked_output, - mask_h_idx, - mask_w_idx, - output, - height=out_h, - width=out_w, - channels=out_channel) - return output - - @staticmethod - @once_differentiable - def backward(ctx, grad_output): - return (None, ) * 5 - - -masked_conv2d = MaskedConv2dFunction.apply - - -class MaskedConv2d(nn.Conv2d): - """A MaskedConv2d which inherits the official Conv2d. - - The masked forward doesn't implement the backward function and only - supports the stride parameter to be 1 currently. 
- """ - - def __init__(self, - in_channels, - out_channels, - kernel_size, - stride=1, - padding=0, - dilation=1, - groups=1, - bias=True): - super(MaskedConv2d, - self).__init__(in_channels, out_channels, kernel_size, stride, - padding, dilation, groups, bias) - - def forward(self, input, mask=None): - if mask is None: # fallback to the normal Conv2d - return super(MaskedConv2d, self).forward(input) - else: - return masked_conv2d(input, mask, self.weight, self.bias, - self.padding) diff --git a/spaces/MirageML/sjc/sd1/ldm/models/diffusion/plms.py b/spaces/MirageML/sjc/sd1/ldm/models/diffusion/plms.py deleted file mode 100644 index 78eeb1003aa45d27bdbfc6b4a1d7ccbff57cd2e3..0000000000000000000000000000000000000000 --- a/spaces/MirageML/sjc/sd1/ldm/models/diffusion/plms.py +++ /dev/null @@ -1,236 +0,0 @@ -"""SAMPLING ONLY.""" - -import torch -import numpy as np -from tqdm import tqdm -from functools import partial - -from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like - - -class PLMSSampler(object): - def __init__(self, model, schedule="linear", **kwargs): - super().__init__() - self.model = model - self.ddpm_num_timesteps = model.num_timesteps - self.schedule = schedule - - def register_buffer(self, name, attr): - if type(attr) == torch.Tensor: - if attr.device != torch.device("cuda"): - attr = attr.to(torch.device("cuda")) - setattr(self, name, attr) - - def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True): - if ddim_eta != 0: - raise ValueError('ddim_eta must be 0 for PLMS') - self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, - num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose) - alphas_cumprod = self.model.alphas_cumprod - assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep' - to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device) - - self.register_buffer('betas', to_torch(self.model.betas)) - self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) - self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev)) - - # calculations for diffusion q(x_t | x_{t-1}) and others - self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu()))) - self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu()))) - self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu()))) - self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu()))) - self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1))) - - # ddim sampling parameters - ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(), - ddim_timesteps=self.ddim_timesteps, - eta=ddim_eta,verbose=verbose) - self.register_buffer('ddim_sigmas', ddim_sigmas) - self.register_buffer('ddim_alphas', ddim_alphas) - self.register_buffer('ddim_alphas_prev', ddim_alphas_prev) - self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. 
- ddim_alphas)) - sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt( - (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * ( - 1 - self.alphas_cumprod / self.alphas_cumprod_prev)) - self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps) - - @torch.no_grad() - def sample(self, - S, - batch_size, - shape, - conditioning=None, - callback=None, - normals_sequence=None, - img_callback=None, - quantize_x0=False, - eta=0., - mask=None, - x0=None, - temperature=1., - noise_dropout=0., - score_corrector=None, - corrector_kwargs=None, - verbose=True, - x_T=None, - log_every_t=100, - unconditional_guidance_scale=1., - unconditional_conditioning=None, - # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... - **kwargs - ): - if conditioning is not None: - if isinstance(conditioning, dict): - cbs = conditioning[list(conditioning.keys())[0]].shape[0] - if cbs != batch_size: - print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") - else: - if conditioning.shape[0] != batch_size: - print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") - - self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose) - # sampling - C, H, W = shape - size = (batch_size, C, H, W) - print(f'Data shape for PLMS sampling is {size}') - - samples, intermediates = self.plms_sampling(conditioning, size, - callback=callback, - img_callback=img_callback, - quantize_denoised=quantize_x0, - mask=mask, x0=x0, - ddim_use_original_steps=False, - noise_dropout=noise_dropout, - temperature=temperature, - score_corrector=score_corrector, - corrector_kwargs=corrector_kwargs, - x_T=x_T, - log_every_t=log_every_t, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=unconditional_conditioning, - ) - return samples, intermediates - - @torch.no_grad() - def plms_sampling(self, cond, shape, - x_T=None, ddim_use_original_steps=False, - callback=None, timesteps=None, quantize_denoised=False, - mask=None, x0=None, img_callback=None, log_every_t=100, - temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, - unconditional_guidance_scale=1., unconditional_conditioning=None,): - device = self.model.betas.device - b = shape[0] - if x_T is None: - img = torch.randn(shape, device=device) - else: - img = x_T - - if timesteps is None: - timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps - elif timesteps is not None and not ddim_use_original_steps: - subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1 - timesteps = self.ddim_timesteps[:subset_end] - - intermediates = {'x_inter': [img], 'pred_x0': [img]} - time_range = list(reversed(range(0,timesteps))) if ddim_use_original_steps else np.flip(timesteps) - total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0] - print(f"Running PLMS Sampling with {total_steps} timesteps") - - iterator = tqdm(time_range, desc='PLMS Sampler', total=total_steps) - old_eps = [] - - for i, step in enumerate(iterator): - index = total_steps - i - 1 - ts = torch.full((b,), step, device=device, dtype=torch.long) - ts_next = torch.full((b,), time_range[min(i + 1, len(time_range) - 1)], device=device, dtype=torch.long) - - if mask is not None: - assert x0 is not None - img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass? - img = img_orig * mask + (1. 
- mask) * img - - outs = self.p_sample_plms(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps, - quantize_denoised=quantize_denoised, temperature=temperature, - noise_dropout=noise_dropout, score_corrector=score_corrector, - corrector_kwargs=corrector_kwargs, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=unconditional_conditioning, - old_eps=old_eps, t_next=ts_next) - img, pred_x0, e_t = outs - old_eps.append(e_t) - if len(old_eps) >= 4: - old_eps.pop(0) - if callback: callback(i) - if img_callback: img_callback(pred_x0, i) - - if index % log_every_t == 0 or index == total_steps - 1: - intermediates['x_inter'].append(img) - intermediates['pred_x0'].append(pred_x0) - - return img, intermediates - - @torch.no_grad() - def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, - temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, - unconditional_guidance_scale=1., unconditional_conditioning=None, old_eps=None, t_next=None): - b, *_, device = *x.shape, x.device - - def get_model_output(x, t): - if unconditional_conditioning is None or unconditional_guidance_scale == 1.: - e_t = self.model.apply_model(x, t, c) - else: - x_in = torch.cat([x] * 2) - t_in = torch.cat([t] * 2) - c_in = torch.cat([unconditional_conditioning, c]) - e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2) - e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond) - - if score_corrector is not None: - assert self.model.parameterization == "eps" - e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs) - - return e_t - - alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas - alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev - sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas - sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas - - def get_x_prev_and_pred_x0(e_t, index): - # select parameters corresponding to the currently considered timestep - a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) - a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device) - sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device) - sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device) - - # current prediction for x_0 - pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() - if quantize_denoised: - pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) - # direction pointing to x_t - dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t - noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature - if noise_dropout > 0.: - noise = torch.nn.functional.dropout(noise, p=noise_dropout) - x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise - return x_prev, pred_x0 - - e_t = get_model_output(x, t) - if len(old_eps) == 0: - # Pseudo Improved Euler (2nd order) - x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index) - e_t_next = get_model_output(x_prev, t_next) - e_t_prime = (e_t + e_t_next) / 2 - elif len(old_eps) == 1: - # 2nd order Pseudo Linear Multistep (Adams-Bashforth) - e_t_prime = (3 * e_t - old_eps[-1]) / 2 - elif len(old_eps) == 2: - # 3nd order Pseudo Linear Multistep (Adams-Bashforth) - e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12 - elif len(old_eps) >= 3: - # 4nd order Pseudo Linear Multistep (Adams-Bashforth) - e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24 - - x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index) - - return x_prev, pred_x0, e_t diff --git a/spaces/Miuzarte/SUI-svc-4.0/train.py b/spaces/Miuzarte/SUI-svc-4.0/train.py deleted file mode 100644 index 0fc80bf4aacf143feaf08575eb285910c0c8ce0a..0000000000000000000000000000000000000000 --- a/spaces/Miuzarte/SUI-svc-4.0/train.py +++ /dev/null @@ -1,297 +0,0 @@ -import logging -logging.getLogger('matplotlib').setLevel(logging.WARNING) -import os -import json -import argparse -import itertools -import math -import torch -from torch import nn, optim -from torch.nn import functional as F -from torch.utils.data import DataLoader -from torch.utils.tensorboard import SummaryWriter -import torch.multiprocessing as mp -import torch.distributed as dist -from torch.nn.parallel import DistributedDataParallel as DDP -from torch.cuda.amp import autocast, GradScaler - -import modules.commons as commons -import utils -from data_utils import TextAudioSpeakerLoader, TextAudioCollate -from models import ( - SynthesizerTrn, - MultiPeriodDiscriminator, -) -from modules.losses import ( - kl_loss, - generator_loss, discriminator_loss, feature_loss -) - -from modules.mel_processing import mel_spectrogram_torch, spec_to_mel_torch - -torch.backends.cudnn.benchmark = True -global_step = 0 - - -# os.environ['TORCH_DISTRIBUTED_DEBUG'] = 'INFO' - - -def main(): - """Assume Single Node Multi GPUs Training Only""" - assert torch.cuda.is_available(), "CPU training is not allowed." 
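-    # single-node multi-GPU launch: read hparams, set MASTER_ADDR/MASTER_PORT, then spawn one `run` worker per visible GPU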
- hps = utils.get_hparams() - - n_gpus = torch.cuda.device_count() - os.environ['MASTER_ADDR'] = 'localhost' - os.environ['MASTER_PORT'] = hps.train.port - - mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps,)) - - -def run(rank, n_gpus, hps): - global global_step - if rank == 0: - logger = utils.get_logger(hps.model_dir) - logger.info(hps) - utils.check_git_hash(hps.model_dir) - writer = SummaryWriter(log_dir=hps.model_dir) - writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval")) - - # for pytorch on win, backend use gloo - dist.init_process_group(backend= 'gloo' if os.name == 'nt' else 'nccl', init_method='env://', world_size=n_gpus, rank=rank) - torch.manual_seed(hps.train.seed) - torch.cuda.set_device(rank) - collate_fn = TextAudioCollate() - train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps) - train_loader = DataLoader(train_dataset, num_workers=8, shuffle=False, pin_memory=True, - batch_size=hps.train.batch_size,collate_fn=collate_fn) - if rank == 0: - eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps) - eval_loader = DataLoader(eval_dataset, num_workers=1, shuffle=False, - batch_size=1, pin_memory=False, - drop_last=False, collate_fn=collate_fn) - - net_g = SynthesizerTrn( - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - **hps.model).cuda(rank) - net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank) - optim_g = torch.optim.AdamW( - net_g.parameters(), - hps.train.learning_rate, - betas=hps.train.betas, - eps=hps.train.eps) - optim_d = torch.optim.AdamW( - net_d.parameters(), - hps.train.learning_rate, - betas=hps.train.betas, - eps=hps.train.eps) - net_g = DDP(net_g, device_ids=[rank]) # , find_unused_parameters=True) - net_d = DDP(net_d, device_ids=[rank]) - - skip_optimizer = True - try: - _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, - optim_g, skip_optimizer) - _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, - optim_d, skip_optimizer) - global_step = (epoch_str - 1) * len(train_loader) - except: - print("load old checkpoint failed...") - epoch_str = 1 - global_step = 0 - if skip_optimizer: - epoch_str = 1 - global_step = 0 - - scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2) - scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2) - - scaler = GradScaler(enabled=hps.train.fp16_run) - - for epoch in range(epoch_str, hps.train.epochs + 1): - if rank == 0: - train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, - [train_loader, eval_loader], logger, [writer, writer_eval]) - else: - train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, - [train_loader, None], None, None) - scheduler_g.step() - scheduler_d.step() - - -def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers): - net_g, net_d = nets - optim_g, optim_d = optims - scheduler_g, scheduler_d = schedulers - train_loader, eval_loader = loaders - if writers is not None: - writer, writer_eval = writers - - # train_loader.batch_sampler.set_epoch(epoch) - global global_step - - net_g.train() - net_d.train() - for batch_idx, items in enumerate(train_loader): - c, f0, spec, y, spk, lengths, uv = items - g = spk.cuda(rank, 
non_blocking=True) - spec, y = spec.cuda(rank, non_blocking=True), y.cuda(rank, non_blocking=True) - c = c.cuda(rank, non_blocking=True) - f0 = f0.cuda(rank, non_blocking=True) - uv = uv.cuda(rank, non_blocking=True) - lengths = lengths.cuda(rank, non_blocking=True) - mel = spec_to_mel_torch( - spec, - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.mel_fmin, - hps.data.mel_fmax) - - with autocast(enabled=hps.train.fp16_run): - y_hat, ids_slice, z_mask, \ - (z, z_p, m_p, logs_p, m_q, logs_q), pred_lf0, norm_lf0, lf0 = net_g(c, f0, uv, spec, g=g, c_lengths=lengths, - spec_lengths=lengths) - - y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length) - y_hat_mel = mel_spectrogram_torch( - y_hat.squeeze(1), - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.hop_length, - hps.data.win_length, - hps.data.mel_fmin, - hps.data.mel_fmax - ) - y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size) # slice - - # Discriminator - y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach()) - - with autocast(enabled=False): - loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g) - loss_disc_all = loss_disc - - optim_d.zero_grad() - scaler.scale(loss_disc_all).backward() - scaler.unscale_(optim_d) - grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None) - scaler.step(optim_d) - - with autocast(enabled=hps.train.fp16_run): - # Generator - y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat) - with autocast(enabled=False): - loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel - loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl - loss_fm = feature_loss(fmap_r, fmap_g) - loss_gen, losses_gen = generator_loss(y_d_hat_g) - loss_lf0 = F.mse_loss(pred_lf0, lf0) - loss_gen_all = loss_gen + loss_fm + loss_mel + loss_kl + loss_lf0 - optim_g.zero_grad() - scaler.scale(loss_gen_all).backward() - scaler.unscale_(optim_g) - grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None) - scaler.step(optim_g) - scaler.update() - - if rank == 0: - if global_step % hps.train.log_interval == 0: - lr = optim_g.param_groups[0]['lr'] - losses = [loss_disc, loss_gen, loss_fm, loss_mel, loss_kl] - logger.info('Train Epoch: {} [{:.0f}%]'.format( - epoch, - 100. 
* batch_idx / len(train_loader))) - logger.info([x.item() for x in losses] + [global_step, lr]) - - scalar_dict = {"loss/g/total": loss_gen_all, "loss/d/total": loss_disc_all, "learning_rate": lr, - "grad_norm_d": grad_norm_d, "grad_norm_g": grad_norm_g} - scalar_dict.update({"loss/g/fm": loss_fm, "loss/g/mel": loss_mel, "loss/g/kl": loss_kl, - "loss/g/lf0": loss_lf0}) - - # scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)}) - # scalar_dict.update({"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)}) - # scalar_dict.update({"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)}) - image_dict = { - "slice/mel_org": utils.plot_spectrogram_to_numpy(y_mel[0].data.cpu().numpy()), - "slice/mel_gen": utils.plot_spectrogram_to_numpy(y_hat_mel[0].data.cpu().numpy()), - "all/mel": utils.plot_spectrogram_to_numpy(mel[0].data.cpu().numpy()), - "all/lf0": utils.plot_data_to_numpy(lf0[0, 0, :].cpu().numpy(), - pred_lf0[0, 0, :].detach().cpu().numpy()), - "all/norm_lf0": utils.plot_data_to_numpy(lf0[0, 0, :].cpu().numpy(), - norm_lf0[0, 0, :].detach().cpu().numpy()) - } - - utils.summarize( - writer=writer, - global_step=global_step, - images=image_dict, - scalars=scalar_dict - ) - - if global_step % hps.train.eval_interval == 0: - evaluate(hps, net_g, eval_loader, writer_eval) - utils.save_checkpoint(net_g, optim_g, hps.train.learning_rate, epoch, - os.path.join(hps.model_dir, "G_{}.pth".format(global_step)), hps.train.eval_interval, global_step) - utils.save_checkpoint(net_d, optim_d, hps.train.learning_rate, epoch, - os.path.join(hps.model_dir, "D_{}.pth".format(global_step)), hps.train.eval_interval, global_step) - global_step += 1 - - if rank == 0: - logger.info('====> Epoch: {}'.format(epoch)) - - -def evaluate(hps, generator, eval_loader, writer_eval): - generator.eval() - image_dict = {} - audio_dict = {} - with torch.no_grad(): - for batch_idx, items in enumerate(eval_loader): - c, f0, spec, y, spk, _, uv = items - g = spk[:1].cuda(0) - spec, y = spec[:1].cuda(0), y[:1].cuda(0) - c = c[:1].cuda(0) - f0 = f0[:1].cuda(0) - uv= uv[:1].cuda(0) - mel = spec_to_mel_torch( - spec, - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.mel_fmin, - hps.data.mel_fmax) - y_hat = generator.module.infer(c, f0, uv, g=g) - - y_hat_mel = mel_spectrogram_torch( - y_hat.squeeze(1).float(), - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.hop_length, - hps.data.win_length, - hps.data.mel_fmin, - hps.data.mel_fmax - ) - - audio_dict.update({ - f"gen/audio_{batch_idx}": y_hat[0], - f"gt/audio_{batch_idx}": y[0] - }) - image_dict.update({ - f"gen/mel": utils.plot_spectrogram_to_numpy(y_hat_mel[0].cpu().numpy()), - "gt/mel": utils.plot_spectrogram_to_numpy(mel[0].cpu().numpy()) - }) - utils.summarize( - writer=writer_eval, - global_step=global_step, - images=image_dict, - audios=audio_dict, - audio_sampling_rate=hps.data.sampling_rate - ) - generator.train() - - -if __name__ == "__main__": - main() diff --git a/spaces/Mountchicken/MAERec-Gradio/configs/textdet/dbnetpp/dbnetpp_resnet50-dcnv2_fpnc_100k_synthtext.py b/spaces/Mountchicken/MAERec-Gradio/configs/textdet/dbnetpp/dbnetpp_resnet50-dcnv2_fpnc_100k_synthtext.py deleted file mode 100644 index 7174055dae61e8e4406e891359aa38957acf6a24..0000000000000000000000000000000000000000 --- a/spaces/Mountchicken/MAERec-Gradio/configs/textdet/dbnetpp/dbnetpp_resnet50-dcnv2_fpnc_100k_synthtext.py +++ /dev/null @@ -1,44 +0,0 @@ -_base_ = [ - 
'_base_dbnetpp_resnet50-dcnv2_fpnc.py', - '../_base_/pretrain_runtime.py', - '../_base_/datasets/synthtext.py', - '../_base_/schedules/schedule_sgd_100k.py', -] - -train_pipeline = [ - dict(type='LoadImageFromFile', color_type='color_ignore_orientation'), - dict( - type='LoadOCRAnnotations', - with_bbox=True, - with_polygon=True, - with_label=True, - ), - dict(type='FixInvalidPolygon'), - dict( - type='TorchVisionWrapper', - op='ColorJitter', - brightness=32.0 / 255, - saturation=0.5), - dict( - type='ImgAugWrapper', - args=[['Fliplr', 0.5], - dict(cls='Affine', rotate=[-10, 10]), ['Resize', [0.5, 3.0]]]), - dict(type='RandomCrop', min_side_ratio=0.1), - dict(type='Resize', scale=(640, 640), keep_ratio=True), - dict(type='Pad', size=(640, 640)), - dict( - type='PackTextDetInputs', - meta_keys=('img_path', 'ori_shape', 'img_shape')) -] - -synthtext_textdet_train = _base_.synthtext_textdet_train -synthtext_textdet_train.pipeline = train_pipeline - -train_dataloader = dict( - batch_size=16, - num_workers=8, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=synthtext_textdet_train) - -auto_scale_lr = dict(base_batch_size=16) diff --git a/spaces/MrBodean/VoiceClone/synthesizer/synthesize.py b/spaces/MrBodean/VoiceClone/synthesizer/synthesize.py deleted file mode 100644 index ffc7dc2678e85006b9f66d910fcae3e307c521a8..0000000000000000000000000000000000000000 --- a/spaces/MrBodean/VoiceClone/synthesizer/synthesize.py +++ /dev/null @@ -1,97 +0,0 @@ -import torch -from torch.utils.data import DataLoader -from synthesizer.hparams import hparams_debug_string -from synthesizer.synthesizer_dataset import SynthesizerDataset, collate_synthesizer -from synthesizer.models.tacotron import Tacotron -from synthesizer.utils.text import text_to_sequence -from synthesizer.utils.symbols import symbols -import numpy as np -from pathlib import Path -from tqdm import tqdm -import platform - -def run_synthesis(in_dir, out_dir, model_dir, hparams): - # This generates ground truth-aligned mels for vocoder training - synth_dir = Path(out_dir).joinpath("mels_gta") - synth_dir.mkdir(exist_ok=True) - print(hparams_debug_string()) - - # Check for GPU - if torch.cuda.is_available(): - device = torch.device("cuda") - if hparams.synthesis_batch_size % torch.cuda.device_count() != 0: - raise ValueError("`hparams.synthesis_batch_size` must be evenly divisible by n_gpus!") - else: - device = torch.device("cpu") - print("Synthesizer using device:", device) - - # Instantiate Tacotron model - model = Tacotron(embed_dims=hparams.tts_embed_dims, - num_chars=len(symbols), - encoder_dims=hparams.tts_encoder_dims, - decoder_dims=hparams.tts_decoder_dims, - n_mels=hparams.num_mels, - fft_bins=hparams.num_mels, - postnet_dims=hparams.tts_postnet_dims, - encoder_K=hparams.tts_encoder_K, - lstm_dims=hparams.tts_lstm_dims, - postnet_K=hparams.tts_postnet_K, - num_highways=hparams.tts_num_highways, - dropout=0., # Use zero dropout for gta mels - stop_threshold=hparams.tts_stop_threshold, - speaker_embedding_size=hparams.speaker_embedding_size).to(device) - - # Load the weights - model_dir = Path(model_dir) - model_fpath = model_dir.joinpath(model_dir.stem).with_suffix(".pt") - print("\nLoading weights at %s" % model_fpath) - model.load(model_fpath) - print("Tacotron weights loaded from step %d" % model.step) - - # Synthesize using same reduction factor as the model is currently trained - r = np.int32(model.r) - - # Set model to eval mode (disable gradient and zoneout) - model.eval() - - # Initialize the 
dataset - in_dir = Path(in_dir) - metadata_fpath = in_dir.joinpath("train.txt") - mel_dir = in_dir.joinpath("mels") - embed_dir = in_dir.joinpath("embeds") - - dataset = SynthesizerDataset(metadata_fpath, mel_dir, embed_dir, hparams) - data_loader = DataLoader(dataset, - collate_fn=lambda batch: collate_synthesizer(batch, r, hparams), - batch_size=hparams.synthesis_batch_size, - num_workers=2 if platform.system() != "Windows" else 0, - shuffle=False, - pin_memory=True) - - # Generate GTA mels - meta_out_fpath = Path(out_dir).joinpath("synthesized.txt") - with open(meta_out_fpath, "w") as file: - for i, (texts, mels, embeds, idx) in tqdm(enumerate(data_loader), total=len(data_loader)): - texts = texts.to(device) - mels = mels.to(device) - embeds = embeds.to(device) - - # Parallelize model onto GPUS using workaround due to python bug - if device.type == "cuda" and torch.cuda.device_count() > 1: - _, mels_out, _ = data_parallel_workaround(model, texts, mels, embeds) - else: - _, mels_out, _, _ = model(texts, mels, embeds) - - for j, k in enumerate(idx): - # Note: outputs mel-spectrogram files and target ones have same names, just different folders - mel_filename = Path(synth_dir).joinpath(dataset.metadata[k][1]) - mel_out = mels_out[j].detach().cpu().numpy().T - - # Use the length of the ground truth mel to remove padding from the generated mels - mel_out = mel_out[:int(dataset.metadata[k][4])] - - # Write the spectrogram to disk - np.save(mel_filename, mel_out, allow_pickle=False) - - # Write metadata into the synthesized file - file.write("|".join(dataset.metadata[k])) diff --git a/spaces/Mrchuw/text-to-image_6_by_6/README.md b/spaces/Mrchuw/text-to-image_6_by_6/README.md deleted file mode 100644 index 99d798609ac4e3ed9667f7eda6752cc40766fa54..0000000000000000000000000000000000000000 --- a/spaces/Mrchuw/text-to-image_6_by_6/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Text To Image -emoji: 🌖 -colorFrom: gray -colorTo: purple -sdk: gradio -sdk_version: 3.20.0 -app_file: app.py -pinned: false -duplicated_from: xp3857/text-to-image ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/NATSpeech/DiffSpeech/data_gen/tts/runs/binarize.py b/spaces/NATSpeech/DiffSpeech/data_gen/tts/runs/binarize.py deleted file mode 100644 index e89aeb4795e749c64e565ecb26dfd0c8e3232801..0000000000000000000000000000000000000000 --- a/spaces/NATSpeech/DiffSpeech/data_gen/tts/runs/binarize.py +++ /dev/null @@ -1,17 +0,0 @@ -import utils.commons.single_thread_env # NOQA -from utils.commons.hparams import hparams, set_hparams -import importlib - - -def binarize(): - binarizer_cls = hparams.get("binarizer_cls", 'data_gen.tts.base_binarizer.BaseBinarizer') - pkg = ".".join(binarizer_cls.split(".")[:-1]) - cls_name = binarizer_cls.split(".")[-1] - binarizer_cls = getattr(importlib.import_module(pkg), cls_name) - print("| Binarizer: ", binarizer_cls) - binarizer_cls().process() - - -if __name__ == '__main__': - set_hparams() - binarize() diff --git a/spaces/NATSpeech/PortaSpeech/tasks/tts/vocoder_infer/base_vocoder.py b/spaces/NATSpeech/PortaSpeech/tasks/tts/vocoder_infer/base_vocoder.py deleted file mode 100644 index 0ab88f4e78be66ba1821e5a6720193b1d614f4f5..0000000000000000000000000000000000000000 --- a/spaces/NATSpeech/PortaSpeech/tasks/tts/vocoder_infer/base_vocoder.py +++ /dev/null @@ -1,63 +0,0 @@ -import librosa -from utils.audio import librosa_wav2spec -from utils.commons.hparams import hparams -import numpy as np - -REGISTERED_VOCODERS 
= {} - - -def register_vocoder(name): - def _f(cls): - REGISTERED_VOCODERS[name] = cls - return cls - - return _f - - -def get_vocoder_cls(vocoder_name): - return REGISTERED_VOCODERS.get(vocoder_name) - - -class BaseVocoder: - def spec2wav(self, mel): - """ - - :param mel: [T, 80] - :return: wav: [T'] - """ - - raise NotImplementedError - - @staticmethod - def wav2spec(wav_fn): - """ - - :param wav_fn: str - :return: wav, mel: [T, 80] - """ - wav_spec_dict = librosa_wav2spec(wav_fn, fft_size=hparams['fft_size'], - hop_size=hparams['hop_size'], - win_length=hparams['win_size'], - num_mels=hparams['audio_num_mel_bins'], - fmin=hparams['fmin'], - fmax=hparams['fmax'], - sample_rate=hparams['audio_sample_rate'], - loud_norm=hparams['loud_norm']) - wav = wav_spec_dict['wav'] - mel = wav_spec_dict['mel'] - return wav, mel - - @staticmethod - def wav2mfcc(wav_fn): - fft_size = hparams['fft_size'] - hop_size = hparams['hop_size'] - win_length = hparams['win_size'] - sample_rate = hparams['audio_sample_rate'] - wav, _ = librosa.core.load(wav_fn, sr=sample_rate) - mfcc = librosa.feature.mfcc(y=wav, sr=sample_rate, n_mfcc=13, - n_fft=fft_size, hop_length=hop_size, - win_length=win_length, pad_mode="constant", power=1.0) - mfcc_delta = librosa.feature.delta(mfcc, order=1) - mfcc_delta_delta = librosa.feature.delta(mfcc, order=2) - mfcc = np.concatenate([mfcc, mfcc_delta, mfcc_delta_delta]).T - return mfcc diff --git a/spaces/NCTCMumbai/NCTC/models/official/nlp/bert/input_pipeline.py b/spaces/NCTCMumbai/NCTC/models/official/nlp/bert/input_pipeline.py deleted file mode 100644 index ed3fd173d4379a75ab1e2e5a9ba0bbdcbaa0be42..0000000000000000000000000000000000000000 --- a/spaces/NCTCMumbai/NCTC/models/official/nlp/bert/input_pipeline.py +++ /dev/null @@ -1,285 +0,0 @@ -# Copyright 2019 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""BERT model input pipelines.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import tensorflow as tf - - -def decode_record(record, name_to_features): - """Decodes a record to a TensorFlow example.""" - example = tf.io.parse_single_example(record, name_to_features) - - # tf.Example only supports tf.int64, but the TPU only supports tf.int32. - # So cast all int64 to int32. - for name in list(example.keys()): - t = example[name] - if t.dtype == tf.int64: - t = tf.cast(t, tf.int32) - example[name] = t - - return example - - -def single_file_dataset(input_file, name_to_features): - """Creates a single-file dataset to be passed for BERT custom training.""" - # For training, we want a lot of parallel reading and shuffling. - # For eval, we want no shuffling and parallel reading doesn't matter. 
- d = tf.data.TFRecordDataset(input_file) - d = d.map( - lambda record: decode_record(record, name_to_features), - num_parallel_calls=tf.data.experimental.AUTOTUNE) - - # When `input_file` is a path to a single file or a list - # containing a single path, disable auto sharding so that - # same input file is sent to all workers. - if isinstance(input_file, str) or len(input_file) == 1: - options = tf.data.Options() - options.experimental_distribute.auto_shard_policy = ( - tf.data.experimental.AutoShardPolicy.OFF) - d = d.with_options(options) - return d - - -def create_pretrain_dataset(input_patterns, - seq_length, - max_predictions_per_seq, - batch_size, - is_training=True, - input_pipeline_context=None, - use_next_sentence_label=True, - use_position_id=False, - output_fake_labels=True): - """Creates input dataset from (tf)records files for pretraining.""" - name_to_features = { - 'input_ids': - tf.io.FixedLenFeature([seq_length], tf.int64), - 'input_mask': - tf.io.FixedLenFeature([seq_length], tf.int64), - 'segment_ids': - tf.io.FixedLenFeature([seq_length], tf.int64), - 'masked_lm_positions': - tf.io.FixedLenFeature([max_predictions_per_seq], tf.int64), - 'masked_lm_ids': - tf.io.FixedLenFeature([max_predictions_per_seq], tf.int64), - 'masked_lm_weights': - tf.io.FixedLenFeature([max_predictions_per_seq], tf.float32), - } - if use_next_sentence_label: - name_to_features['next_sentence_labels'] = tf.io.FixedLenFeature([1], - tf.int64) - if use_position_id: - name_to_features['position_ids'] = tf.io.FixedLenFeature([seq_length], - tf.int64) - for input_pattern in input_patterns: - if not tf.io.gfile.glob(input_pattern): - raise ValueError('%s does not match any files.' % input_pattern) - - dataset = tf.data.Dataset.list_files(input_patterns, shuffle=is_training) - - if input_pipeline_context and input_pipeline_context.num_input_pipelines > 1: - dataset = dataset.shard(input_pipeline_context.num_input_pipelines, - input_pipeline_context.input_pipeline_id) - if is_training: - dataset = dataset.repeat() - - # We set shuffle buffer to exactly match total number of - # training files to ensure that training data is well shuffled. - input_files = [] - for input_pattern in input_patterns: - input_files.extend(tf.io.gfile.glob(input_pattern)) - dataset = dataset.shuffle(len(input_files)) - - # In parallel, create tf record dataset for each train files. - # cycle_length = 8 means that up to 8 files will be read and deserialized in - # parallel. You may want to increase this number if you have a large number of - # CPU cores. - dataset = dataset.interleave( - tf.data.TFRecordDataset, - cycle_length=8, - num_parallel_calls=tf.data.experimental.AUTOTUNE) - - if is_training: - dataset = dataset.shuffle(100) - - decode_fn = lambda record: decode_record(record, name_to_features) - dataset = dataset.map( - decode_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE) - - def _select_data_from_record(record): - """Filter out features to use for pretraining.""" - x = { - 'input_word_ids': record['input_ids'], - 'input_mask': record['input_mask'], - 'input_type_ids': record['segment_ids'], - 'masked_lm_positions': record['masked_lm_positions'], - 'masked_lm_ids': record['masked_lm_ids'], - 'masked_lm_weights': record['masked_lm_weights'], - } - if use_next_sentence_label: - x['next_sentence_labels'] = record['next_sentence_labels'] - if use_position_id: - x['position_ids'] = record['position_ids'] - - # TODO(hongkuny): Remove the fake labels after migrating bert pretraining. 
- if output_fake_labels: - return (x, record['masked_lm_weights']) - else: - return x - - dataset = dataset.map( - _select_data_from_record, - num_parallel_calls=tf.data.experimental.AUTOTUNE) - dataset = dataset.batch(batch_size, drop_remainder=is_training) - dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE) - return dataset - - -def create_classifier_dataset(file_path, - seq_length, - batch_size, - is_training=True, - input_pipeline_context=None, - label_type=tf.int64, - include_sample_weights=False): - """Creates input dataset from (tf)records files for train/eval.""" - name_to_features = { - 'input_ids': tf.io.FixedLenFeature([seq_length], tf.int64), - 'input_mask': tf.io.FixedLenFeature([seq_length], tf.int64), - 'segment_ids': tf.io.FixedLenFeature([seq_length], tf.int64), - 'label_ids': tf.io.FixedLenFeature([], label_type), - } - if include_sample_weights: - name_to_features['weight'] = tf.io.FixedLenFeature([], tf.float32) - dataset = single_file_dataset(file_path, name_to_features) - - # The dataset is always sharded by number of hosts. - # num_input_pipelines is the number of hosts rather than number of cores. - if input_pipeline_context and input_pipeline_context.num_input_pipelines > 1: - dataset = dataset.shard(input_pipeline_context.num_input_pipelines, - input_pipeline_context.input_pipeline_id) - - def _select_data_from_record(record): - x = { - 'input_word_ids': record['input_ids'], - 'input_mask': record['input_mask'], - 'input_type_ids': record['segment_ids'] - } - y = record['label_ids'] - if include_sample_weights: - w = record['weight'] - return (x, y, w) - return (x, y) - - if is_training: - dataset = dataset.shuffle(100) - dataset = dataset.repeat() - - dataset = dataset.map( - _select_data_from_record, - num_parallel_calls=tf.data.experimental.AUTOTUNE) - dataset = dataset.batch(batch_size, drop_remainder=is_training) - dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE) - return dataset - - -def create_squad_dataset(file_path, - seq_length, - batch_size, - is_training=True, - input_pipeline_context=None): - """Creates input dataset from (tf)records files for train/eval.""" - name_to_features = { - 'input_ids': tf.io.FixedLenFeature([seq_length], tf.int64), - 'input_mask': tf.io.FixedLenFeature([seq_length], tf.int64), - 'segment_ids': tf.io.FixedLenFeature([seq_length], tf.int64), - } - if is_training: - name_to_features['start_positions'] = tf.io.FixedLenFeature([], tf.int64) - name_to_features['end_positions'] = tf.io.FixedLenFeature([], tf.int64) - else: - name_to_features['unique_ids'] = tf.io.FixedLenFeature([], tf.int64) - - dataset = single_file_dataset(file_path, name_to_features) - - # The dataset is always sharded by number of hosts. - # num_input_pipelines is the number of hosts rather than number of cores. 
- if input_pipeline_context and input_pipeline_context.num_input_pipelines > 1: - dataset = dataset.shard(input_pipeline_context.num_input_pipelines, - input_pipeline_context.input_pipeline_id) - - def _select_data_from_record(record): - """Dispatches record to features and labels.""" - x, y = {}, {} - for name, tensor in record.items(): - if name in ('start_positions', 'end_positions'): - y[name] = tensor - elif name == 'input_ids': - x['input_word_ids'] = tensor - elif name == 'segment_ids': - x['input_type_ids'] = tensor - else: - x[name] = tensor - return (x, y) - - if is_training: - dataset = dataset.shuffle(100) - dataset = dataset.repeat() - - dataset = dataset.map( - _select_data_from_record, - num_parallel_calls=tf.data.experimental.AUTOTUNE) - dataset = dataset.batch(batch_size, drop_remainder=True) - dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE) - return dataset - - -def create_retrieval_dataset(file_path, - seq_length, - batch_size, - input_pipeline_context=None): - """Creates input dataset from (tf)records files for scoring.""" - name_to_features = { - 'input_ids': tf.io.FixedLenFeature([seq_length], tf.int64), - 'input_mask': tf.io.FixedLenFeature([seq_length], tf.int64), - 'segment_ids': tf.io.FixedLenFeature([seq_length], tf.int64), - 'int_iden': tf.io.FixedLenFeature([1], tf.int64), - } - dataset = single_file_dataset(file_path, name_to_features) - - # The dataset is always sharded by number of hosts. - # num_input_pipelines is the number of hosts rather than number of cores. - if input_pipeline_context and input_pipeline_context.num_input_pipelines > 1: - dataset = dataset.shard(input_pipeline_context.num_input_pipelines, - input_pipeline_context.input_pipeline_id) - - def _select_data_from_record(record): - x = { - 'input_word_ids': record['input_ids'], - 'input_mask': record['input_mask'], - 'input_type_ids': record['segment_ids'] - } - y = record['int_iden'] - return (x, y) - - dataset = dataset.map( - _select_data_from_record, - num_parallel_calls=tf.data.experimental.AUTOTUNE) - dataset = dataset.batch(batch_size, drop_remainder=False) - dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE) - return dataset diff --git a/spaces/NCTCMumbai/NCTC/models/official/vision/detection/utils/object_detection/region_similarity_calculator.py b/spaces/NCTCMumbai/NCTC/models/official/vision/detection/utils/object_detection/region_similarity_calculator.py deleted file mode 100644 index 0af2ce495ad53c9df0f8d2eb79f7431b02ab430e..0000000000000000000000000000000000000000 --- a/spaces/NCTCMumbai/NCTC/models/official/vision/detection/utils/object_detection/region_similarity_calculator.py +++ /dev/null @@ -1,143 +0,0 @@ -# Copyright 2017 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Region Similarity Calculators for BoxLists. - -Region Similarity Calculators compare a pairwise measure of similarity -between the boxes in two BoxLists. 
-""" -from abc import ABCMeta -from abc import abstractmethod - -import tensorflow as tf - - -def area(boxlist, scope=None): - """Computes area of boxes. - - Args: - boxlist: BoxList holding N boxes - scope: name scope. - - Returns: - a tensor with shape [N] representing box areas. - """ - if not scope: - scope = 'Area' - with tf.name_scope(scope): - y_min, x_min, y_max, x_max = tf.split( - value=boxlist.get(), num_or_size_splits=4, axis=1) - return tf.squeeze((y_max - y_min) * (x_max - x_min), [1]) - - -def intersection(boxlist1, boxlist2, scope=None): - """Compute pairwise intersection areas between boxes. - - Args: - boxlist1: BoxList holding N boxes - boxlist2: BoxList holding M boxes - scope: name scope. - - Returns: - a tensor with shape [N, M] representing pairwise intersections - """ - if not scope: - scope = 'Intersection' - with tf.name_scope(scope): - y_min1, x_min1, y_max1, x_max1 = tf.split( - value=boxlist1.get(), num_or_size_splits=4, axis=1) - y_min2, x_min2, y_max2, x_max2 = tf.split( - value=boxlist2.get(), num_or_size_splits=4, axis=1) - all_pairs_min_ymax = tf.minimum(y_max1, tf.transpose(a=y_max2)) - all_pairs_max_ymin = tf.maximum(y_min1, tf.transpose(a=y_min2)) - intersect_heights = tf.maximum(0.0, all_pairs_min_ymax - all_pairs_max_ymin) - all_pairs_min_xmax = tf.minimum(x_max1, tf.transpose(a=x_max2)) - all_pairs_max_xmin = tf.maximum(x_min1, tf.transpose(a=x_min2)) - intersect_widths = tf.maximum(0.0, all_pairs_min_xmax - all_pairs_max_xmin) - return intersect_heights * intersect_widths - - -def iou(boxlist1, boxlist2, scope=None): - """Computes pairwise intersection-over-union between box collections. - - Args: - boxlist1: BoxList holding N boxes - boxlist2: BoxList holding M boxes - scope: name scope. - - Returns: - a tensor with shape [N, M] representing pairwise iou scores. - """ - if not scope: - scope = 'IOU' - with tf.name_scope(scope): - intersections = intersection(boxlist1, boxlist2) - areas1 = area(boxlist1) - areas2 = area(boxlist2) - unions = ( - tf.expand_dims(areas1, 1) + tf.expand_dims(areas2, 0) - intersections) - return tf.where( - tf.equal(intersections, 0.0), tf.zeros_like(intersections), - tf.truediv(intersections, unions)) - - -class RegionSimilarityCalculator(object): - """Abstract base class for region similarity calculator.""" - __metaclass__ = ABCMeta - - def compare(self, boxlist1, boxlist2, scope=None): - """Computes matrix of pairwise similarity between BoxLists. - - This op (to be overriden) computes a measure of pairwise similarity between - the boxes in the given BoxLists. Higher values indicate more similarity. - - Note that this method simply measures similarity and does not explicitly - perform a matching. - - Args: - boxlist1: BoxList holding N boxes. - boxlist2: BoxList holding M boxes. - scope: Op scope name. Defaults to 'Compare' if None. - - Returns: - a (float32) tensor of shape [N, M] with pairwise similarity score. - """ - if not scope: - scope = 'Compare' - with tf.name_scope(scope) as scope: - return self._compare(boxlist1, boxlist2) - - @abstractmethod - def _compare(self, boxlist1, boxlist2): - pass - - -class IouSimilarity(RegionSimilarityCalculator): - """Class to compute similarity based on Intersection over Union (IOU) metric. - - This class computes pairwise similarity between two BoxLists based on IOU. - """ - - def _compare(self, boxlist1, boxlist2): - """Compute pairwise IOU similarity between the two BoxLists. - - Args: - boxlist1: BoxList holding N boxes. - boxlist2: BoxList holding M boxes. 
- - Returns: - A tensor with shape [N, M] representing pairwise iou scores. - """ - return iou(boxlist1, boxlist2) diff --git a/spaces/Naveentalluri/NaveenGenAIAvatar/README.md b/spaces/Naveentalluri/NaveenGenAIAvatar/README.md deleted file mode 100644 index d5f9d88d03c625785bdc54e40a2fc5b7f0719dfa..0000000000000000000000000000000000000000 --- a/spaces/Naveentalluri/NaveenGenAIAvatar/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: NaveenGenAIAvatar -emoji: 🔥 -colorFrom: yellow -colorTo: yellow -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/data/pad_dataset.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/data/pad_dataset.py deleted file mode 100644 index 8075bba6a9efc5f8421368ee0b2ae66afe3f5009..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/data/pad_dataset.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from fairseq.data import data_utils - -from . import BaseWrapperDataset - - -class PadDataset(BaseWrapperDataset): - def __init__(self, dataset, pad_idx, left_pad): - super().__init__(dataset) - self.pad_idx = pad_idx - self.left_pad = left_pad - - def collater(self, samples): - return data_utils.collate_tokens(samples, self.pad_idx, left_pad=self.left_pad) - - -class LeftPadDataset(PadDataset): - def __init__(self, dataset, pad_idx): - super().__init__(dataset, pad_idx, left_pad=True) - - -class RightPadDataset(PadDataset): - def __init__(self, dataset, pad_idx): - super().__init__(dataset, pad_idx, left_pad=False) diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/__init__.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/__init__.py deleted file mode 100644 index 44bb24ae614941f23fea29c56d60167650c39bcb..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -try: - from fairseq.version import __version__ # noqa -except ImportError: - pass diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/models/nat/insertion_transformer.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/models/nat/insertion_transformer.py deleted file mode 100644 index bc28000f59a3b9e8098f9fe710cc8335d39eea3e..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/models/nat/insertion_transformer.py +++ /dev/null @@ -1,280 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import numpy as np -import torch -import torch.nn.functional as F -from fairseq.models import register_model, register_model_architecture -from fairseq.models.nat import ( - FairseqNATModel, - LevenshteinTransformerDecoder, - LevenshteinTransformerModel, - ensemble_decoder, -) -from fairseq.models.transformer import Linear -from fairseq.modules.transformer_sentence_encoder import init_bert_params -from fairseq.utils import new_arange - - -class NegativeDistanceScore(object): - def __init__(self): - - # pre-compute some values - self.scores = {} - - self.scores[0.5] = self.compute_score_full(50, 0.5) - self.scores[1.0] = self.compute_score_full(50, 1.0) - self.scores[2.0] = self.compute_score_full(50, 2.0) - - def __call__(self, i, L, tau): - if (tau is None) or (tau > 1000): - return 1 / L - - if tau in self.scores: - if L < self.scores[tau].shape[0]: - return self.scores[tau][L - 1, i] - return self.compute_score(L, tau)[i] - - def compute_score(self, L, tau): - s = np.array([-abs(L / 2 - i) / tau for i in range(L)]) - s = np.exp(s - s.max()) - return s / s.sum() - - def compute_score_full(self, L, tau): - s = -abs(np.arange(0, L - 1)[:, None] / 2 - np.arange(L)[None, :]) / tau - s = np.tril(s, 0) + np.triu(s - float("inf"), 1) - s = np.exp(s - s.max(1, keepdims=True)) - return s / s.sum(1, keepdims=True) - - -neg_scorer = NegativeDistanceScore() - - -def _get_ins_targets(in_tokens, out_tokens, padding_idx, unk_idx, vocab_size, tau=None): - try: - from fairseq import libnat - except ImportError as e: - import sys - - sys.stderr.write("ERROR: missing libnat. run `pip install --editable .`\n") - raise e - - B = in_tokens.size(0) - T = in_tokens.size(1) - V = vocab_size - - with torch.cuda.device_of(in_tokens): - in_tokens_list = [ - [t for t in s if t != padding_idx] for i, s in enumerate(in_tokens.tolist()) - ] - out_tokens_list = [ - [t for t in s if t != padding_idx] - for i, s in enumerate(out_tokens.tolist()) - ] - - full_labels = libnat.suggested_ed2_path( - in_tokens_list, out_tokens_list, padding_idx - ) - insert_labels = [a[:-1] for a in full_labels] - - # numericalize1 - insert_label_tensors = in_tokens.new_zeros(B * (T - 1) * V).float() - insert_index, insert_labels = zip( - *[ - (w + (j + i * (T - 1)) * V, neg_scorer(k, len(label), tau)) - for i, labels in enumerate(insert_labels) - for j, label in enumerate(labels[1:-1]) - for k, w in enumerate(label) - ] - ) # HACK 1:-1 - insert_index, insert_labels = [ - torch.tensor(list(a), device=in_tokens.device) - for a in [insert_index, insert_labels] - ] - insert_label_tensors.scatter_(0, insert_index.long(), insert_labels) - insert_label_tensors = insert_label_tensors.view(B, T - 1, V) - - return insert_label_tensors - - -def _apply_ins_words(in_tokens, in_scores, word_ins_pred, word_ins_scores, padding_idx): - - padding_masks = in_tokens[:, 1:].eq(padding_idx) - word_ins_scores.masked_fill_(padding_masks, 0.0) - word_ins_pred.masked_fill_(padding_masks, padding_idx) - - in_coords = new_arange(in_tokens).type_as(in_scores) - - # shift all padding predictions to infinite - out_coords = (in_coords[:, 1:] - 0.5).masked_fill( - word_ins_pred.eq(padding_idx), float("inf") - ) - out_coords = torch.cat([in_coords, out_coords], 1).sort(-1)[1] - out_tokens = torch.cat([in_tokens, word_ins_pred], 1).gather(1, out_coords) - out_scores = torch.cat([in_scores, word_ins_scores], 1).gather(1, out_coords) - return out_tokens, out_scores - - -@register_model("insertion_transformer") -class InsertionTransformerModel(LevenshteinTransformerModel): - 
def __init__(self, args, encoder, decoder): - super().__init__(args, encoder, decoder) - - @staticmethod - def add_args(parser): - FairseqNATModel.add_args(parser) - parser.add_argument("--label-tau", default=None, type=float) - - @classmethod - def build_decoder(cls, args, tgt_dict, embed_tokens): - decoder = InsertionTransformerDecoder(args, tgt_dict, embed_tokens) - if getattr(args, "apply_bert_init", False): - decoder.apply(init_bert_params) - return decoder - - def forward( - self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs - ): - - assert tgt_tokens is not None, "forward function only supports training." - - # encoding - encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) - - # generate training labels for insertion - word_ins_out = self.decoder.forward_word_ins( - normalize=False, - prev_output_tokens=prev_output_tokens, - encoder_out=encoder_out, - ) - - word_ins_tgt = _get_ins_targets( - prev_output_tokens, - tgt_tokens, - self.pad, - self.unk, - len(self.tgt_dict), - tau=self.decoder.label_tau, - ).type_as(word_ins_out) - word_ins_masks = prev_output_tokens[:, 1:].ne(self.pad) - - return { - "word_ins": { - "out": word_ins_out, - "tgt": word_ins_tgt, - "mask": word_ins_masks, - "ls": self.args.label_smoothing, - "nll_loss": True, - } - } - - def forward_decoder( - self, decoder_out, encoder_out, eos_penalty=0.0, max_ratio=None, **kwargs - ): - - output_tokens = decoder_out.output_tokens - output_scores = decoder_out.output_scores - history = decoder_out.history - - # TODO: decoding for InsertionTransformer - word_ins_score = self.decoder.forward_word_ins( - normalize=True, prev_output_tokens=output_tokens, encoder_out=encoder_out - ) - - if eos_penalty > 0.0: - word_ins_score[:, :, self.pad] -= eos_penalty - word_ins_score, word_ins_pred = word_ins_score.max(-1) - output_tokens, output_scores = _apply_ins_words( - output_tokens, output_scores, word_ins_pred, word_ins_score, self.pad - ) - - # delete some unnecessary paddings - cut_off = output_tokens.ne(self.pad).sum(1).max() - output_tokens = output_tokens[:, :cut_off] - output_scores = output_scores[:, :cut_off] - - if history is not None: - history.append(output_tokens.clone()) - - return decoder_out._replace( - output_tokens=output_tokens, - output_scores=output_scores, - attn=None, - history=history, - ) - - -class InsertionTransformerDecoder(LevenshteinTransformerDecoder): - def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): - # use the TransformerDecoder's __init__ - super(LevenshteinTransformerDecoder, self).__init__( - args, dictionary, embed_tokens, no_encoder_attn=no_encoder_attn - ) - - self.dictionary = dictionary - self.bos = dictionary.bos() - self.unk = dictionary.unk() - self.eos = dictionary.eos() - self.pool_out = Linear(self.output_embed_dim * 2, self.output_embed_dim) - - self.label_tau = getattr(args, "label_tau", None) - - @ensemble_decoder - def forward_word_ins(self, normalize, encoder_out, prev_output_tokens): - features = self.extract_features(prev_output_tokens, encoder_out=encoder_out)[0] - features = self.pool_out( - torch.cat([features[:, :-1, :], features[:, 1:, :]], 2) - ) - decoder_out = self.output_layer(features) - return F.log_softmax(decoder_out, -1) if normalize else decoder_out - - def forward_mask_ins(self, *args, **kwargs): - raise NotImplementedError - - def forward_word_del(self, *args, **kwargs): - raise NotImplementedError - - -@register_model_architecture("insertion_transformer", "insertion_transformer") -def 
insertion_base_architecture(args): - args.encoder_embed_path = getattr(args, "encoder_embed_path", None) - args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) - args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) - args.encoder_layers = getattr(args, "encoder_layers", 6) - args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) - args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) - args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) - args.decoder_embed_path = getattr(args, "decoder_embed_path", None) - args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) - args.decoder_ffn_embed_dim = getattr( - args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim - ) - args.decoder_layers = getattr(args, "decoder_layers", 6) - args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) - args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) - args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) - args.attention_dropout = getattr(args, "attention_dropout", 0.0) - args.activation_dropout = getattr(args, "activation_dropout", 0.0) - args.activation_fn = getattr(args, "activation_fn", "relu") - args.dropout = getattr(args, "dropout", 0.1) - args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) - args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) - args.share_decoder_input_output_embed = getattr( - args, "share_decoder_input_output_embed", False - ) - args.share_all_embeddings = getattr(args, "share_all_embeddings", False) - args.no_token_positional_embeddings = getattr( - args, "no_token_positional_embeddings", False - ) - args.adaptive_input = getattr(args, "adaptive_input", False) - args.apply_bert_init = getattr(args, "apply_bert_init", False) - - args.decoder_output_dim = getattr( - args, "decoder_output_dim", args.decoder_embed_dim - ) - args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) - - # special for insertion transformer - args.label_tau = getattr(args, "label_tau", None) diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/modules/quantization/scalar/modules/qemb.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/modules/quantization/scalar/modules/qemb.py deleted file mode 100644 index d6cf06e5872cb86e5c2e726153c7a80c78db9d1e..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/modules/quantization/scalar/modules/qemb.py +++ /dev/null @@ -1,147 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import torch -import torch.nn as nn -import torch.nn.functional as F - -from ..ops import emulate_int - - -class IntEmbedding(nn.Module): - """ - Quantized counterpart of the nn.Embedding module that applies QuantNoise during training. 
- - Args: - - num_embeddings: number of tokens - - embedding_dim: embedding dimension - - p: amount of noise to inject (0 = no quantization, 1 = quantize all the weights) - - bits: number of bits - - method: choose among {"tensor", "histogram", "channel"} - - update_step: recompute scale and zero_point every update_steps iterations - - Remarks: - - We use the straight-through estimator so that the gradients - back-propagate nicely in the network, this is implemented with - the detach() trick - - Parameters scale and zero_point are recomputed every update_step - forward pass to reduce the overhead - - At test time, the weights are fully quantized - """ - - def __init__( - self, - num_embeddings, - embedding_dim, - padding_idx=None, - max_norm=None, - norm_type=2.0, - scale_grad_by_freq=False, - sparse=False, - _weight=None, - p=0, - update_step=1000, - bits=8, - method="histogram", - ): - super(IntEmbedding, self).__init__() - self.num_embeddings = num_embeddings - self.embedding_dim = embedding_dim - if padding_idx is not None: - if padding_idx > 0: - assert ( - padding_idx < self.num_embeddings - ), "Padding_idx must be within num_embeddings" - elif padding_idx < 0: - assert ( - padding_idx >= -self.num_embeddings - ), "Padding_idx must be within num_embeddings" - padding_idx = self.num_embeddings + padding_idx - self.padding_idx = padding_idx - self.max_norm = max_norm - self.norm_type = norm_type - self.scale_grad_by_freq = scale_grad_by_freq - if _weight is None: - self.weight = nn.Parameter(torch.Tensor(num_embeddings, embedding_dim)) - self.reset_parameters() - else: - assert list(_weight.shape) == [ - num_embeddings, - embedding_dim, - ], "Shape of weight does not match num_embeddings and embedding_dim" - self.weight = nn.Parameter(_weight) - self.sparse = sparse - - # quantization parameters - self.p = p - self.bits = bits - self.method = method - self.update_step = update_step - self.counter = 0 - - def reset_parameters(self): - nn.init.normal_(self.weight) - if self.padding_idx is not None: - with torch.no_grad(): - self.weight[self.padding_idx].fill_(0) - - def forward(self, input): - # train with QuantNoise and evaluate the fully quantized network - p = self.p if self.training else 1 - - # update parameters every 1000 iterations - if self.counter % self.update_step == 0: - self.scale = None - self.zero_point = None - self.counter += 1 - - # quantize weight - weight_quantized, self.scale, self.zero_point = emulate_int( - self.weight.detach(), - bits=self.bits, - method=self.method, - scale=self.scale, - zero_point=self.zero_point, - ) - - # mask to apply noise - mask = torch.zeros_like(self.weight) - mask.bernoulli_(1 - p) - noise = (weight_quantized - self.weight).masked_fill(mask.bool(), 0) - - # using straight-through estimator (STE) - clamp_low = -self.scale * self.zero_point - clamp_high = self.scale * (2 ** self.bits - 1 - self.zero_point) - weight = ( - torch.clamp(self.weight, clamp_low.item(), clamp_high.item()) - + noise.detach() - ) - - # return output - output = F.embedding( - input, - weight, - self.padding_idx, - self.max_norm, - self.norm_type, - self.scale_grad_by_freq, - self.sparse, - ) - return output - - def extra_repr(self): - s = "{num_embeddings}, {embedding_dim}" - if self.padding_idx is not None: - s += ", padding_idx={padding_idx}" - if self.max_norm is not None: - s += ", max_norm={max_norm}" - if self.norm_type != 2: - s += ", norm_type={norm_type}" - if self.scale_grad_by_freq is not False: - s += ", scale_grad_by_freq={scale_grad_by_freq}" - if 
self.sparse is not False: - s += ", sparse=True" - s += "quant_noise={p}, bits={bits}, method={method}" - return s.format(**self.__dict__) diff --git a/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/backtranslation/tokenized_bleu.sh b/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/backtranslation/tokenized_bleu.sh deleted file mode 100644 index c6d6aaa193f6059299bc98909324fe4b9b060372..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/backtranslation/tokenized_bleu.sh +++ /dev/null @@ -1,46 +0,0 @@ -#!/bin/bash - -if [ $# -ne 5 ]; then - echo "usage: $0 [dataset=wmt14/full] [langpair=en-de] [databin] [bpecode] [model]" - exit -fi - - -DATASET=$1 -LANGPAIR=$2 -DATABIN=$3 -BPECODE=$4 -MODEL=$5 - -SRCLANG=$(echo $LANGPAIR | cut -d '-' -f 1) -TGTLANG=$(echo $LANGPAIR | cut -d '-' -f 2) - - -BPEROOT=examples/backtranslation/subword-nmt/subword_nmt -if [ ! -e $BPEROOT ]; then - BPEROOT=subword-nmt/subword_nmt - if [ ! -e $BPEROOT ]; then - echo 'Cloning Subword NMT repository (for BPE pre-processing)...' - git clone https://github.com/rsennrich/subword-nmt.git - fi -fi - - -TMP_REF=$(mktemp) - -sacrebleu -t $DATASET -l $LANGPAIR --echo ref -q \ -| sacremoses normalize -l $TGTLANG -q \ -| sacremoses tokenize -a -l $TGTLANG -q \ -> $TMP_REF - -sacrebleu -t $DATASET -l $LANGPAIR --echo src -q \ -| sacremoses normalize -l $SRCLANG -q \ -| sacremoses tokenize -a -l $SRCLANG -q \ -| python $BPEROOT/apply_bpe.py -c $BPECODE \ -| fairseq-interactive $DATABIN --path $MODEL \ - -s $SRCLANG -t $TGTLANG \ - --beam 5 --remove-bpe --buffer-size 1024 --max-tokens 8000 \ -| grep ^H- | cut -f 3- \ -| fairseq-score --ref $TMP_REF - -rm -f $TMP_REF diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/speech_to_text/README.md b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/speech_to_text/README.md deleted file mode 100644 index f639d300d342f8de1392c98bfc44ec8690188539..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/speech_to_text/README.md +++ /dev/null @@ -1,77 +0,0 @@ -# Speech-to-Text (S2T) Modeling - -[https://www.aclweb.org/anthology/2020.aacl-demo.6](https://www.aclweb.org/anthology/2020.aacl-demo.6.pdf) - -Speech recognition (ASR) and speech-to-text translation (ST) with fairseq. - -## Data Preparation -S2T modeling data consists of source speech features, target text and other optional information -(source text, speaker id, etc.). Fairseq S2T uses per-dataset-split TSV manifest files -to store these information. Each data field is represented by a column in the TSV file. - -Unlike text token embeddings, speech features (e.g. log mel-scale filter banks) are usually fixed -during model training and can be pre-computed. The manifest file contains the path to -either the feature file in NumPy format or the WAV/FLAC audio file. For the latter, -features will be extracted on-the-fly by fairseq S2T. Optionally, feature/audio files can be packed -into uncompressed ZIP files (then accessed via byte offset and length) to improve I/O performance. - -Fairseq S2T also employs a YAML file for data related configurations: tokenizer type and dictionary path -for the target text, feature transforms such as CMVN (cepstral mean and variance normalization) and SpecAugment, -temperature-based resampling, etc. - -## Model Training -Fairseq S2T uses the unified `fairseq-train` interface for model training. It requires arguments `--task speech_to_text`, - `--arch ` and `--config-yaml `. 
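As a concrete illustration, a minimal training invocation could look like the sketch below. The data root, subset names, and tuning values (`--lr`, `--max-tokens`, `--warmup-updates`, etc.) are placeholders chosen for illustration, and `s2t_transformer_s` is just one of the available `--arch` choices; the per-dataset documents under Examples give the recommended settings.

```bash
# Hedged sketch of a fairseq-train call for S2T; paths and hyperparameters are illustrative.
fairseq-train ${S2T_DATA_ROOT} \
  --task speech_to_text --config-yaml config.yaml \
  --train-subset train --valid-subset dev \
  --arch s2t_transformer_s \
  --criterion label_smoothed_cross_entropy --label-smoothing 0.1 \
  --optimizer adam --lr 2e-3 --lr-scheduler inverse_sqrt --warmup-updates 10000 \
  --max-tokens 40000 --max-update 100000 \
  --save-dir ${S2T_SAVE_DIR} --num-workers 4 --clip-norm 10.0 --seed 1
```

The same manifest/config pair is reused at inference time, which is why the generation commands described in the next section only need `--task speech_to_text` and `--config-yaml` on top of the trained checkpoint.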
- -## Inference & Evaluation -Fairseq S2T uses the unified `fairseq-generate`/`fairseq-interactive` interface for inference and evaluation. It -requires arguments `--task speech_to_text` and `--config-yaml `. The interactive console takes -audio paths (one per line) as inputs. - - -## Examples -- [Speech Recognition (ASR) on LibriSpeech](docs/librispeech_example.md) - -- [Speech-to-Text Translation (ST) on MuST-C](docs/mustc_example.md) - -- [Speech-to-Text Translation (ST) on CoVoST 2](docs/covost_example.md) - -- [Speech-to-Text Translation (ST) on Multilingual TEDx](docs/mtedx_example.md) -- [Simultaneous Speech-to-Text Translation (SimulST) on MuST-C](docs/simulst_mustc_example.md) - -## Updates -- 02/04/2021: Added interactive decoding (`fairseq-interactive`) support. Examples: - [ASR (LibriSpeech)](docs/librispeech_example.md#interactive-decoding) - and [ST (CoVoST 2)](docs/covost_example.md#interactive-decoding). -- 01/08/2021: Several fixes for S2T Transformer model, inference-time de-tokenization, scorer configuration and data - preparation scripts. We also add pre-trained models to the examples and revise the instructions. - Breaking changes: the data preparation scripts now extract filterbank features without CMVN. CMVN is instead applied - on-the-fly (defined in the config YAML). - -## What's Next -- We are migrating the old fairseq [ASR example](../speech_recognition) into this S2T framework and - merging the features from both sides. -- The following papers also base their experiments on fairseq S2T. We are adding more examples for replication. - - [Improving Cross-Lingual Transfer Learning for End-to-End Speech Recognition with Speech Translation (Wang et al., 2020)](https://arxiv.org/abs/2006.05474) - - [Self-Supervised Representations Improve End-to-End Speech Translation (Wu et al., 2020)](https://arxiv.org/abs/2006.12124) - - [Self-Training for End-to-End Speech Translation (Pino et al., 2020)](https://arxiv.org/abs/2006.02490) - - [CoVoST: A Diverse Multilingual Speech-To-Text Translation Corpus (Wang et al., 2020)](https://arxiv.org/abs/2002.01320) - - [Harnessing Indirect Training Data for End-to-End Automatic Speech Translation: Tricks of the Trade (Pino et al., 2019)](https://arxiv.org/abs/1909.06515) - -## Citation -Please cite as: -``` -@inproceedings{wang2020fairseqs2t, - title = {fairseq S2T: Fast Speech-to-Text Modeling with fairseq}, - author = {Changhan Wang and Yun Tang and Xutai Ma and Anne Wu and Dmytro Okhonko and Juan Pino}, - booktitle = {Proceedings of the 2020 Conference of the Asian Chapter of the Association for Computational Linguistics (AACL): System Demonstrations}, - year = {2020}, -} - -@inproceedings{ott2019fairseq, - title = {fairseq: A Fast, Extensible Toolkit for Sequence Modeling}, - author = {Myle Ott and Sergey Edunov and Alexei Baevski and Angela Fan and Sam Gross and Nathan Ng and David Grangier and Michael Auli}, - booktitle = {Proceedings of NAACL-HLT 2019: Demonstrations}, - year = {2019}, -} -``` diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/wav2vec/unsupervised/__init__.py b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/wav2vec/unsupervised/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/data/pad_dataset.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/data/pad_dataset.py deleted file mode 100644 index 8075bba6a9efc5f8421368ee0b2ae66afe3f5009..0000000000000000000000000000000000000000 --- 
a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/data/pad_dataset.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from fairseq.data import data_utils - -from . import BaseWrapperDataset - - -class PadDataset(BaseWrapperDataset): - def __init__(self, dataset, pad_idx, left_pad): - super().__init__(dataset) - self.pad_idx = pad_idx - self.left_pad = left_pad - - def collater(self, samples): - return data_utils.collate_tokens(samples, self.pad_idx, left_pad=self.left_pad) - - -class LeftPadDataset(PadDataset): - def __init__(self, dataset, pad_idx): - super().__init__(dataset, pad_idx, left_pad=True) - - -class RightPadDataset(PadDataset): - def __init__(self, dataset, pad_idx): - super().__init__(dataset, pad_idx, left_pad=False) diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/models/lstm.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/models/lstm.py deleted file mode 100644 index e1e66a7d50fa1b1b313e9d1a6e7862ac9bfaa074..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/models/lstm.py +++ /dev/null @@ -1,753 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from typing import Dict, List, Optional, Tuple - -import torch -import torch.nn as nn -import torch.nn.functional as F -from fairseq import utils -from fairseq.models import ( - FairseqEncoder, - FairseqEncoderDecoderModel, - FairseqIncrementalDecoder, - register_model, - register_model_architecture, -) -from fairseq.modules import AdaptiveSoftmax, FairseqDropout -from torch import Tensor - - -DEFAULT_MAX_SOURCE_POSITIONS = 1e5 -DEFAULT_MAX_TARGET_POSITIONS = 1e5 - - -@register_model("lstm") -class LSTMModel(FairseqEncoderDecoderModel): - def __init__(self, encoder, decoder): - super().__init__(encoder, decoder) - - @staticmethod - def add_args(parser): - """Add model-specific arguments to the parser.""" - # fmt: off - parser.add_argument('--dropout', type=float, metavar='D', - help='dropout probability') - parser.add_argument('--encoder-embed-dim', type=int, metavar='N', - help='encoder embedding dimension') - parser.add_argument('--encoder-embed-path', type=str, metavar='STR', - help='path to pre-trained encoder embedding') - parser.add_argument('--encoder-freeze-embed', action='store_true', - help='freeze encoder embeddings') - parser.add_argument('--encoder-hidden-size', type=int, metavar='N', - help='encoder hidden size') - parser.add_argument('--encoder-layers', type=int, metavar='N', - help='number of encoder layers') - parser.add_argument('--encoder-bidirectional', action='store_true', - help='make all layers of encoder bidirectional') - parser.add_argument('--decoder-embed-dim', type=int, metavar='N', - help='decoder embedding dimension') - parser.add_argument('--decoder-embed-path', type=str, metavar='STR', - help='path to pre-trained decoder embedding') - parser.add_argument('--decoder-freeze-embed', action='store_true', - help='freeze decoder embeddings') - parser.add_argument('--decoder-hidden-size', type=int, metavar='N', - help='decoder hidden size') - parser.add_argument('--decoder-layers', type=int, metavar='N', - help='number of decoder layers') - parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N', - help='decoder output embedding dimension') - 
parser.add_argument('--decoder-attention', type=str, metavar='BOOL', - help='decoder attention') - parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', - help='comma separated list of adaptive softmax cutoff points. ' - 'Must be used with adaptive_loss criterion') - parser.add_argument('--share-decoder-input-output-embed', default=False, - action='store_true', - help='share decoder input and output embeddings') - parser.add_argument('--share-all-embeddings', default=False, action='store_true', - help='share encoder, decoder and output embeddings' - ' (requires shared dictionary and embed dim)') - - # Granular dropout settings (if not specified these default to --dropout) - parser.add_argument('--encoder-dropout-in', type=float, metavar='D', - help='dropout probability for encoder input embedding') - parser.add_argument('--encoder-dropout-out', type=float, metavar='D', - help='dropout probability for encoder output') - parser.add_argument('--decoder-dropout-in', type=float, metavar='D', - help='dropout probability for decoder input embedding') - parser.add_argument('--decoder-dropout-out', type=float, metavar='D', - help='dropout probability for decoder output') - # fmt: on - - @classmethod - def build_model(cls, args, task): - """Build a new model instance.""" - # make sure that all args are properly defaulted (in case there are any new ones) - base_architecture(args) - - if args.encoder_layers != args.decoder_layers: - raise ValueError("--encoder-layers must match --decoder-layers") - - max_source_positions = getattr( - args, "max_source_positions", DEFAULT_MAX_SOURCE_POSITIONS - ) - max_target_positions = getattr( - args, "max_target_positions", DEFAULT_MAX_TARGET_POSITIONS - ) - - def load_pretrained_embedding_from_file(embed_path, dictionary, embed_dim): - num_embeddings = len(dictionary) - padding_idx = dictionary.pad() - embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx) - embed_dict = utils.parse_embedding(embed_path) - utils.print_embed_overlap(embed_dict, dictionary) - return utils.load_embedding(embed_dict, dictionary, embed_tokens) - - if args.encoder_embed_path: - pretrained_encoder_embed = load_pretrained_embedding_from_file( - args.encoder_embed_path, task.source_dictionary, args.encoder_embed_dim - ) - else: - num_embeddings = len(task.source_dictionary) - pretrained_encoder_embed = Embedding( - num_embeddings, args.encoder_embed_dim, task.source_dictionary.pad() - ) - - if args.share_all_embeddings: - # double check all parameters combinations are valid - if task.source_dictionary != task.target_dictionary: - raise ValueError("--share-all-embeddings requires a joint dictionary") - if args.decoder_embed_path and ( - args.decoder_embed_path != args.encoder_embed_path - ): - raise ValueError( - "--share-all-embed not compatible with --decoder-embed-path" - ) - if args.encoder_embed_dim != args.decoder_embed_dim: - raise ValueError( - "--share-all-embeddings requires --encoder-embed-dim to " - "match --decoder-embed-dim" - ) - pretrained_decoder_embed = pretrained_encoder_embed - args.share_decoder_input_output_embed = True - else: - # separate decoder input embeddings - pretrained_decoder_embed = None - if args.decoder_embed_path: - pretrained_decoder_embed = load_pretrained_embedding_from_file( - args.decoder_embed_path, - task.target_dictionary, - args.decoder_embed_dim, - ) - # one last double check of parameter combinations - if args.share_decoder_input_output_embed and ( - args.decoder_embed_dim != args.decoder_out_embed_dim - ): - raise 
ValueError( - "--share-decoder-input-output-embeddings requires " - "--decoder-embed-dim to match --decoder-out-embed-dim" - ) - - if args.encoder_freeze_embed: - pretrained_encoder_embed.weight.requires_grad = False - if args.decoder_freeze_embed: - pretrained_decoder_embed.weight.requires_grad = False - - encoder = LSTMEncoder( - dictionary=task.source_dictionary, - embed_dim=args.encoder_embed_dim, - hidden_size=args.encoder_hidden_size, - num_layers=args.encoder_layers, - dropout_in=args.encoder_dropout_in, - dropout_out=args.encoder_dropout_out, - bidirectional=args.encoder_bidirectional, - pretrained_embed=pretrained_encoder_embed, - max_source_positions=max_source_positions, - ) - decoder = LSTMDecoder( - dictionary=task.target_dictionary, - embed_dim=args.decoder_embed_dim, - hidden_size=args.decoder_hidden_size, - out_embed_dim=args.decoder_out_embed_dim, - num_layers=args.decoder_layers, - dropout_in=args.decoder_dropout_in, - dropout_out=args.decoder_dropout_out, - attention=utils.eval_bool(args.decoder_attention), - encoder_output_units=encoder.output_units, - pretrained_embed=pretrained_decoder_embed, - share_input_output_embed=args.share_decoder_input_output_embed, - adaptive_softmax_cutoff=( - utils.eval_str_list(args.adaptive_softmax_cutoff, type=int) - if args.criterion == "adaptive_loss" - else None - ), - max_target_positions=max_target_positions, - residuals=False, - ) - return cls(encoder, decoder) - - def forward( - self, - src_tokens, - src_lengths, - prev_output_tokens, - incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, - ): - encoder_out = self.encoder(src_tokens, src_lengths=src_lengths) - decoder_out = self.decoder( - prev_output_tokens, - encoder_out=encoder_out, - incremental_state=incremental_state, - ) - return decoder_out - - -class LSTMEncoder(FairseqEncoder): - """LSTM encoder.""" - - def __init__( - self, - dictionary, - embed_dim=512, - hidden_size=512, - num_layers=1, - dropout_in=0.1, - dropout_out=0.1, - bidirectional=False, - left_pad=True, - pretrained_embed=None, - padding_idx=None, - max_source_positions=DEFAULT_MAX_SOURCE_POSITIONS, - ): - super().__init__(dictionary) - self.num_layers = num_layers - self.dropout_in_module = FairseqDropout( - dropout_in*1.0, module_name=self.__class__.__name__ - ) - self.dropout_out_module = FairseqDropout( - dropout_out*1.0, module_name=self.__class__.__name__ - ) - self.bidirectional = bidirectional - self.hidden_size = hidden_size - self.max_source_positions = max_source_positions - - num_embeddings = len(dictionary) - self.padding_idx = padding_idx if padding_idx is not None else dictionary.pad() - if pretrained_embed is None: - self.embed_tokens = Embedding(num_embeddings, embed_dim, self.padding_idx) - else: - self.embed_tokens = pretrained_embed - - self.lstm = LSTM( - input_size=embed_dim, - hidden_size=hidden_size, - num_layers=num_layers, - dropout=self.dropout_out_module.p if num_layers > 1 else 0.0, - bidirectional=bidirectional, - ) - self.left_pad = left_pad - - self.output_units = hidden_size - if bidirectional: - self.output_units *= 2 - - def forward( - self, - src_tokens: Tensor, - src_lengths: Tensor, - enforce_sorted: bool = True, - ): - """ - Args: - src_tokens (LongTensor): tokens in the source language of - shape `(batch, src_len)` - src_lengths (LongTensor): lengths of each source sentence of - shape `(batch)` - enforce_sorted (bool, optional): if True, `src_tokens` is - expected to contain sequences sorted by length in a - decreasing order. 
If False, this condition is not - required. Default: True. - """ - if self.left_pad: - # nn.utils.rnn.pack_padded_sequence requires right-padding; - # convert left-padding to right-padding - src_tokens = utils.convert_padding_direction( - src_tokens, - torch.zeros_like(src_tokens).fill_(self.padding_idx), - left_to_right=True, - ) - - bsz, seqlen = src_tokens.size() - - # embed tokens - x = self.embed_tokens(src_tokens) - x = self.dropout_in_module(x) - - # B x T x C -> T x B x C - x = x.transpose(0, 1) - - # pack embedded source tokens into a PackedSequence - packed_x = nn.utils.rnn.pack_padded_sequence( - x, src_lengths.cpu(), enforce_sorted=enforce_sorted - ) - - # apply LSTM - if self.bidirectional: - state_size = 2 * self.num_layers, bsz, self.hidden_size - else: - state_size = self.num_layers, bsz, self.hidden_size - h0 = x.new_zeros(*state_size) - c0 = x.new_zeros(*state_size) - packed_outs, (final_hiddens, final_cells) = self.lstm(packed_x, (h0, c0)) - - # unpack outputs and apply dropout - x, _ = nn.utils.rnn.pad_packed_sequence( - packed_outs, padding_value=self.padding_idx * 1.0 - ) - x = self.dropout_out_module(x) - assert list(x.size()) == [seqlen, bsz, self.output_units] - - if self.bidirectional: - final_hiddens = self.combine_bidir(final_hiddens, bsz) - final_cells = self.combine_bidir(final_cells, bsz) - - encoder_padding_mask = src_tokens.eq(self.padding_idx).t() - - return tuple( - ( - x, # seq_len x batch x hidden - final_hiddens, # num_layers x batch x num_directions*hidden - final_cells, # num_layers x batch x num_directions*hidden - encoder_padding_mask, # seq_len x batch - ) - ) - - def combine_bidir(self, outs, bsz: int): - out = outs.view(self.num_layers, 2, bsz, -1).transpose(1, 2).contiguous() - return out.view(self.num_layers, bsz, -1) - - def reorder_encoder_out(self, encoder_out: Tuple[Tensor, Tensor, Tensor, Tensor], new_order): - return tuple( - ( - encoder_out[0].index_select(1, new_order), - encoder_out[1].index_select(1, new_order), - encoder_out[2].index_select(1, new_order), - encoder_out[3].index_select(1, new_order), - ) - ) - - def max_positions(self): - """Maximum input length supported by the encoder.""" - return self.max_source_positions - - -class AttentionLayer(nn.Module): - def __init__(self, input_embed_dim, source_embed_dim, output_embed_dim, bias=False): - super().__init__() - - self.input_proj = Linear(input_embed_dim, source_embed_dim, bias=bias) - self.output_proj = Linear( - input_embed_dim + source_embed_dim, output_embed_dim, bias=bias - ) - - def forward(self, input, source_hids, encoder_padding_mask): - # input: bsz x input_embed_dim - # source_hids: srclen x bsz x source_embed_dim - - # x: bsz x source_embed_dim - x = self.input_proj(input) - - # compute attention - attn_scores = (source_hids * x.unsqueeze(0)).sum(dim=2) - - # don't attend over padding - if encoder_padding_mask is not None: - attn_scores = ( - attn_scores.float() - .masked_fill_(encoder_padding_mask, float("-inf")) - .type_as(attn_scores) - ) # FP16 support: cast to float and back - - attn_scores = F.softmax(attn_scores, dim=0) # srclen x bsz - - # sum weighted sources - x = (attn_scores.unsqueeze(2) * source_hids).sum(dim=0) - - x = torch.tanh(self.output_proj(torch.cat((x, input), dim=1))) - return x, attn_scores - - -class LSTMDecoder(FairseqIncrementalDecoder): - """LSTM decoder.""" - - def __init__( - self, - dictionary, - embed_dim=512, - hidden_size=512, - out_embed_dim=512, - num_layers=1, - dropout_in=0.1, - dropout_out=0.1, - attention=True, - 
encoder_output_units=512, - pretrained_embed=None, - share_input_output_embed=False, - adaptive_softmax_cutoff=None, - max_target_positions=DEFAULT_MAX_TARGET_POSITIONS, - residuals=False, - ): - super().__init__(dictionary) - self.dropout_in_module = FairseqDropout( - dropout_in*1.0, module_name=self.__class__.__name__ - ) - self.dropout_out_module = FairseqDropout( - dropout_out*1.0, module_name=self.__class__.__name__ - ) - self.hidden_size = hidden_size - self.share_input_output_embed = share_input_output_embed - self.need_attn = True - self.max_target_positions = max_target_positions - self.residuals = residuals - self.num_layers = num_layers - - self.adaptive_softmax = None - num_embeddings = len(dictionary) - padding_idx = dictionary.pad() - if pretrained_embed is None: - self.embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx) - else: - self.embed_tokens = pretrained_embed - - self.encoder_output_units = encoder_output_units - if encoder_output_units != hidden_size and encoder_output_units != 0: - self.encoder_hidden_proj = Linear(encoder_output_units, hidden_size) - self.encoder_cell_proj = Linear(encoder_output_units, hidden_size) - else: - self.encoder_hidden_proj = self.encoder_cell_proj = None - - # disable input feeding if there is no encoder - # input feeding is described in arxiv.org/abs/1508.04025 - input_feed_size = 0 if encoder_output_units == 0 else hidden_size - self.layers = nn.ModuleList( - [ - LSTMCell( - input_size=input_feed_size + embed_dim - if layer == 0 - else hidden_size, - hidden_size=hidden_size, - ) - for layer in range(num_layers) - ] - ) - - if attention: - # TODO make bias configurable - self.attention = AttentionLayer( - hidden_size, encoder_output_units, hidden_size, bias=False - ) - else: - self.attention = None - - if hidden_size != out_embed_dim: - self.additional_fc = Linear(hidden_size, out_embed_dim) - - if adaptive_softmax_cutoff is not None: - # setting adaptive_softmax dropout to dropout_out for now but can be redefined - self.adaptive_softmax = AdaptiveSoftmax( - num_embeddings, - hidden_size, - adaptive_softmax_cutoff, - dropout=dropout_out, - ) - elif not self.share_input_output_embed: - self.fc_out = Linear(out_embed_dim, num_embeddings, dropout=dropout_out) - - def forward( - self, - prev_output_tokens, - encoder_out: Optional[Tuple[Tensor, Tensor, Tensor, Tensor]] = None, - incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, - src_lengths: Optional[Tensor] = None, - ): - x, attn_scores = self.extract_features( - prev_output_tokens, encoder_out, incremental_state - ) - return self.output_layer(x), attn_scores - - def extract_features( - self, - prev_output_tokens, - encoder_out: Optional[Tuple[Tensor, Tensor, Tensor, Tensor]] = None, - incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, - ): - """ - Similar to *forward* but only return features. 
- """ - # get outputs from encoder - if encoder_out is not None: - encoder_outs = encoder_out[0] - encoder_hiddens = encoder_out[1] - encoder_cells = encoder_out[2] - encoder_padding_mask = encoder_out[3] - else: - encoder_outs = torch.empty(0) - encoder_hiddens = torch.empty(0) - encoder_cells = torch.empty(0) - encoder_padding_mask = torch.empty(0) - srclen = encoder_outs.size(0) - - if incremental_state is not None and len(incremental_state) > 0: - prev_output_tokens = prev_output_tokens[:, -1:] - - bsz, seqlen = prev_output_tokens.size() - - # embed tokens - x = self.embed_tokens(prev_output_tokens) - x = self.dropout_in_module(x) - - # B x T x C -> T x B x C - x = x.transpose(0, 1) - - # initialize previous states (or get from cache during incremental generation) - if incremental_state is not None and len(incremental_state) > 0: - prev_hiddens, prev_cells, input_feed = self.get_cached_state( - incremental_state - ) - elif encoder_out is not None: - # setup recurrent cells - prev_hiddens = [encoder_hiddens[i] for i in range(self.num_layers)] - prev_cells = [encoder_cells[i] for i in range(self.num_layers)] - if self.encoder_hidden_proj is not None: - prev_hiddens = [self.encoder_hidden_proj(y) for y in prev_hiddens] - prev_cells = [self.encoder_cell_proj(y) for y in prev_cells] - input_feed = x.new_zeros(bsz, self.hidden_size) - else: - # setup zero cells, since there is no encoder - zero_state = x.new_zeros(bsz, self.hidden_size) - prev_hiddens = [zero_state for i in range(self.num_layers)] - prev_cells = [zero_state for i in range(self.num_layers)] - input_feed = None - - assert ( - srclen > 0 or self.attention is None - ), "attention is not supported if there are no encoder outputs" - attn_scores: Optional[Tensor] = ( - x.new_zeros(srclen, seqlen, bsz) if self.attention is not None else None - ) - outs = [] - for j in range(seqlen): - # input feeding: concatenate context vector from previous time step - if input_feed is not None: - input = torch.cat((x[j, :, :], input_feed), dim=1) - else: - input = x[j] - - for i, rnn in enumerate(self.layers): - # recurrent cell - hidden, cell = rnn(input, (prev_hiddens[i], prev_cells[i])) - - # hidden state becomes the input to the next layer - input = self.dropout_out_module(hidden) - if self.residuals: - input = input + prev_hiddens[i] - - # save state for next time step - prev_hiddens[i] = hidden - prev_cells[i] = cell - - # apply attention using the last layer's hidden state - if self.attention is not None: - assert attn_scores is not None - out, attn_scores[:, j, :] = self.attention( - hidden, encoder_outs, encoder_padding_mask - ) - else: - out = hidden - out = self.dropout_out_module(out) - - # input feeding - if input_feed is not None: - input_feed = out - - # save final output - outs.append(out) - - # Stack all the necessary tensors together and store - prev_hiddens_tensor = torch.stack(prev_hiddens) - prev_cells_tensor = torch.stack(prev_cells) - cache_state = torch.jit.annotate( - Dict[str, Optional[Tensor]], - { - "prev_hiddens": prev_hiddens_tensor, - "prev_cells": prev_cells_tensor, - "input_feed": input_feed, - }, - ) - self.set_incremental_state(incremental_state, "cached_state", cache_state) - - # collect outputs across time steps - x = torch.cat(outs, dim=0).view(seqlen, bsz, self.hidden_size) - - # T x B x C -> B x T x C - x = x.transpose(1, 0) - - if hasattr(self, "additional_fc") and self.adaptive_softmax is None: - x = self.additional_fc(x) - x = self.dropout_out_module(x) - # srclen x tgtlen x bsz -> bsz x tgtlen x srclen - 
if not self.training and self.need_attn and self.attention is not None: - assert attn_scores is not None - attn_scores = attn_scores.transpose(0, 2) - else: - attn_scores = None - return x, attn_scores - - def output_layer(self, x): - """Project features to the vocabulary size.""" - if self.adaptive_softmax is None: - if self.share_input_output_embed: - x = F.linear(x, self.embed_tokens.weight) - else: - x = self.fc_out(x) - return x - - def get_cached_state( - self, - incremental_state: Dict[str, Dict[str, Optional[Tensor]]], - ) -> Tuple[List[Tensor], List[Tensor], Optional[Tensor]]: - cached_state = self.get_incremental_state(incremental_state, "cached_state") - assert cached_state is not None - prev_hiddens_ = cached_state["prev_hiddens"] - assert prev_hiddens_ is not None - prev_cells_ = cached_state["prev_cells"] - assert prev_cells_ is not None - prev_hiddens = [prev_hiddens_[i] for i in range(self.num_layers)] - prev_cells = [prev_cells_[j] for j in range(self.num_layers)] - input_feed = cached_state[ - "input_feed" - ] # can be None for decoder-only language models - return prev_hiddens, prev_cells, input_feed - - def reorder_incremental_state( - self, - incremental_state: Dict[str, Dict[str, Optional[Tensor]]], - new_order: Tensor, - ): - if incremental_state is None or len(incremental_state) == 0: - return - prev_hiddens, prev_cells, input_feed = self.get_cached_state(incremental_state) - prev_hiddens = [p.index_select(0, new_order) for p in prev_hiddens] - prev_cells = [p.index_select(0, new_order) for p in prev_cells] - if input_feed is not None: - input_feed = input_feed.index_select(0, new_order) - cached_state_new = torch.jit.annotate( - Dict[str, Optional[Tensor]], - { - "prev_hiddens": torch.stack(prev_hiddens), - "prev_cells": torch.stack(prev_cells), - "input_feed": input_feed, - }, - ) - self.set_incremental_state(incremental_state, "cached_state", cached_state_new), - return - - def max_positions(self): - """Maximum output length supported by the decoder.""" - return self.max_target_positions - - def make_generation_fast_(self, need_attn=False, **kwargs): - self.need_attn = need_attn - - -def Embedding(num_embeddings, embedding_dim, padding_idx): - m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) - nn.init.uniform_(m.weight, -0.1, 0.1) - nn.init.constant_(m.weight[padding_idx], 0) - return m - - -def LSTM(input_size, hidden_size, **kwargs): - m = nn.LSTM(input_size, hidden_size, **kwargs) - for name, param in m.named_parameters(): - if "weight" in name or "bias" in name: - param.data.uniform_(-0.1, 0.1) - return m - - -def LSTMCell(input_size, hidden_size, **kwargs): - m = nn.LSTMCell(input_size, hidden_size, **kwargs) - for name, param in m.named_parameters(): - if "weight" in name or "bias" in name: - param.data.uniform_(-0.1, 0.1) - return m - - -def Linear(in_features, out_features, bias=True, dropout=0.0): - """Linear layer (input: N x T x C)""" - m = nn.Linear(in_features, out_features, bias=bias) - m.weight.data.uniform_(-0.1, 0.1) - if bias: - m.bias.data.uniform_(-0.1, 0.1) - return m - - -@register_model_architecture("lstm", "lstm") -def base_architecture(args): - args.dropout = getattr(args, "dropout", 0.1) - args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) - args.encoder_embed_path = getattr(args, "encoder_embed_path", None) - args.encoder_freeze_embed = getattr(args, "encoder_freeze_embed", False) - args.encoder_hidden_size = getattr( - args, "encoder_hidden_size", args.encoder_embed_dim - ) - args.encoder_layers = 
getattr(args, "encoder_layers", 1) - args.encoder_bidirectional = getattr(args, "encoder_bidirectional", False) - args.encoder_dropout_in = getattr(args, "encoder_dropout_in", args.dropout) - args.encoder_dropout_out = getattr(args, "encoder_dropout_out", args.dropout) - args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512) - args.decoder_embed_path = getattr(args, "decoder_embed_path", None) - args.decoder_freeze_embed = getattr(args, "decoder_freeze_embed", False) - args.decoder_hidden_size = getattr( - args, "decoder_hidden_size", args.decoder_embed_dim - ) - args.decoder_layers = getattr(args, "decoder_layers", 1) - args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 512) - args.decoder_attention = getattr(args, "decoder_attention", "1") - args.decoder_dropout_in = getattr(args, "decoder_dropout_in", args.dropout) - args.decoder_dropout_out = getattr(args, "decoder_dropout_out", args.dropout) - args.share_decoder_input_output_embed = getattr( - args, "share_decoder_input_output_embed", False - ) - args.share_all_embeddings = getattr(args, "share_all_embeddings", False) - args.adaptive_softmax_cutoff = getattr( - args, "adaptive_softmax_cutoff", "10000,50000,200000" - ) - - -@register_model_architecture("lstm", "lstm_wiseman_iwslt_de_en") -def lstm_wiseman_iwslt_de_en(args): - args.dropout = getattr(args, "dropout", 0.1) - args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256) - args.encoder_dropout_in = getattr(args, "encoder_dropout_in", 0) - args.encoder_dropout_out = getattr(args, "encoder_dropout_out", 0) - args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 256) - args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 256) - args.decoder_dropout_in = getattr(args, "decoder_dropout_in", 0) - args.decoder_dropout_out = getattr(args, "decoder_dropout_out", args.dropout) - base_architecture(args) - - -@register_model_architecture("lstm", "lstm_luong_wmt_en_de") -def lstm_luong_wmt_en_de(args): - args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1000) - args.encoder_layers = getattr(args, "encoder_layers", 4) - args.encoder_dropout_out = getattr(args, "encoder_dropout_out", 0) - args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1000) - args.decoder_layers = getattr(args, "decoder_layers", 4) - args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 1000) - args.decoder_dropout_out = getattr(args, "decoder_dropout_out", 0) - base_architecture(args) diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/models/transformer/transformer_legacy.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/models/transformer/transformer_legacy.py deleted file mode 100644 index af9646740a79ce720eeba513e2d994b39509ac49..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/models/transformer/transformer_legacy.py +++ /dev/null @@ -1,275 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -from fairseq.dataclass.utils import gen_parser_from_dataclass -from fairseq.models import ( - register_model, - register_model_architecture, -) -from fairseq.models.transformer.transformer_config import ( - TransformerConfig, - DEFAULT_MAX_SOURCE_POSITIONS, - DEFAULT_MAX_TARGET_POSITIONS, - DEFAULT_MIN_PARAMS_TO_WRAP, -) -from fairseq.models.transformer.transformer_base import ( - TransformerModelBase, -) - - -@register_model("transformer") -class TransformerModel(TransformerModelBase): - """ - This is the legacy implementation of the transformer model that - uses argparse for configuration. - """ - - @classmethod - def hub_models(cls): - # fmt: off - - def moses_subword(path): - return { - 'path': path, - 'tokenizer': 'moses', - 'bpe': 'subword_nmt', - } - - def moses_fastbpe(path): - return { - 'path': path, - 'tokenizer': 'moses', - 'bpe': 'fastbpe', - } - - def spm(path): - return { - 'path': path, - 'bpe': 'sentencepiece', - 'tokenizer': 'space', - } - - return { - 'transformer.wmt14.en-fr': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/wmt14.en-fr.joined-dict.transformer.tar.bz2'), - 'transformer.wmt16.en-de': 'https://dl.fbaipublicfiles.com/fairseq/models/wmt16.en-de.joined-dict.transformer.tar.bz2', - 'transformer.wmt18.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/wmt18.en-de.ensemble.tar.gz'), - 'transformer.wmt19.en-de': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-de.joined-dict.ensemble.tar.gz'), - 'transformer.wmt19.en-ru': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-ru.ensemble.tar.gz'), - 'transformer.wmt19.de-en': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.de-en.joined-dict.ensemble.tar.gz'), - 'transformer.wmt19.ru-en': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.ru-en.ensemble.tar.gz'), - 'transformer.wmt19.en-de.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-de.joined-dict.single_model.tar.gz'), - 'transformer.wmt19.en-ru.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-ru.single_model.tar.gz'), - 'transformer.wmt19.de-en.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.de-en.joined-dict.single_model.tar.gz'), - 'transformer.wmt19.ru-en.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.ru-en.single_model.tar.gz'), - 'transformer.wmt20.en-ta': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.en-ta.single.tar.gz'), - 'transformer.wmt20.en-iu.news': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.en-iu.news.single.tar.gz'), - 'transformer.wmt20.en-iu.nh': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.en-iu.nh.single.tar.gz'), - 'transformer.wmt20.ta-en': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.ta-en.single.tar.gz'), - 'transformer.wmt20.iu-en.news': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.iu-en.news.single.tar.gz'), - 'transformer.wmt20.iu-en.nh': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.iu-en.nh.single.tar.gz'), - 'transformer.flores101.mm100.615M': spm('https://dl.fbaipublicfiles.com/flores101/pretrained_models/flores101_mm100_615M.tar.gz'), - 'transformer.flores101.mm100.175M': spm('https://dl.fbaipublicfiles.com/flores101/pretrained_models/flores101_mm100_175M.tar.gz'), - } - # fmt: on - - def __init__(self, args, encoder, decoder): - cfg = TransformerConfig.from_namespace(args) - super().__init__(cfg, encoder, decoder) 
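# keep the original argparse namespace alongside the dataclass config, since this legacy class is still configured via argparse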
- self.args = args - - @classmethod - def add_args(cls, parser): - """Add model-specific arguments to the parser.""" - # we want to build the args recursively in this case. - # do not set defaults so that settings defaults from various architectures still works - gen_parser_from_dataclass( - parser, TransformerConfig(), delete_default=True, with_prefix="" - ) - - @classmethod - def build_model(cls, args, task): - """Build a new model instance.""" - - # make sure all arguments are present in older models - base_architecture(args) - - if args.encoder_layers_to_keep: - args.encoder_layers = len(args.encoder_layers_to_keep.split(",")) - if args.decoder_layers_to_keep: - args.decoder_layers = len(args.decoder_layers_to_keep.split(",")) - - if getattr(args, "max_source_positions", None) is None: - args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS - if getattr(args, "max_target_positions", None) is None: - args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS - - src_dict, tgt_dict = task.source_dictionary, task.target_dictionary - - if args.share_all_embeddings: - if src_dict != tgt_dict: - raise ValueError("--share-all-embeddings requires a joined dictionary") - if args.encoder_embed_dim != args.decoder_embed_dim: - raise ValueError( - "--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim" - ) - if args.decoder_embed_path and ( - args.decoder_embed_path != args.encoder_embed_path - ): - raise ValueError( - "--share-all-embeddings not compatible with --decoder-embed-path" - ) - args.share_decoder_input_output_embed = True - - if getattr(args, "offload_activations", False): - args.checkpoint_activations = True # offloading implies checkpointing - - if not args.share_all_embeddings: - args.min_params_to_wrap = getattr( - args, "min_params_to_wrap", DEFAULT_MIN_PARAMS_TO_WRAP - ) - cfg = TransformerConfig.from_namespace(args) - return super().build_model(cfg, task) - - @classmethod - def build_embedding(cls, args, dictionary, embed_dim, path=None): - return super().build_embedding( - TransformerConfig.from_namespace(args), dictionary, embed_dim, path - ) - - @classmethod - def build_encoder(cls, args, src_dict, embed_tokens): - return super().build_encoder( - TransformerConfig.from_namespace(args), src_dict, embed_tokens - ) - - @classmethod - def build_decoder(cls, args, tgt_dict, embed_tokens): - return super().build_decoder( - TransformerConfig.from_namespace(args), tgt_dict, embed_tokens - ) - - -# architectures - - -@register_model_architecture("transformer", "transformer_tiny") -def tiny_architecture(args): - args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 64) - args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 64) - args.encoder_layers = getattr(args, "encoder_layers", 2) - args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 2) - args.decoder_layers = getattr(args, "decoder_layers", 2) - args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 2) - return base_architecture(args) - - -@register_model_architecture("transformer", "transformer") -def base_architecture(args): - args.encoder_embed_path = getattr(args, "encoder_embed_path", None) - args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) - args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) - args.encoder_layers = getattr(args, "encoder_layers", 6) - args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) - args.encoder_normalize_before = getattr(args, "encoder_normalize_before", 
False) - args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) - args.decoder_embed_path = getattr(args, "decoder_embed_path", None) - args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) - args.decoder_ffn_embed_dim = getattr( - args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim - ) - args.decoder_layers = getattr(args, "decoder_layers", 6) - args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) - args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) - args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) - args.attention_dropout = getattr(args, "attention_dropout", 0.0) - args.activation_dropout = getattr(args, "activation_dropout", 0.0) - args.activation_fn = getattr(args, "activation_fn", "relu") - args.dropout = getattr(args, "dropout", 0.1) - args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) - args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) - args.share_decoder_input_output_embed = getattr( - args, "share_decoder_input_output_embed", False - ) - args.share_all_embeddings = getattr(args, "share_all_embeddings", False) - args.no_token_positional_embeddings = getattr( - args, "no_token_positional_embeddings", False - ) - args.adaptive_input = getattr(args, "adaptive_input", False) - args.no_cross_attention = getattr(args, "no_cross_attention", False) - args.cross_self_attention = getattr(args, "cross_self_attention", False) - - args.decoder_output_dim = getattr( - args, "decoder_output_dim", args.decoder_embed_dim - ) - args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) - - args.no_scale_embedding = getattr(args, "no_scale_embedding", False) - args.layernorm_embedding = getattr(args, "layernorm_embedding", False) - args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False) - args.checkpoint_activations = getattr(args, "checkpoint_activations", False) - args.offload_activations = getattr(args, "offload_activations", False) - if args.offload_activations: - args.checkpoint_activations = True - args.encoder_layers_to_keep = getattr(args, "encoder_layers_to_keep", None) - args.decoder_layers_to_keep = getattr(args, "decoder_layers_to_keep", None) - args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0) - args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0) - args.quant_noise_pq = getattr(args, "quant_noise_pq", 0) - args.quant_noise_pq_block_size = getattr(args, "quant_noise_pq_block_size", 8) - args.quant_noise_scalar = getattr(args, "quant_noise_scalar", 0) - - -@register_model_architecture("transformer", "transformer_iwslt_de_en") -def transformer_iwslt_de_en(args): - args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) - args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024) - args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4) - args.encoder_layers = getattr(args, "encoder_layers", 6) - args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512) - args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 1024) - args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4) - args.decoder_layers = getattr(args, "decoder_layers", 6) - base_architecture(args) - - -@register_model_architecture("transformer", "transformer_wmt_en_de") -def transformer_wmt_en_de(args): - base_architecture(args) - - -# parameters used in the "Attention Is All You Need" paper (Vaswani et al., 
2017) -@register_model_architecture("transformer", "transformer_vaswani_wmt_en_de_big") -def transformer_vaswani_wmt_en_de_big(args): - args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) - args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096) - args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16) - args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) - args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024) - args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096) - args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16) - args.dropout = getattr(args, "dropout", 0.3) - base_architecture(args) - - -@register_model_architecture("transformer", "transformer_vaswani_wmt_en_fr_big") -def transformer_vaswani_wmt_en_fr_big(args): - args.dropout = getattr(args, "dropout", 0.1) - transformer_vaswani_wmt_en_de_big(args) - - -@register_model_architecture("transformer", "transformer_wmt_en_de_big") -def transformer_wmt_en_de_big(args): - args.attention_dropout = getattr(args, "attention_dropout", 0.1) - transformer_vaswani_wmt_en_de_big(args) - - -# default parameters used in tensor2tensor implementation -@register_model_architecture("transformer", "transformer_wmt_en_de_big_t2t") -def transformer_wmt_en_de_big_t2t(args): - args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True) - args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True) - args.attention_dropout = getattr(args, "attention_dropout", 0.1) - args.activation_dropout = getattr(args, "activation_dropout", 0.1) - transformer_vaswani_wmt_en_de_big(args) diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/tests/speech_recognition/test_vggtransformer.py b/spaces/OFA-Sys/OFA-vqa/fairseq/tests/speech_recognition/test_vggtransformer.py deleted file mode 100644 index 4dc73b8c7379970dc0bcc16fcb088a64a1bd7e3b..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/tests/speech_recognition/test_vggtransformer.py +++ /dev/null @@ -1,135 +0,0 @@ -#!/usr/bin/env python3 - -# import models/encoder/decoder to be tested -from examples.speech_recognition.models.vggtransformer import ( - TransformerDecoder, - VGGTransformerEncoder, - VGGTransformerModel, - vggtransformer_1, - vggtransformer_2, - vggtransformer_base, -) - -# import base test class -from .asr_test_base import ( - DEFAULT_TEST_VOCAB_SIZE, - TestFairseqDecoderBase, - TestFairseqEncoderBase, - TestFairseqEncoderDecoderModelBase, - get_dummy_dictionary, - get_dummy_encoder_output, - get_dummy_input, -) - - -class VGGTransformerModelTest_mid(TestFairseqEncoderDecoderModelBase): - def setUp(self): - def override_config(args): - """ - vggtrasformer_1 use 14 layers of transformer, - for testing purpose, it is too expensive. For fast turn-around - test, reduce the number of layers to 3. - """ - args.transformer_enc_config = ( - "((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 3" - ) - - super().setUp() - extra_args_setter = [vggtransformer_1, override_config] - - self.setUpModel(VGGTransformerModel, extra_args_setter) - self.setUpInput(get_dummy_input(T=50, D=80, B=5, K=DEFAULT_TEST_VOCAB_SIZE)) - - -class VGGTransformerModelTest_big(TestFairseqEncoderDecoderModelBase): - def setUp(self): - def override_config(args): - """ - vggtrasformer_2 use 16 layers of transformer, - for testing purpose, it is too expensive. For fast turn-around - test, reduce the number of layers to 3. 
- """ - args.transformer_enc_config = ( - "((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 3" - ) - - super().setUp() - extra_args_setter = [vggtransformer_2, override_config] - - self.setUpModel(VGGTransformerModel, extra_args_setter) - self.setUpInput(get_dummy_input(T=50, D=80, B=5, K=DEFAULT_TEST_VOCAB_SIZE)) - - -class VGGTransformerModelTest_base(TestFairseqEncoderDecoderModelBase): - def setUp(self): - def override_config(args): - """ - vggtrasformer_base use 12 layers of transformer, - for testing purpose, it is too expensive. For fast turn-around - test, reduce the number of layers to 3. - """ - args.transformer_enc_config = ( - "((512, 8, 2048, True, 0.15, 0.15, 0.15),) * 3" - ) - - super().setUp() - extra_args_setter = [vggtransformer_base, override_config] - - self.setUpModel(VGGTransformerModel, extra_args_setter) - self.setUpInput(get_dummy_input(T=50, D=80, B=5, K=DEFAULT_TEST_VOCAB_SIZE)) - - -class VGGTransformerEncoderTest(TestFairseqEncoderBase): - def setUp(self): - super().setUp() - - self.setUpInput(get_dummy_input(T=50, D=80, B=5)) - - def test_forward(self): - print("1. test standard vggtransformer") - self.setUpEncoder(VGGTransformerEncoder(input_feat_per_channel=80)) - super().test_forward() - print("2. test vggtransformer with limited right context") - self.setUpEncoder( - VGGTransformerEncoder( - input_feat_per_channel=80, transformer_context=(-1, 5) - ) - ) - super().test_forward() - print("3. test vggtransformer with limited left context") - self.setUpEncoder( - VGGTransformerEncoder( - input_feat_per_channel=80, transformer_context=(5, -1) - ) - ) - super().test_forward() - print("4. test vggtransformer with limited right context and sampling") - self.setUpEncoder( - VGGTransformerEncoder( - input_feat_per_channel=80, - transformer_context=(-1, 12), - transformer_sampling=(2, 2), - ) - ) - super().test_forward() - print("5. test vggtransformer with windowed context and sampling") - self.setUpEncoder( - VGGTransformerEncoder( - input_feat_per_channel=80, - transformer_context=(12, 12), - transformer_sampling=(2, 2), - ) - ) - - -class TransformerDecoderTest(TestFairseqDecoderBase): - def setUp(self): - super().setUp() - - dict = get_dummy_dictionary(vocab_size=DEFAULT_TEST_VOCAB_SIZE) - decoder = TransformerDecoder(dict) - dummy_encoder_output = get_dummy_encoder_output(encoder_out_shape=(50, 5, 256)) - - self.setUpDecoder(decoder) - self.setUpInput(dummy_encoder_output) - self.setUpPrevOutputTokens() diff --git a/spaces/OFA-Sys/expertllama/app.py b/spaces/OFA-Sys/expertllama/app.py deleted file mode 100644 index a218786da57f59f4eda034bd5f452a5b4829d717..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/expertllama/app.py +++ /dev/null @@ -1,135 +0,0 @@ -''' -CREDIT: -script adapted from [alpaca](https://huggingface.co/spaces/tloen/alpaca-lora/blob/main/app.py). 
-''' - -import gradio as gr -import random -import time -import transformers -import os -import json -import torch -import argparse -from tqdm import tqdm -from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig - - -def apply_delta(base_model_path, target_model_path, delta_path): - print(f"Loading the delta weights from {delta_path}") - delta_tokenizer = LlamaTokenizer.from_pretrained(delta_path, use_fast=False) - delta = LlamaForCausalLM.from_pretrained( - delta_path, low_cpu_mem_usage=True, torch_dtype=torch.float16 - ) - - print(f"Loading the base model from {base_model_path}") - base_tokenizer = LlamaTokenizer.from_pretrained(base_model_path, use_fast=False) - base = LlamaForCausalLM.from_pretrained( - base_model_path, low_cpu_mem_usage=True, torch_dtype=torch.float16 - ) - - # following alpaca training recipe, we have added new initialized tokens - DEFAULT_PAD_TOKEN = "[PAD]" - DEFAULT_EOS_TOKEN = "
" - DEFAULT_BOS_TOKEN = "" - DEFAULT_UNK_TOKEN = "" - special_tokens_dict = { - "pad_token": DEFAULT_PAD_TOKEN, - "eos_token": DEFAULT_EOS_TOKEN, - "bos_token": DEFAULT_BOS_TOKEN, - "unk_token": DEFAULT_UNK_TOKEN, - } - num_new_tokens = base_tokenizer.add_special_tokens(special_tokens_dict) - base.resize_token_embeddings(len(base_tokenizer)) - input_embeddings = base.get_input_embeddings().weight.data - output_embeddings = base.get_output_embeddings().weight.data - - input_embeddings[-num_new_tokens:] = 0 - output_embeddings[-num_new_tokens:] = 0 - - print("Applying the delta") - target_weights = {} - for name, param in tqdm(base.state_dict().items(), desc="Applying delta"): - assert name in delta.state_dict() - param.data += delta.state_dict()[name] - target_weights[name] = param.data - - print(f"Saving the target model to {target_model_path}") - base.load_state_dict(target_weights) - # base.save_pretrained(target_model_path) - # delta_tokenizer.save_pretrained(target_model_path) - - delta = None - - return base, delta_tokenizer - - -base_weights = 'decapoda-research/llama-7b-hf' -target_weights = 'expertllama' # local path -delta_weights = 'OFA-Sys/expertllama-7b-delta' -model, tokenizer = apply_delta(base_weights, target_weights, delta_weights) -model = model.to(torch.float) - -if torch.__version__ >= "2": - model = torch.compile(model) - -def respond( - instruction, - temperature=0.1, - top_p=0.75, - top_k=40, - num_beams=4, - max_new_tokens=128, - **kwargs, -): - # prompt wrapper, only single-turn is allowed for now - prompt = f"### Human:\n{instruction}\n\n### Assistant:\n" - inputs = tokenizer( - prompt, - return_tensors="pt", - add_special_tokens=False - ) - generation_config = GenerationConfig( - temperature=temperature, - top_p=top_p, - top_k=top_k, - num_beams=num_beams, - **kwargs, - ) - with torch.no_grad(): - generation_output = model.generate( - input_ids=inputs["input_ids"], - generation_config=generation_config, - return_dict_in_generate=True, - output_scores=True, - max_new_tokens=max_new_tokens, - ) - response = tokenizer.decode(generation_output.sequences[0][:-2]).split("### Assistant:\n", 1)[1] - return response - - -g = gr.Interface( - fn=respond, - inputs=[ - gr.components.Textbox( - lines=2, label="Instruction" - ), - gr.components.Slider(minimum=0, maximum=1, value=0.1, label="Temperature"), - gr.components.Slider(minimum=0, maximum=1, value=0.75, label="Top p"), - gr.components.Slider(minimum=0, maximum=100, step=1, value=40, label="Top k"), - gr.components.Slider(minimum=1, maximum=4, step=1, value=4, label="Beams"), - gr.components.Slider( - minimum=1, maximum=768, step=1, value=512, label="Max tokens" - ), - ], - outputs=[ - gr.inputs.Textbox( - lines=8, - label="Output", - ) - ], - title="ExpertLLaMA", - description="ExpertLLaMA is an open-source chatbot trained on expert-like data produced with GPT-3.5, see our [project repo](https://github.com/OFA-Sys/ExpertLLaMA) for details.", -) -g.queue(concurrency_count=1) -g.launch() \ No newline at end of file diff --git a/spaces/ORI-Muchim/RaidenTTS/modules.py b/spaces/ORI-Muchim/RaidenTTS/modules.py deleted file mode 100644 index 9c7fd9cd6eb8b7e0ec0e08957e970744a374a924..0000000000000000000000000000000000000000 --- a/spaces/ORI-Muchim/RaidenTTS/modules.py +++ /dev/null @@ -1,390 +0,0 @@ -import copy -import math -import numpy as np -import scipy -import torch -from torch import nn -from torch.nn import functional as F - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import 
weight_norm, remove_weight_norm - -import commons -from commons import init_weights, get_padding -from transforms import piecewise_rational_quadratic_transform - - -LRELU_SLOPE = 0.1 - - -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - - -class ConvReluNorm(nn.Module): - def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout): - super().__init__() - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.out_channels = out_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - assert n_layers > 1, "Number of layers should be larger than 0." - - self.conv_layers = nn.ModuleList() - self.norm_layers = nn.ModuleList() - self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential( - nn.ReLU(), - nn.Dropout(p_dropout)) - for _ in range(n_layers-1): - self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask): - x_org = x - for i in range(self.n_layers): - x = self.conv_layers[i](x * x_mask) - x = self.norm_layers[i](x) - x = self.relu_drop(x) - x = x_org + self.proj(x) - return x * x_mask - - -class DDSConv(nn.Module): - """ - Dialted and Depth-Separable Convolution - """ - def __init__(self, channels, kernel_size, n_layers, p_dropout=0.): - super().__init__() - self.channels = channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - - self.drop = nn.Dropout(p_dropout) - self.convs_sep = nn.ModuleList() - self.convs_1x1 = nn.ModuleList() - self.norms_1 = nn.ModuleList() - self.norms_2 = nn.ModuleList() - for i in range(n_layers): - dilation = kernel_size ** i - padding = (kernel_size * dilation - dilation) // 2 - self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size, - groups=channels, dilation=dilation, padding=padding - )) - self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) - self.norms_1.append(LayerNorm(channels)) - self.norms_2.append(LayerNorm(channels)) - - def forward(self, x, x_mask, g=None): - if g is not None: - x = x + g - for i in range(self.n_layers): - y = self.convs_sep[i](x * x_mask) - y = self.norms_1[i](y) - y = F.gelu(y) - y = self.convs_1x1[i](y) - y = self.norms_2[i](y) - y = F.gelu(y) - y = self.drop(y) - x = x + y - return x * x_mask - - -class WN(torch.nn.Module): - def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0): - super(WN, self).__init__() - assert(kernel_size % 2 == 1) - self.hidden_channels =hidden_channels - self.kernel_size = kernel_size, - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.p_dropout = p_dropout - - self.in_layers = torch.nn.ModuleList() - self.res_skip_layers = torch.nn.ModuleList() - self.drop = nn.Dropout(p_dropout) - - if gin_channels != 0: - cond_layer = 
torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight') - - for i in range(n_layers): - dilation = dilation_rate ** i - padding = int((kernel_size * dilation - dilation) / 2) - in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size, - dilation=dilation, padding=padding) - in_layer = torch.nn.utils.weight_norm(in_layer, name='weight') - self.in_layers.append(in_layer) - - # last one is not necessary - if i < n_layers - 1: - res_skip_channels = 2 * hidden_channels - else: - res_skip_channels = hidden_channels - - res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight') - self.res_skip_layers.append(res_skip_layer) - - def forward(self, x, x_mask, g=None, **kwargs): - output = torch.zeros_like(x) - n_channels_tensor = torch.IntTensor([self.hidden_channels]) - - if g is not None: - g = self.cond_layer(g) - - for i in range(self.n_layers): - x_in = self.in_layers[i](x) - if g is not None: - cond_offset = i * 2 * self.hidden_channels - g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:] - else: - g_l = torch.zeros_like(x_in) - - acts = commons.fused_add_tanh_sigmoid_multiply( - x_in, - g_l, - n_channels_tensor) - acts = self.drop(acts) - - res_skip_acts = self.res_skip_layers[i](acts) - if i < self.n_layers - 1: - res_acts = res_skip_acts[:,:self.hidden_channels,:] - x = (x + res_acts) * x_mask - output = output + res_skip_acts[:,self.hidden_channels:,:] - else: - output = output + res_skip_acts - return output * x_mask - - def remove_weight_norm(self): - if self.gin_channels != 0: - torch.nn.utils.remove_weight_norm(self.cond_layer) - for l in self.in_layers: - torch.nn.utils.remove_weight_norm(l) - for l in self.res_skip_layers: - torch.nn.utils.remove_weight_norm(l) - - -class ResBlock1(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.convs1 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]))) - ]) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))) - ]) - self.convs2.apply(init_weights) - - def forward(self, x, x_mask=None): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c2(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.convs = nn.ModuleList([ - 
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))) - ]) - self.convs.apply(init_weights) - - def forward(self, x, x_mask=None): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class Log(nn.Module): - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask - logdet = torch.sum(-y, [1, 2]) - return y, logdet - else: - x = torch.exp(x) * x_mask - return x - - -class Flip(nn.Module): - def forward(self, x, *args, reverse=False, **kwargs): - x = torch.flip(x, [1]) - if not reverse: - logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) - return x, logdet - else: - return x - - -class ElementwiseAffine(nn.Module): - def __init__(self, channels): - super().__init__() - self.channels = channels - self.m = nn.Parameter(torch.zeros(channels,1)) - self.logs = nn.Parameter(torch.zeros(channels,1)) - - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = self.m + torch.exp(self.logs) * x - y = y * x_mask - logdet = torch.sum(self.logs * x_mask, [1,2]) - return y, logdet - else: - x = (x - self.m) * torch.exp(-self.logs) * x_mask - return x - - -class ResidualCouplingLayer(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=0, - gin_channels=0, - mean_only=False): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels) - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels]*2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1,2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x - - -class ConvFlow(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0): - super().__init__() - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.num_bins = num_bins - self.tail_bound = tail_bound - self.half_channels = in_channels // 2 - - self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) - self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.) 
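# the 1x1 projection below emits, per half-channel, the unnormalized spline parameters: num_bins widths, num_bins heights and (num_bins - 1) derivatives, i.e. num_bins * 3 - 1 values in total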
- self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) - h = self.convs(h, x_mask, g=g) - h = self.proj(h) * x_mask - - b, c, t = x0.shape - h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] - - unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_derivatives = h[..., 2 * self.num_bins:] - - x1, logabsdet = piecewise_rational_quadratic_transform(x1, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=reverse, - tails='linear', - tail_bound=self.tail_bound - ) - - x = torch.cat([x0, x1], 1) * x_mask - logdet = torch.sum(logabsdet * x_mask, [1,2]) - if not reverse: - return x, logdet - else: - return x diff --git a/spaces/OgiKazus/vits-uma-genshin-honkai/attentions.py b/spaces/OgiKazus/vits-uma-genshin-honkai/attentions.py deleted file mode 100644 index 86bc73b5fe98cc7b443e9078553920346c996707..0000000000000000000000000000000000000000 --- a/spaces/OgiKazus/vits-uma-genshin-honkai/attentions.py +++ /dev/null @@ -1,300 +0,0 @@ -import math -import torch -from torch import nn -from torch.nn import functional as F - -import commons -from modules import LayerNorm - - -class Encoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class Decoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = 
nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init)) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with torch.no_grad(): - self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert t_s == t_t, "Relative attention is only available for self-attention." 
- key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." - scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert t_s == t_t, "Local attention is only available for self-attention." - block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) - output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings) - output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. - pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). - x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]])) - - # Reshape and slice out the padded elements. 
- x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # padd along column - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]])) - x_flat = x.view([batch, heads, length**2 + length*(length -1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. - Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x diff --git a/spaces/Olivier-Truong/faster-whisper-webui-v2/src/__init__.py b/spaces/Olivier-Truong/faster-whisper-webui-v2/src/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/inpainting_src/ldm_inpainting/ldm/modules/diffusionmodules/__init__.py b/spaces/OpenGVLab/InternGPT/iGPT/models/inpainting_src/ldm_inpainting/ldm/modules/diffusionmodules/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/OpenGVLab/InternGPT/third-party/lama/docker/3_evaluate.sh b/spaces/OpenGVLab/InternGPT/third-party/lama/docker/3_evaluate.sh deleted file mode 100644 index d01e0a39da620e38c1ebf28beead59f286437321..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/InternGPT/third-party/lama/docker/3_evaluate.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/usr/bin/env bash - - -if (( $# < 3 )) -then - echo "Usage: $0 original_dataset_dir predictions_dir output_dir [other 
arguments to evaluate_predicts.py]" - exit 1 -fi - -CURDIR="$(dirname $0)" -SRCDIR="$CURDIR/.." -SRCDIR="$(realpath $SRCDIR)" - -ORIG_DATASET_LOCAL_DIR="$(realpath $1)" -PREDICTIONS_LOCAL_DIR="$(realpath $2)" -OUTPUT_LOCAL_DIR="$(realpath $3)" -shift 3 - -mkdir -p "$OUTPUT_LOCAL_DIR" - -docker run \ - -v "$SRCDIR":/home/user/project \ - -v "$ORIG_DATASET_LOCAL_DIR":/data/orig_dataset \ - -v "$PREDICTIONS_LOCAL_DIR":/data/predictions \ - -v "$OUTPUT_LOCAL_DIR":/data/output \ - -u $(id -u):$(id -g) \ - --name="lama-eval" \ - --rm \ - windj007/lama \ - /home/user/project/bin/evaluate_predicts.py \ - /home/user/project/configs/eval2_cpu.yaml \ - /data/orig_dataset \ - /data/predictions \ - /data/output/metrics.yaml \ - $@ diff --git a/spaces/OptimalScale/Robin-7b/lmflow/datasets/dataset.py b/spaces/OptimalScale/Robin-7b/lmflow/datasets/dataset.py deleted file mode 100644 index 8228d20ab4165515c2d1d09ae679473a53dbb6ed..0000000000000000000000000000000000000000 --- a/spaces/OptimalScale/Robin-7b/lmflow/datasets/dataset.py +++ /dev/null @@ -1,308 +0,0 @@ -#!/usr/bin/env python -# coding=utf-8 -"""This Python code defines a class Dataset with methods for initializing, loading, -and manipulating datasets from different backends such as Hugging Face and JSON. - -The `Dataset` class includes methods for loading datasets from a dictionary and a Hugging -Face dataset, mapping datasets, and retrieving the backend dataset and arguments. -""" - - - -# Importing necessary libraries and modules -import json -from pathlib import Path -from typing import Optional - -from datasets import load_dataset -from datasets import Dataset as HFDataset - -from lmflow.args import DatasetArguments - -DATASET_TYPES = [ - "text_only", - "text2text", -] - -KEY_TYPE = "type" -KEY_INSTANCES = "instances" - -class Dataset: - r""" - Initializes the Dataset object with the given parameters. - - Parameters - ------------ - data_args : DatasetArguments object. - Contains the arguments required to load the dataset. - - backend : str, default="huggingface" - A string representing the dataset backend. Defaults to "huggingface". - - args : Optional. - Positional arguments. - - kwargs : Optional. - Keyword arguments. - """ - def __init__(self, data_args=None, backend: str="huggingface", *args, **kwargs): - self.data_args = data_args - self.backend = backend - self.backend_dataset = None - self.type = None # Original type of the dataset - self.dataset_path = data_args.dataset_path - - if data_args.dataset_path is None: - return - - if backend == "huggingface": - data_files = [ - x.absolute().as_posix() - for x in Path(self.dataset_path).glob("*.json") - ] - - # Iterate through all the files and ensure they have the same data type - for single_file in data_files: - with open(single_file) as fin: - json_data = json.load(fin) - if KEY_TYPE not in json_data.keys(): - raise ValueError( - f'"{KEY_TYPE}" field must be specified for data, e.g.' - '{\n' - f' "{KEY_TYPE}: "text_only",\n' - f' "{KEY_INSTANCES}": [\n' - ' { "text": "Sentence 1: This is a sentence." }\n' - ' { "text": "Sentence 2: This is another sentence." }\n' - f' ]\n' - '}' - ) - - if self.type is None: - self.type = json_data[KEY_TYPE] - elif self.type != json_data[KEY_TYPE]: - raise ValueError( - 'All task files must have same data types. Previous' - f' files have type "{self.type}", but in file' - f' {single_file}, it has type "{self.type}".' 
- ) - - # Load the dataset using the HuggingFace dataset library - extensions = "json" - raw_dataset = load_dataset( - extensions, - data_files=data_files, - field=KEY_INSTANCES, - split="train", - use_auth_token=None, - ) - self.backend_dataset = raw_dataset - elif backend == "json": - # TODO (@Jiachun) - pass - else: - raise NotImplementedError(f'Unsupported dataset backend "{backend}"') - - - def _check_data_type(self): - # TODO: check if data type and data structure matches, raise messages - # with hints - pass - - - def from_dict(self, dict_obj: dict, *args, **kwargs): - r""" - Create a Dataset object from a dictionary. - - Return a Dataset given a dict with format: - { - "type": TYPE, - "instances": [ - { - "key_1": VALUE_1.1, - "key_2": VALUE_1.2, - ... - }, - { - "key_1": VALUE_2.1, - "key_2": VALUE_2.2, - ... - }, - ... - ] - } - - Parameters - ----------- - - dict_obj : dict. - A dictionary containing the dataset information. - - args : Optional. - Positional arguments. - - kwargs : Optional. - Keyword arguments. - - Returns - --------- - - self : Dataset object. - """ - if self.backend == "huggingface": - if KEY_TYPE not in dict_obj: - raise ValueError( - f'"{KEY_TYPE}" must be provided to initialize a dataset' - ) - if KEY_INSTANCES not in dict_obj: - raise ValueError( - f'"{KEY_INSTANCES}" must be provided to initialize a dataset' - ) - - self.type = dict_obj[KEY_TYPE] - - hf_dict = {} - if len(dict_obj[KEY_INSTANCES]) > 0: - for key in dict_obj[KEY_INSTANCES][0].keys(): - hf_dict[key] = [ instance[key] for instance in dict_obj[KEY_INSTANCES] ] - - self.backend_dataset = HFDataset.from_dict(hf_dict, *args, **kwargs) - return self - else: - raise NotImplementedError( - f'Currently .from_dict is not supported for backend "{backend}"' - ) - - - @classmethod - def create_from_dict(cls, dict_obj, *args, **kwargs): - r""" - Returns - -------- - - Returns a Dataset object given a dict. - """ - empty_data_args = DatasetArguments(dataset_path=None) - dataset = Dataset(empty_data_args) - return dataset.from_dict(dict_obj) - - - def to_dict(self): - r""" - Returns - --------- - - Return a dict represents the dataset: - { - "type": TYPE, - "instances": [ - { - "key_1": VALUE_1.1, - "key_2": VALUE_1.2, - ... - }, - { - "key_1": VALUE_2.1, - "key_2": VALUE_2.2, - ... - }, - ... - ] - } - - A python dict object represents the content of this dataset. - """ - if self.backend == "huggingface": - dict_obj = {} - dict_obj[KEY_TYPE] = self.get_type() - - hf_dict = self.backend_dataset.to_dict() - dict_obj[KEY_INSTANCES] = [] - - first_key = None - for key in hf_dict.keys(): - first_key = key - break - - if first_key is not None: - num_instances = len(hf_dict[first_key]) - dict_obj[KEY_INSTANCES] = [ - { - key: hf_dict[key][i] for key in hf_dict.keys() - } - for i in range(num_instances) - ] - - return dict_obj - else: - raise NotImplementedError( - f'Current .to_dict is not supported for backend "{backend}"' - ) - - - def map(self, *args, **kwargs): - r""" - Parameters - ------------ - args : Optional. - Positional arguments. - - kwargs : Optional. - Keyword arguments. - - Returns - --------- - - self : Dataset object. 
- """ - # If the dataset uses Hugging Face as the backend, - # call the `map()` function of the Hugging Face backend dataset - if self.backend == "huggingface": - # Set the mapped dataset as the backend dataset of the current dataset - mapped_backend_dataset = self.backend_dataset.map(*args, **kwargs) - self.backend_dataset = mapped_backend_dataset - return self - else: - # If the backend is not Hugging Face, raise a NotImplementedError - raise NotImplementedError( - f'Currently .map is not supported for backend "{backend}"' - ) - - - def get_backend(self) -> Optional[str]: - r""" - Returns - --------- - - self.backend - """ - return self.backend - - - def get_backend_dataset(self): - r""" - Returns - --------- - - self.backend_dataset - """ - return self.backend_dataset - - - def get_data_args(self): - r""" - Returns - --------- - - self.data_args - """ - return self.data_args - - - def get_type(self): - r""" - Returns - --------- - - self.type - """ - return self.type diff --git a/spaces/PAIR/PAIR-Diffusion/annotator/OneFormer/datasets/prepare_coco_semantic_annos_from_panoptic_annos.py b/spaces/PAIR/PAIR-Diffusion/annotator/OneFormer/datasets/prepare_coco_semantic_annos_from_panoptic_annos.py deleted file mode 100644 index 3090c9bc2f9a63156a4132e89c635613691eb350..0000000000000000000000000000000000000000 --- a/spaces/PAIR/PAIR-Diffusion/annotator/OneFormer/datasets/prepare_coco_semantic_annos_from_panoptic_annos.py +++ /dev/null @@ -1,84 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright (c) Facebook, Inc. and its affiliates. - -import functools -import json -import multiprocessing as mp -import numpy as np -import os -import time -from fvcore.common.download import download -from panopticapi.utils import rgb2id -from PIL import Image - -from detectron2.data.datasets.builtin_meta import COCO_CATEGORIES - - -def _process_panoptic_to_semantic(input_panoptic, output_semantic, segments, id_map): - panoptic = np.asarray(Image.open(input_panoptic), dtype=np.uint32) - panoptic = rgb2id(panoptic) - output = np.zeros_like(panoptic, dtype=np.uint8) + 255 - for seg in segments: - cat_id = seg["category_id"] - new_cat_id = id_map[cat_id] - output[panoptic == seg["id"]] = new_cat_id - Image.fromarray(output).save(output_semantic) - - -def separate_coco_semantic_from_panoptic(panoptic_json, panoptic_root, sem_seg_root, categories): - """ - Create semantic segmentation annotations from panoptic segmentation - annotations, to be used by PanopticFPN. - It maps all thing categories to class 0, and maps all unlabeled pixels to class 255. - It maps all stuff categories to contiguous ids starting from 1. - Args: - panoptic_json (str): path to the panoptic json file, in COCO's format. - panoptic_root (str): a directory with panoptic annotation files, in COCO's format. - sem_seg_root (str): a directory to output semantic annotation files - categories (list[dict]): category metadata. Each dict needs to have: - "id": corresponds to the "category_id" in the json annotations - "isthing": 0 or 1 - """ - os.makedirs(sem_seg_root, exist_ok=True) - - id_map = {} # map from category id to id in the output semantic annotation - assert len(categories) <= 254 - for i, k in enumerate(categories): - id_map[k["id"]] = i - # what is id = 0? 
- # id_map[0] = 255 - print(id_map) - - with open(panoptic_json) as f: - obj = json.load(f) - - pool = mp.Pool(processes=max(mp.cpu_count() // 2, 4)) - - def iter_annotations(): - for anno in obj["annotations"]: - file_name = anno["file_name"] - segments = anno["segments_info"] - input = os.path.join(panoptic_root, file_name) - output = os.path.join(sem_seg_root, file_name) - yield input, output, segments - - print("Start writing to {} ...".format(sem_seg_root)) - start = time.time() - pool.starmap( - functools.partial(_process_panoptic_to_semantic, id_map=id_map), - iter_annotations(), - chunksize=100, - ) - print("Finished. time: {:.2f}s".format(time.time() - start)) - - -if __name__ == "__main__": - dataset_dir = os.path.join(os.getenv("DETECTRON2_DATASETS", "datasets"), "coco") - for s in ["val2017", "train2017"]: - separate_coco_semantic_from_panoptic( - os.path.join(dataset_dir, "annotations/panoptic_{}.json".format(s)), - os.path.join(dataset_dir, "panoptic_{}".format(s)), - os.path.join(dataset_dir, "panoptic_semseg_{}".format(s)), - COCO_CATEGORIES, - ) diff --git a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/ops/corner_pool.py b/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/ops/corner_pool.py deleted file mode 100644 index a33d798b43d405e4c86bee4cd6389be21ca9c637..0000000000000000000000000000000000000000 --- a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/ops/corner_pool.py +++ /dev/null @@ -1,161 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -from torch import nn -from torch.autograd import Function - -from ..utils import ext_loader - -ext_module = ext_loader.load_ext('_ext', [ - 'top_pool_forward', 'top_pool_backward', 'bottom_pool_forward', - 'bottom_pool_backward', 'left_pool_forward', 'left_pool_backward', - 'right_pool_forward', 'right_pool_backward' -]) - -_mode_dict = {'top': 0, 'bottom': 1, 'left': 2, 'right': 3} - - -class TopPoolFunction(Function): - - @staticmethod - def symbolic(g, input): - output = g.op( - 'mmcv::MMCVCornerPool', input, mode_i=int(_mode_dict['top'])) - return output - - @staticmethod - def forward(ctx, input): - output = ext_module.top_pool_forward(input) - ctx.save_for_backward(input) - return output - - @staticmethod - def backward(ctx, grad_output): - input, = ctx.saved_tensors - output = ext_module.top_pool_backward(input, grad_output) - return output - - -class BottomPoolFunction(Function): - - @staticmethod - def symbolic(g, input): - output = g.op( - 'mmcv::MMCVCornerPool', input, mode_i=int(_mode_dict['bottom'])) - return output - - @staticmethod - def forward(ctx, input): - output = ext_module.bottom_pool_forward(input) - ctx.save_for_backward(input) - return output - - @staticmethod - def backward(ctx, grad_output): - input, = ctx.saved_tensors - output = ext_module.bottom_pool_backward(input, grad_output) - return output - - -class LeftPoolFunction(Function): - - @staticmethod - def symbolic(g, input): - output = g.op( - 'mmcv::MMCVCornerPool', input, mode_i=int(_mode_dict['left'])) - return output - - @staticmethod - def forward(ctx, input): - output = ext_module.left_pool_forward(input) - ctx.save_for_backward(input) - return output - - @staticmethod - def backward(ctx, grad_output): - input, = ctx.saved_tensors - output = ext_module.left_pool_backward(input, grad_output) - return output - - -class RightPoolFunction(Function): - - @staticmethod - def symbolic(g, input): - output = g.op( - 'mmcv::MMCVCornerPool', input, mode_i=int(_mode_dict['right'])) - return output - - @staticmethod - 
def forward(ctx, input): - output = ext_module.right_pool_forward(input) - ctx.save_for_backward(input) - return output - - @staticmethod - def backward(ctx, grad_output): - input, = ctx.saved_tensors - output = ext_module.right_pool_backward(input, grad_output) - return output - - -class CornerPool(nn.Module): - """Corner Pooling. - - Corner Pooling is a new type of pooling layer that helps a - convolutional network better localize corners of bounding boxes. - - Please refer to https://arxiv.org/abs/1808.01244 for more details. - Code is modified from https://github.com/princeton-vl/CornerNet-Lite. - - Args: - mode(str): Pooling orientation for the pooling layer - - - 'bottom': Bottom Pooling - - 'left': Left Pooling - - 'right': Right Pooling - - 'top': Top Pooling - - Returns: - Feature map after pooling. - """ - - pool_functions = { - 'bottom': BottomPoolFunction, - 'left': LeftPoolFunction, - 'right': RightPoolFunction, - 'top': TopPoolFunction, - } - - cummax_dim_flip = { - 'bottom': (2, False), - 'left': (3, True), - 'right': (3, False), - 'top': (2, True), - } - - def __init__(self, mode): - super(CornerPool, self).__init__() - assert mode in self.pool_functions - self.mode = mode - self.corner_pool = self.pool_functions[mode] - - def forward(self, x): - if torch.__version__ != 'parrots' and torch.__version__ >= '1.5.0': - if torch.onnx.is_in_onnx_export(): - assert torch.__version__ >= '1.7.0', \ - 'When `cummax` serves as an intermediate component whose '\ - 'outputs is used as inputs for another modules, it\'s '\ - 'expected that pytorch version must be >= 1.7.0, '\ - 'otherwise Error appears like: `RuntimeError: tuple '\ - 'appears in op that does not forward tuples, unsupported '\ - 'kind: prim::PythonOp`.' - - dim, flip = self.cummax_dim_flip[self.mode] - if flip: - x = x.flip(dim) - pool_tensor, _ = torch.cummax(x, dim=dim) - if flip: - pool_tensor = pool_tensor.flip(dim) - return pool_tensor - else: - return self.corner_pool.apply(x) diff --git a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/ops/voxelize.py b/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/ops/voxelize.py deleted file mode 100644 index ca3226a4fbcbfe58490fa2ea8e1c16b531214121..0000000000000000000000000000000000000000 --- a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/ops/voxelize.py +++ /dev/null @@ -1,132 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -from torch import nn -from torch.autograd import Function -from torch.nn.modules.utils import _pair - -from ..utils import ext_loader - -ext_module = ext_loader.load_ext( - '_ext', ['dynamic_voxelize_forward', 'hard_voxelize_forward']) - - -class _Voxelization(Function): - - @staticmethod - def forward(ctx, - points, - voxel_size, - coors_range, - max_points=35, - max_voxels=20000): - """Convert kitti points(N, >=3) to voxels. - - Args: - points (torch.Tensor): [N, ndim]. Points[:, :3] contain xyz points - and points[:, 3:] contain other information like reflectivity. - voxel_size (tuple or float): The size of voxel with the shape of - [3]. - coors_range (tuple or float): The coordinate range of voxel with - the shape of [6]. - max_points (int, optional): maximum points contained in a voxel. if - max_points=-1, it means using dynamic_voxelize. Default: 35. - max_voxels (int, optional): maximum voxels this function create. - for second, 20000 is a good choice. Users should shuffle points - before call this function because max_voxels may drop points. - Default: 20000. 
- - Returns: - voxels_out (torch.Tensor): Output voxels with the shape of [M, - max_points, ndim]. Only contain points and returned when - max_points != -1. - coors_out (torch.Tensor): Output coordinates with the shape of - [M, 3]. - num_points_per_voxel_out (torch.Tensor): Num points per voxel with - the shape of [M]. Only returned when max_points != -1. - """ - if max_points == -1 or max_voxels == -1: - coors = points.new_zeros(size=(points.size(0), 3), dtype=torch.int) - ext_module.dynamic_voxelize_forward(points, coors, voxel_size, - coors_range, 3) - return coors - else: - voxels = points.new_zeros( - size=(max_voxels, max_points, points.size(1))) - coors = points.new_zeros(size=(max_voxels, 3), dtype=torch.int) - num_points_per_voxel = points.new_zeros( - size=(max_voxels, ), dtype=torch.int) - voxel_num = ext_module.hard_voxelize_forward( - points, voxels, coors, num_points_per_voxel, voxel_size, - coors_range, max_points, max_voxels, 3) - # select the valid voxels - voxels_out = voxels[:voxel_num] - coors_out = coors[:voxel_num] - num_points_per_voxel_out = num_points_per_voxel[:voxel_num] - return voxels_out, coors_out, num_points_per_voxel_out - - -voxelization = _Voxelization.apply - - -class Voxelization(nn.Module): - """Convert kitti points(N, >=3) to voxels. - - Please refer to `PVCNN `_ for more - details. - - Args: - voxel_size (tuple or float): The size of voxel with the shape of [3]. - point_cloud_range (tuple or float): The coordinate range of voxel with - the shape of [6]. - max_num_points (int): maximum points contained in a voxel. if - max_points=-1, it means using dynamic_voxelize. - max_voxels (int, optional): maximum voxels this function create. - for second, 20000 is a good choice. Users should shuffle points - before call this function because max_voxels may drop points. - Default: 20000. 
- """ - - def __init__(self, - voxel_size, - point_cloud_range, - max_num_points, - max_voxels=20000): - super().__init__() - - self.voxel_size = voxel_size - self.point_cloud_range = point_cloud_range - self.max_num_points = max_num_points - if isinstance(max_voxels, tuple): - self.max_voxels = max_voxels - else: - self.max_voxels = _pair(max_voxels) - - point_cloud_range = torch.tensor( - point_cloud_range, dtype=torch.float32) - voxel_size = torch.tensor(voxel_size, dtype=torch.float32) - grid_size = (point_cloud_range[3:] - - point_cloud_range[:3]) / voxel_size - grid_size = torch.round(grid_size).long() - input_feat_shape = grid_size[:2] - self.grid_size = grid_size - # the origin shape is as [x-len, y-len, z-len] - # [w, h, d] -> [d, h, w] - self.pcd_shape = [*input_feat_shape, 1][::-1] - - def forward(self, input): - if self.training: - max_voxels = self.max_voxels[0] - else: - max_voxels = self.max_voxels[1] - - return voxelization(input, self.voxel_size, self.point_cloud_range, - self.max_num_points, max_voxels) - - def __repr__(self): - s = self.__class__.__name__ + '(' - s += 'voxel_size=' + str(self.voxel_size) - s += ', point_cloud_range=' + str(self.point_cloud_range) - s += ', max_num_points=' + str(self.max_num_points) - s += ', max_voxels=' + str(self.max_voxels) - s += ')' - return s diff --git a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmseg/models/losses/utils.py b/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmseg/models/losses/utils.py deleted file mode 100644 index 85aec9f3045240c3de96a928324ae8f5c3aebe8b..0000000000000000000000000000000000000000 --- a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmseg/models/losses/utils.py +++ /dev/null @@ -1,121 +0,0 @@ -import functools - -import annotator.uniformer.mmcv as mmcv -import numpy as np -import torch.nn.functional as F - - -def get_class_weight(class_weight): - """Get class weight for loss function. - - Args: - class_weight (list[float] | str | None): If class_weight is a str, - take it as a file name and read from it. - """ - if isinstance(class_weight, str): - # take it as a file path - if class_weight.endswith('.npy'): - class_weight = np.load(class_weight) - else: - # pkl, json or yaml - class_weight = mmcv.load(class_weight) - - return class_weight - - -def reduce_loss(loss, reduction): - """Reduce loss as specified. - - Args: - loss (Tensor): Elementwise loss tensor. - reduction (str): Options are "none", "mean" and "sum". - - Return: - Tensor: Reduced loss tensor. - """ - reduction_enum = F._Reduction.get_enum(reduction) - # none: 0, elementwise_mean:1, sum: 2 - if reduction_enum == 0: - return loss - elif reduction_enum == 1: - return loss.mean() - elif reduction_enum == 2: - return loss.sum() - - -def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): - """Apply element-wise weight and reduce loss. - - Args: - loss (Tensor): Element-wise loss. - weight (Tensor): Element-wise weights. - reduction (str): Same as built-in losses of PyTorch. - avg_factor (float): Avarage factor when computing the mean of losses. - - Returns: - Tensor: Processed loss values. 
- """ - # if weight is specified, apply element-wise weight - if weight is not None: - assert weight.dim() == loss.dim() - if weight.dim() > 1: - assert weight.size(1) == 1 or weight.size(1) == loss.size(1) - loss = loss * weight - - # if avg_factor is not specified, just reduce the loss - if avg_factor is None: - loss = reduce_loss(loss, reduction) - else: - # if reduction is mean, then average the loss by avg_factor - if reduction == 'mean': - loss = loss.sum() / avg_factor - # if reduction is 'none', then do nothing, otherwise raise an error - elif reduction != 'none': - raise ValueError('avg_factor can not be used with reduction="sum"') - return loss - - -def weighted_loss(loss_func): - """Create a weighted version of a given loss function. - - To use this decorator, the loss function must have the signature like - `loss_func(pred, target, **kwargs)`. The function only needs to compute - element-wise loss without any reduction. This decorator will add weight - and reduction arguments to the function. The decorated function will have - the signature like `loss_func(pred, target, weight=None, reduction='mean', - avg_factor=None, **kwargs)`. - - :Example: - - >>> import torch - >>> @weighted_loss - >>> def l1_loss(pred, target): - >>> return (pred - target).abs() - - >>> pred = torch.Tensor([0, 2, 3]) - >>> target = torch.Tensor([1, 1, 1]) - >>> weight = torch.Tensor([1, 0, 1]) - - >>> l1_loss(pred, target) - tensor(1.3333) - >>> l1_loss(pred, target, weight) - tensor(1.) - >>> l1_loss(pred, target, reduction='none') - tensor([1., 1., 2.]) - >>> l1_loss(pred, target, weight, avg_factor=2) - tensor(1.5000) - """ - - @functools.wraps(loss_func) - def wrapper(pred, - target, - weight=None, - reduction='mean', - avg_factor=None, - **kwargs): - # get element-wise loss - loss = loss_func(pred, target, **kwargs) - loss = weight_reduce_loss(loss, weight, reduction, avg_factor) - return loss - - return wrapper diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/ice-9/runq.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/ice-9/runq.go deleted file mode 100644 index a5eb40984342b818728ae710f73c39b23c36b590..0000000000000000000000000000000000000000 Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/ice-9/runq.go and /dev/null differ diff --git a/spaces/PeepDaSlan9/AutoGPT/tests/__init__.py b/spaces/PeepDaSlan9/AutoGPT/tests/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/models/backbones/resnest.py b/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/models/backbones/resnest.py deleted file mode 100644 index b45a837f395230029e9d4194ff9f7f2f8f7067b0..0000000000000000000000000000000000000000 --- a/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/models/backbones/resnest.py +++ /dev/null @@ -1,314 +0,0 @@ -import math - -import torch -import torch.nn as nn -import torch.nn.functional as F -import torch.utils.checkpoint as cp -from annotator.uniformer.mmcv.cnn import build_conv_layer, build_norm_layer - -from ..builder import BACKBONES -from ..utils import ResLayer -from .resnet import Bottleneck as _Bottleneck -from .resnet import ResNetV1d - - -class RSoftmax(nn.Module): - """Radix Softmax module in ``SplitAttentionConv2d``. - - Args: - radix (int): Radix of input. - groups (int): Groups of input. 
- """ - - def __init__(self, radix, groups): - super().__init__() - self.radix = radix - self.groups = groups - - def forward(self, x): - batch = x.size(0) - if self.radix > 1: - x = x.view(batch, self.groups, self.radix, -1).transpose(1, 2) - x = F.softmax(x, dim=1) - x = x.reshape(batch, -1) - else: - x = torch.sigmoid(x) - return x - - -class SplitAttentionConv2d(nn.Module): - """Split-Attention Conv2d in ResNeSt. - - Args: - in_channels (int): Same as nn.Conv2d. - out_channels (int): Same as nn.Conv2d. - kernel_size (int | tuple[int]): Same as nn.Conv2d. - stride (int | tuple[int]): Same as nn.Conv2d. - padding (int | tuple[int]): Same as nn.Conv2d. - dilation (int | tuple[int]): Same as nn.Conv2d. - groups (int): Same as nn.Conv2d. - radix (int): Radix of SpltAtConv2d. Default: 2 - reduction_factor (int): Reduction factor of inter_channels. Default: 4. - conv_cfg (dict): Config dict for convolution layer. Default: None, - which means using conv2d. - norm_cfg (dict): Config dict for normalization layer. Default: None. - dcn (dict): Config dict for DCN. Default: None. - """ - - def __init__(self, - in_channels, - channels, - kernel_size, - stride=1, - padding=0, - dilation=1, - groups=1, - radix=2, - reduction_factor=4, - conv_cfg=None, - norm_cfg=dict(type='BN'), - dcn=None): - super(SplitAttentionConv2d, self).__init__() - inter_channels = max(in_channels * radix // reduction_factor, 32) - self.radix = radix - self.groups = groups - self.channels = channels - self.with_dcn = dcn is not None - self.dcn = dcn - fallback_on_stride = False - if self.with_dcn: - fallback_on_stride = self.dcn.pop('fallback_on_stride', False) - if self.with_dcn and not fallback_on_stride: - assert conv_cfg is None, 'conv_cfg must be None for DCN' - conv_cfg = dcn - self.conv = build_conv_layer( - conv_cfg, - in_channels, - channels * radix, - kernel_size, - stride=stride, - padding=padding, - dilation=dilation, - groups=groups * radix, - bias=False) - self.norm0_name, norm0 = build_norm_layer( - norm_cfg, channels * radix, postfix=0) - self.add_module(self.norm0_name, norm0) - self.relu = nn.ReLU(inplace=True) - self.fc1 = build_conv_layer( - None, channels, inter_channels, 1, groups=self.groups) - self.norm1_name, norm1 = build_norm_layer( - norm_cfg, inter_channels, postfix=1) - self.add_module(self.norm1_name, norm1) - self.fc2 = build_conv_layer( - None, inter_channels, channels * radix, 1, groups=self.groups) - self.rsoftmax = RSoftmax(radix, groups) - - @property - def norm0(self): - """nn.Module: the normalization layer named "norm0" """ - return getattr(self, self.norm0_name) - - @property - def norm1(self): - """nn.Module: the normalization layer named "norm1" """ - return getattr(self, self.norm1_name) - - def forward(self, x): - x = self.conv(x) - x = self.norm0(x) - x = self.relu(x) - - batch, rchannel = x.shape[:2] - batch = x.size(0) - if self.radix > 1: - splits = x.view(batch, self.radix, -1, *x.shape[2:]) - gap = splits.sum(dim=1) - else: - gap = x - gap = F.adaptive_avg_pool2d(gap, 1) - gap = self.fc1(gap) - - gap = self.norm1(gap) - gap = self.relu(gap) - - atten = self.fc2(gap) - atten = self.rsoftmax(atten).view(batch, -1, 1, 1) - - if self.radix > 1: - attens = atten.view(batch, self.radix, -1, *atten.shape[2:]) - out = torch.sum(attens * splits, dim=1) - else: - out = atten * x - return out.contiguous() - - -class Bottleneck(_Bottleneck): - """Bottleneck block for ResNeSt. - - Args: - inplane (int): Input planes of this block. - planes (int): Middle planes of this block. 
- groups (int): Groups of conv2. - width_per_group (int): Width per group of conv2. 64x4d indicates - ``groups=64, width_per_group=4`` and 32x8d indicates - ``groups=32, width_per_group=8``. - radix (int): Radix of SpltAtConv2d. Default: 2 - reduction_factor (int): Reduction factor of inter_channels in - SplitAttentionConv2d. Default: 4. - avg_down_stride (bool): Whether to use average pool for stride in - Bottleneck. Default: True. - kwargs (dict): Key word arguments for base class. - """ - expansion = 4 - - def __init__(self, - inplanes, - planes, - groups=1, - base_width=4, - base_channels=64, - radix=2, - reduction_factor=4, - avg_down_stride=True, - **kwargs): - """Bottleneck block for ResNeSt.""" - super(Bottleneck, self).__init__(inplanes, planes, **kwargs) - - if groups == 1: - width = self.planes - else: - width = math.floor(self.planes * - (base_width / base_channels)) * groups - - self.avg_down_stride = avg_down_stride and self.conv2_stride > 1 - - self.norm1_name, norm1 = build_norm_layer( - self.norm_cfg, width, postfix=1) - self.norm3_name, norm3 = build_norm_layer( - self.norm_cfg, self.planes * self.expansion, postfix=3) - - self.conv1 = build_conv_layer( - self.conv_cfg, - self.inplanes, - width, - kernel_size=1, - stride=self.conv1_stride, - bias=False) - self.add_module(self.norm1_name, norm1) - self.with_modulated_dcn = False - self.conv2 = SplitAttentionConv2d( - width, - width, - kernel_size=3, - stride=1 if self.avg_down_stride else self.conv2_stride, - padding=self.dilation, - dilation=self.dilation, - groups=groups, - radix=radix, - reduction_factor=reduction_factor, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - dcn=self.dcn) - delattr(self, self.norm2_name) - - if self.avg_down_stride: - self.avd_layer = nn.AvgPool2d(3, self.conv2_stride, padding=1) - - self.conv3 = build_conv_layer( - self.conv_cfg, - width, - self.planes * self.expansion, - kernel_size=1, - bias=False) - self.add_module(self.norm3_name, norm3) - - def forward(self, x): - - def _inner_forward(x): - identity = x - - out = self.conv1(x) - out = self.norm1(out) - out = self.relu(out) - - if self.with_plugins: - out = self.forward_plugin(out, self.after_conv1_plugin_names) - - out = self.conv2(out) - - if self.avg_down_stride: - out = self.avd_layer(out) - - if self.with_plugins: - out = self.forward_plugin(out, self.after_conv2_plugin_names) - - out = self.conv3(out) - out = self.norm3(out) - - if self.with_plugins: - out = self.forward_plugin(out, self.after_conv3_plugin_names) - - if self.downsample is not None: - identity = self.downsample(x) - - out += identity - - return out - - if self.with_cp and x.requires_grad: - out = cp.checkpoint(_inner_forward, x) - else: - out = _inner_forward(x) - - out = self.relu(out) - - return out - - -@BACKBONES.register_module() -class ResNeSt(ResNetV1d): - """ResNeSt backbone. - - Args: - groups (int): Number of groups of Bottleneck. Default: 1 - base_width (int): Base width of Bottleneck. Default: 4 - radix (int): Radix of SpltAtConv2d. Default: 2 - reduction_factor (int): Reduction factor of inter_channels in - SplitAttentionConv2d. Default: 4. - avg_down_stride (bool): Whether to use average pool for stride in - Bottleneck. Default: True. - kwargs (dict): Keyword arguments for ResNet. 
- """ - - arch_settings = { - 50: (Bottleneck, (3, 4, 6, 3)), - 101: (Bottleneck, (3, 4, 23, 3)), - 152: (Bottleneck, (3, 8, 36, 3)), - 200: (Bottleneck, (3, 24, 36, 3)) - } - - def __init__(self, - groups=1, - base_width=4, - radix=2, - reduction_factor=4, - avg_down_stride=True, - **kwargs): - self.groups = groups - self.base_width = base_width - self.radix = radix - self.reduction_factor = reduction_factor - self.avg_down_stride = avg_down_stride - super(ResNeSt, self).__init__(**kwargs) - - def make_res_layer(self, **kwargs): - """Pack all blocks in a stage into a ``ResLayer``.""" - return ResLayer( - groups=self.groups, - base_width=self.base_width, - base_channels=self.base_channels, - radix=self.radix, - reduction_factor=self.reduction_factor, - avg_down_stride=self.avg_down_stride, - **kwargs) diff --git a/spaces/Pie31415/control-animation/annotator/util.py b/spaces/Pie31415/control-animation/annotator/util.py deleted file mode 100644 index 90831643d19cc1b9b0940df3d4fd4d846ba74a05..0000000000000000000000000000000000000000 --- a/spaces/Pie31415/control-animation/annotator/util.py +++ /dev/null @@ -1,38 +0,0 @@ -import numpy as np -import cv2 -import os - - -annotator_ckpts_path = os.path.join(os.path.dirname(__file__), 'ckpts') - - -def HWC3(x): - assert x.dtype == np.uint8 - if x.ndim == 2: - x = x[:, :, None] - assert x.ndim == 3 - H, W, C = x.shape - assert C == 1 or C == 3 or C == 4 - if C == 3: - return x - if C == 1: - return np.concatenate([x, x, x], axis=2) - if C == 4: - color = x[:, :, 0:3].astype(np.float32) - alpha = x[:, :, 3:4].astype(np.float32) / 255.0 - y = color * alpha + 255.0 * (1.0 - alpha) - y = y.clip(0, 255).astype(np.uint8) - return y - - -def resize_image(input_image, resolution): - H, W, C = input_image.shape - H = float(H) - W = float(W) - k = float(resolution) / min(H, W) - H *= k - W *= k - H = int(np.round(H / 64.0)) * 64 - W = int(np.round(W / 64.0)) * 64 - img = cv2.resize(input_image, (W, H), interpolation=cv2.INTER_LANCZOS4 if k > 1 else cv2.INTER_AREA) - return img diff --git a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/data/datasets/object365.py b/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/data/datasets/object365.py deleted file mode 100644 index 0106a059a565d77d9a52b8e0131c0c3db19c7b94..0000000000000000000000000000000000000000 --- a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/data/datasets/object365.py +++ /dev/null @@ -1,8 +0,0 @@ -import torch -import torchvision -import torch.utils.data as data -from maskrcnn_benchmark.data.datasets.coco_dt import CocoDetectionTSV - - -class Object365DetectionTSV(CocoDetectionTSV): - pass diff --git a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/models/__init__.py b/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/models/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Plachta/VALL-E-X/modules/embedding.py b/spaces/Plachta/VALL-E-X/modules/embedding.py deleted file mode 100644 index 17f6c316da3de6a432f4d43f9563800fdb6d58c4..0000000000000000000000000000000000000000 --- a/spaces/Plachta/VALL-E-X/modules/embedding.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright 2023 (authors: Feiteng Li) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import math - -import torch -import torch.nn as nn - - -class TokenEmbedding(nn.Module): - def __init__( - self, - dim_model: int, - vocab_size: int, - dropout: float = 0.0, - ): - super().__init__() - - self.vocab_size = vocab_size - self.dim_model = dim_model - - self.dropout = torch.nn.Dropout(p=dropout) - self.word_embeddings = nn.Embedding(self.vocab_size, self.dim_model) - - @property - def weight(self) -> torch.Tensor: - return self.word_embeddings.weight - - def embedding(self, index: int) -> torch.Tensor: - return self.word_embeddings.weight[index : index + 1] - - def forward(self, x: torch.Tensor): - X = self.word_embeddings(x) - X = self.dropout(X) - - return X - - -class SinePositionalEmbedding(nn.Module): - def __init__( - self, - dim_model: int, - dropout: float = 0.0, - scale: bool = False, - alpha: bool = False, - ): - super().__init__() - self.dim_model = dim_model - self.x_scale = math.sqrt(dim_model) if scale else 1.0 - self.alpha = nn.Parameter(torch.ones(1), requires_grad=alpha) - self.dropout = torch.nn.Dropout(p=dropout) - - self.reverse = False - self.pe = None - self.extend_pe(torch.tensor(0.0).expand(1, 4000)) - - def extend_pe(self, x): - """Reset the positional encodings.""" - if self.pe is not None: - if self.pe.size(1) >= x.size(1): - if self.pe.dtype != x.dtype or self.pe.device != x.device: - self.pe = self.pe.to(dtype=x.dtype, device=x.device) - return - pe = torch.zeros(x.size(1), self.dim_model) - if self.reverse: - position = torch.arange( - x.size(1) - 1, -1, -1.0, dtype=torch.float32 - ).unsqueeze(1) - else: - position = torch.arange( - 0, x.size(1), dtype=torch.float32 - ).unsqueeze(1) - div_term = torch.exp( - torch.arange(0, self.dim_model, 2, dtype=torch.float32) - * -(math.log(10000.0) / self.dim_model) - ) - pe[:, 0::2] = torch.sin(position * div_term) - pe[:, 1::2] = torch.cos(position * div_term) - pe = pe.unsqueeze(0) - self.pe = pe.to(device=x.device, dtype=x.dtype).detach() - - def forward(self, x: torch.Tensor) -> torch.Tensor: - self.extend_pe(x) - output = x.unsqueeze(-1) if x.ndim == 2 else x - output = output * self.x_scale + self.alpha * self.pe[:, : x.size(1)] - return self.dropout(output) diff --git a/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/tests/modules/test_codebooks_patterns.py b/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/tests/modules/test_codebooks_patterns.py deleted file mode 100644 index b658f4779a369f9ec8dde692a61b7f0fe3485724..0000000000000000000000000000000000000000 --- a/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/tests/modules/test_codebooks_patterns.py +++ /dev/null @@ -1,246 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
- -import pytest -import torch - -from audiocraft.modules.codebooks_patterns import ( - DelayedPatternProvider, - ParallelPatternProvider, - Pattern, - UnrolledPatternProvider, -) - - -class TestParallelPatternProvider: - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [0, 1, 16, 100]) - def test_get_pattern(self, n_q: int, timesteps: int): - provider = ParallelPatternProvider(n_q) - pattern = provider.get_pattern(timesteps) - # + 1 to account for 1st step - assert len(pattern.layout) == timesteps + 1 - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [8, 16, 100]) - def test_pattern_content(self, n_q: int, timesteps: int): - provider = ParallelPatternProvider(n_q) - pattern = provider.get_pattern(timesteps) - for s, v in enumerate(pattern.layout): - for i, code in enumerate(v): - assert i == code.q - assert code.t == s - 1 # account for the 1st empty step - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [8, 16, 100]) - def test_pattern_max_delay(self, n_q: int, timesteps: int): - provider = ParallelPatternProvider(n_q) - pattern = provider.get_pattern(timesteps) - assert pattern.max_delay == 0 - assert len(pattern.valid_layout) == len(pattern.layout) - pattern.max_delay - - -class TestDelayedPatternProvider: - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [0, 1, 16, 100]) - def test_get_pattern(self, n_q: int, timesteps: int): - delays = [ - list(range(n_q)), - [0] + [1] * (n_q - 1), - [0] + [4] * (n_q - 1), - ] - for delay in delays: - provider = DelayedPatternProvider(n_q, delay) - pattern = provider.get_pattern(timesteps) - # + 1 to account for 1st step - assert len(pattern.layout) == timesteps + max(delay) + 1 - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [8, 16, 100]) - def test_pattern_content(self, n_q: int, timesteps: int): - provider = DelayedPatternProvider(n_q) - pattern = provider.get_pattern(timesteps) - for s, v in enumerate(pattern.layout): - for i, code in enumerate(v): - assert i == code.q - assert code.t == max(0, s - code.q - 1) - - @pytest.mark.parametrize("timesteps", [8, 16, 100]) - @pytest.mark.parametrize("delay", [[0, 1, 2, 3], [0, 1, 1, 1], [0, 3, 3, 3], [0, 3]]) - def test_pattern_max_delay(self, timesteps: int, delay: list): - provider = DelayedPatternProvider(len(delay), delay) - pattern = provider.get_pattern(timesteps) - assert pattern.max_delay == max(delay) - assert len(pattern.valid_layout) == len(pattern.layout) - pattern.max_delay - - -class TestUnrolledPatternProvider: - - @pytest.mark.parametrize("timesteps", [0, 1, 16]) - @pytest.mark.parametrize("flattening", [[0, 1, 2], [0, 1, 1]]) - @pytest.mark.parametrize("delays", [[0, 0, 0], [0, 5, 5]]) - def test_get_pattern(self, timesteps: int, flattening: list, delays: list): - n_q = len(flattening) - max_delay = max(delays) - provider = UnrolledPatternProvider(n_q, flattening, delays) - pattern = provider.get_pattern(timesteps) - assert len(pattern.layout) == provider.num_virtual_steps(timesteps) + max_delay - - @pytest.mark.parametrize("timesteps", [0, 1, 16]) - @pytest.mark.parametrize("flattening", [[0, 1, 2], [0, 1, 1]]) - @pytest.mark.parametrize("delays", [[0, 0, 0], [0, 5, 5]]) - def test_pattern_max_delay(self, timesteps: int, flattening: list, delays: list): - n_q = len(flattening) - max_delay = max(delays) - provider = UnrolledPatternProvider(n_q, flattening, delays) - pattern = 
provider.get_pattern(timesteps) - assert pattern.max_delay == max_delay - - -class TestPattern: - - def ref_build_pattern_sequence(self, z: torch.Tensor, pattern: Pattern, special_token: int): - """Reference method to build the sequence from the pattern without using fancy scatter.""" - bs, n_q, T = z.shape - z = z.cpu().numpy() - assert n_q == pattern.n_q - assert T <= pattern.timesteps - inp = torch.full((bs, n_q, len(pattern.layout)), special_token, dtype=torch.long).numpy() - inp[:] = special_token - for s, v in enumerate(pattern.layout): - for (t, q) in v: - if t < T: - inp[:, q, s] = z[:, q, t] - return torch.from_numpy(inp) - - def ref_revert_pattern_sequence(self, z: torch.Tensor, pattern: Pattern, special_token: int): - """Reference method to revert the sequence from the pattern without using fancy scatter.""" - z = z.cpu().numpy() - bs, n_q, S = z.shape - assert pattern.n_q == n_q - inp = torch.full((bs, pattern.n_q, pattern.timesteps), special_token, dtype=torch.long).numpy() - inp[:] = special_token - for s, v in enumerate(pattern.layout): - for (t, q) in v: - if t < pattern.timesteps: - inp[:, q, t] = z[:, q, s] - return torch.from_numpy(inp) - - def ref_revert_pattern_logits(self, z: torch.Tensor, pattern: Pattern, special_token: float): - """Reference method to revert the logits from the pattern without using fancy scatter.""" - z = z.cpu().numpy() - bs, card, n_q, S = z.shape - assert pattern.n_q == n_q - ref_layout = pattern.layout - inp = torch.full((bs, card, pattern.n_q, pattern.timesteps), special_token, dtype=torch.float).numpy() - inp[:] = special_token - for s, v in enumerate(ref_layout[1:]): - if s < S: - for (t, q) in v: - if t < pattern.timesteps: - inp[:, :, q, t] = z[:, :, q, s] - return torch.from_numpy(inp) - - def _get_pattern_providers(self, n_q: int): - pattern_provider_1 = ParallelPatternProvider(n_q) - pattern_provider_2 = DelayedPatternProvider(n_q, list(range(n_q))) - pattern_provider_3 = DelayedPatternProvider(n_q, [0] + [1] * (n_q - 1)) - pattern_provider_4 = UnrolledPatternProvider( - n_q, flattening=list(range(n_q)), delays=[0] * n_q - ) - pattern_provider_5 = UnrolledPatternProvider( - n_q, flattening=[0] + [1] * (n_q - 1), delays=[0] * n_q - ) - pattern_provider_6 = UnrolledPatternProvider( - n_q, flattening=[0] + [1] * (n_q - 1), delays=[0] + [5] * (n_q - 1) - ) - return [ - pattern_provider_1, - pattern_provider_2, - pattern_provider_3, - pattern_provider_4, - pattern_provider_5, - pattern_provider_6, - ] - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [16, 72]) - def test_build_pattern_sequence(self, n_q: int, timesteps: int): - bs = 2 - card = 256 - special_token = card - - pattern_providers = self._get_pattern_providers(n_q) - for pattern_provider in pattern_providers: - pattern = pattern_provider.get_pattern(timesteps) - # we can correctly build the sequence from the pattern - z = torch.randint(0, card, (bs, n_q, timesteps)) - ref_res = self.ref_build_pattern_sequence(z, pattern, special_token) - res, indexes, mask = pattern.build_pattern_sequence(z, special_token) - assert (res == ref_res).float().mean() == 1.0 - - # expected assertion fails on the number of timesteps - invalid_timesteps = [timesteps + 1] - if pattern.num_sequence_steps != pattern.timesteps: - invalid_timesteps.append(pattern.num_sequence_steps) - for i_timesteps in invalid_timesteps: - z2 = torch.randint(0, card, (bs, n_q, i_timesteps)) - with pytest.raises(AssertionError): - pattern.build_pattern_sequence(z2, special_token) - - # 
expected assertion fails on the number of codebooks - invalid_qs = [0, n_q - 1, n_q + 1] - for i_q in invalid_qs: - z3 = torch.randint(0, card, (bs, i_q, timesteps)) - with pytest.raises(AssertionError): - pattern.build_pattern_sequence(z3, special_token) - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [16, 72]) - def test_revert_pattern_sequence(self, n_q: int, timesteps: int): - bs = 2 - card = 256 - special_token = card - - pattern_providers = self._get_pattern_providers(n_q) - for pattern_provider in pattern_providers: - pattern = pattern_provider.get_pattern(timesteps) - # this works assuming previous tests are successful - z = torch.randint(0, card, (bs, n_q, timesteps)) - s = self.ref_build_pattern_sequence(z, pattern, special_token) - ref_out = self.ref_revert_pattern_sequence(s, pattern, special_token) - # ensure our reference script retrieve the original sequence - assert z.shape == ref_out.shape - assert (z == ref_out).float().mean() == 1.0 - # now we can test the scatter version - out, indexes, mask = pattern.revert_pattern_sequence(s, special_token) - assert out.shape == ref_out.shape - assert (out == ref_out).float().mean() == 1.0 - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [16, 72]) - @pytest.mark.parametrize("card", [1, 2, 256, 1024]) - def test_revert_pattern_logits(self, n_q: int, timesteps: int, card: int): - bs = 2 - special_token = card - logits_special_token = float('nan') - - pattern_providers = self._get_pattern_providers(n_q) - for pattern_provider in pattern_providers: - pattern = pattern_provider.get_pattern(timesteps) - # this works assuming previous tests are successful - z = torch.randint(0, card, (bs, n_q, timesteps)) - s = self.ref_build_pattern_sequence(z, pattern, special_token) - logits = torch.randn((bs, card, n_q, s.shape[-1])) - ref_out = self.ref_revert_pattern_logits(logits, pattern, logits_special_token) - # ensure our reference script retrieve the original sequence - assert ref_out.shape == torch.Size([bs, card, n_q, timesteps]) - # now we can test the scatter version - out, indexes, mask = pattern.revert_pattern_logits(logits, logits_special_token) - assert out.shape == ref_out.shape - assert (out == ref_out).float().mean() == 1.0 diff --git a/spaces/ProteinDesignLab/protpardelle/output_helpers.py b/spaces/ProteinDesignLab/protpardelle/output_helpers.py deleted file mode 100644 index ec42d57fd16bae401e1e0f5178694668e4bc75ef..0000000000000000000000000000000000000000 --- a/spaces/ProteinDesignLab/protpardelle/output_helpers.py +++ /dev/null @@ -1,1566 +0,0 @@ - -import json -import os - -local_url = "http://localhost:8888/frontend/" -remote_url = "https://cdn.jsdelivr.net/gh/duerrsimon/vue_3dmol_gradio@v.02/" - - -if os.environ.get("GRADIO_LOCAL") is not None: - url = local_url -else: - url = remote_url - -def viewer_html(path_to_file, name="input", selectionStyle={"color": "greenCarbon", - "representation": "cartoon", - "multiple": True}, representations={}): - - ext = path_to_file.split(".")[-1] - with open(path_to_file, "r") as f: - data = f.read() - moldata = {"moldata": [{"data": data, "name": name, "selectionStyle": selectionStyle,"format": ext, - "selectable": True, - "asFrames":False, - "clickable": False}] - } - # dict to json - moldata = json.dumps(moldata) - representations = json.dumps(representations) - - return """ - - - - - - - - - - Molecule View - - - - - -
    [viewer_html template body: the HTML/JS markup of this Vue + 3Dmol.js viewer page was
    stripped during extraction; only its text labels survive. Recoverable elements: a hint
    banner ("Resampling the whole PDB file, select residues in the sequence view to resample
    only parts of the structure" / "Resampling the selected residues"), a confidence legend
    (Very high: {{confidenceLabel}} > 90, Confident: 90 > {{confidenceLabel}} > 70,
    Low: 70 > {{confidenceLabel}} > 50, Very low: {{confidenceLabel}} < 50), a clickable
    per-residue sequence view ({{aa.resn}}), a Settings panel ({{config[setting]}}), a
    Representations panel, and an "Expand selection" control in Å with "Full residue" /
    "Only sidechain" options.]
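    Since the original markup cannot be reconstructed, the following is only a minimal sketch
    of the pattern this template implements: handing the PDB text built by viewer_html() to a
    3Dmol.js viewer (the page loads the duerrsimon/vue_3dmol_gradio frontend noted above).
    The script URL, the "viewer" element id, and the assumption that the JSON is exposed to
    the page as a global `moldata` are illustrative guesses, not recovered code.

    <!-- Hypothetical sketch only: ids and globals are illustrative, not recovered. -->
    <script src="https://3Dmol.org/build/3Dmol-min.js"></script>
    <div id="viewer" style="width: 640px; height: 480px; position: relative;"></div>
    <script>
      // Assume the JSON built in viewer_html() is available to the page as `moldata`.
      const entry = moldata["moldata"][0];            // {data, name, format, ...}
      const viewer = $3Dmol.createViewer("viewer", { backgroundColor: "white" });
      viewer.addModel(entry.data, entry.format);      // e.g. a PDB string with format "pdb"
      viewer.setStyle({}, { cartoon: { color: "spectrum" } });
      viewer.zoomTo();
      viewer.render();
    </script>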
- - - -""" -import os -import subprocess -import shlex - -def get_color(i): - colors= ["orange", "cyan", "blue", "yellow", "magenta"] - return colors[i % len(colors)]+"Carbon" - - -def output_html(path_to_file,path_to_designs,metrics, resample_idx="",mode="unconditional", selectionStyle={"color": "greenCarbon", - "representation": "cartoon", - "multiple": True}): - - if mode=="conditional": - ext = path_to_file.split(".")[-1] - with open(path_to_file, "r") as f: - data = f.read() - moldata = [{"data": data, "name": os.path.basename(path_to_file), - "selectionStyle": selectionStyle, - "format": ext, - "selectable": True, - "asFrames":False, - "clickable": False}] - representations = [{ - "model": 0, - "chain": "", - "resname": "", - "style": "cartoon", - "color": "whiteCarbon", - "residue_range": "", - "around": 0, - "byres": False, - "visible": False, - }] - if resample_idx!="": - representations.append({ - "model": 0, - "chain": "", - "resname": "", - "style": "cartoon", - "color": "greenCarbon", - "residue_range": resample_idx[1:-1], #remove leading and trailing quotes - "around": 0, - "byres": False, - "visible": False, - }) - # move file from temp to save dir - subprocess.run(shlex.split(f"cp {path_to_file} {os.path.dirname(path_to_designs[0])}/template.pdb")) - path_to_file = f"{os.path.dirname(path_to_designs[0])}/template.pdb" - designs = [{ - "model":0, - "name":"template.pdb", - "fullpath": path_to_file, - "len":76, - "metric":{ - "resample idx": resample_idx[1:-1], - }, - "visible":True, - "color":"gray" - }] - add_index = 1 - else: - designs = [] - moldata = [] - representations = [] - add_index = 0 - - for i,d in enumerate(path_to_designs): - ext = d.split(".")[-1] - with open(d, "r") as f: - data = f.read() - moldata.append({"data": data, "name": os.path.basename(d), - "selectionStyle": selectionStyle, - "format": ext, - "selectable": True, - "asFrames":False, - "clickable": False}) - representations.append({ - "model": i+add_index, - "chain": "", - "resname": "", - "style": "cartoon", - "color": get_color(i), - "residue_range": "", - "around": 0, - "byres": False, - "visible": False, - }) - designs.append({ - "model":i+add_index, - "fullpath": d, - "name":os.path.basename(d), - "metric":metrics[i], - "visible":True, - "color":"" - }) - # dict to json - moldata = json.dumps(moldata) - representations = json.dumps(representations) - designs = json.dumps(designs) - return """ - - - - - - - - Molecule View - - - - -
    [output_html template body: likewise stripped during extraction. Its surviving labels show
    the same viewer layout as viewer_html (confidence legend, per-residue sequence view
    {{aa.resn}}, Settings panel {{config[setting]}}, Representations panel, "Expand selection"
    in Å with "Full residue" / "Only sidechain" options), plus a designs table that lists each
    generated structure ({{d.model}} {{d.name}}) together with its metrics ({{k}}: {{v}}) and
    per-design visibility toggles.]
- - - - - - -""" - -load_js = """ - -async () => { - // create empty textarea with id selectedAtoms that is hidden - // and append it to the body - var selectedAtoms = document.createElement("textarea"); - selectedAtoms.id = "selectedAtoms"; - selectedAtoms.style.display = "none"; - document.body.appendChild(selectedAtoms); - -window.onmessage = function(e) { -selectedAtoms.value = JSON.stringify(e.data); -}; -} -""" - -get_js = """ -async (resample_idxs) => { - - var selectedAtoms = document.getElementById("selectedAtoms"); - var selectedAtomsValue = selectedAtoms.value; - - var hasNumber = /\d/; - - - if (hasNumber.test(selectedAtomsValue)==false) { - selectedAtomsValue = resample_idxs.replace(/.$/,' "') - - } - return selectedAtomsValue; - -} -""" diff --git a/spaces/PrussianBlue/White-box-Cartoonization/wbc/cartoonize.py b/spaces/PrussianBlue/White-box-Cartoonization/wbc/cartoonize.py deleted file mode 100644 index 25faf1ceb95aaed9a3f7a7982d17a03dc6bc32b1..0000000000000000000000000000000000000000 --- a/spaces/PrussianBlue/White-box-Cartoonization/wbc/cartoonize.py +++ /dev/null @@ -1,112 +0,0 @@ -import os -import cv2 -import numpy as np -import tensorflow as tf -import wbc.network as network -import wbc.guided_filter as guided_filter -from tqdm import tqdm - - -def resize_crop(image): - h, w, c = np.shape(image) - if min(h, w) > 720: - if h > w: - h, w = int(720 * h / w), 720 - else: - h, w = 720, int(720 * w / h) - image = cv2.resize(image, (w, h), - interpolation=cv2.INTER_AREA) - h, w = (h // 8) * 8, (w // 8) * 8 - image = image[:h, :w, :] - return image - - -def cartoonize(load_folder, save_folder, model_path): - print(model_path) - input_photo = tf.placeholder(tf.float32, [1, None, None, 3]) - network_out = network.unet_generator(input_photo) - final_out = guided_filter.guided_filter(input_photo, network_out, r=1, eps=5e-3) - - all_vars = tf.trainable_variables() - gene_vars = [var for var in all_vars if 'generator' in var.name] - saver = tf.train.Saver(var_list=gene_vars) - - config = tf.ConfigProto() - config.gpu_options.allow_growth = True - sess = tf.Session(config=config) - - sess.run(tf.global_variables_initializer()) - saver.restore(sess, tf.train.latest_checkpoint(model_path)) - name_list = os.listdir(load_folder) - for name in tqdm(name_list): - try: - load_path = os.path.join(load_folder, name) - save_path = os.path.join(save_folder, name) - image = cv2.imread(load_path) - image = resize_crop(image) - batch_image = image.astype(np.float32) / 127.5 - 1 - batch_image = np.expand_dims(batch_image, axis=0) - output = sess.run(final_out, feed_dict={input_photo: batch_image}) - output = (np.squeeze(output) + 1) * 127.5 - output = np.clip(output, 0, 255).astype(np.uint8) - cv2.imwrite(save_path, output) - except: - print('cartoonize {} failed'.format(load_path)) - - -class Cartoonize: - def __init__(self, model_path): - print(model_path) - self.input_photo = tf.placeholder(tf.float32, [1, None, None, 3]) - network_out = network.unet_generator(self.input_photo) - self.final_out = guided_filter.guided_filter(self.input_photo, network_out, r=1, eps=5e-3) - - all_vars = tf.trainable_variables() - gene_vars = [var for var in all_vars if 'generator' in var.name] - saver = tf.train.Saver(var_list=gene_vars) - - config = tf.ConfigProto() - config.gpu_options.allow_growth = True - self.sess = tf.Session(config=config) - - self.sess.run(tf.global_variables_initializer()) - saver.restore(self.sess, tf.train.latest_checkpoint(model_path)) - - def run(self, load_folder, save_folder): - 
name_list = os.listdir(load_folder) - for name in tqdm(name_list): - try: - load_path = os.path.join(load_folder, name) - save_path = os.path.join(save_folder, name) - image = cv2.imread(load_path) - image = resize_crop(image) - batch_image = image.astype(np.float32) / 127.5 - 1 - batch_image = np.expand_dims(batch_image, axis=0) - output = self.sess.run(self.final_out, feed_dict={self.input_photo: batch_image}) - output = (np.squeeze(output) + 1) * 127.5 - output = np.clip(output, 0, 255).astype(np.uint8) - cv2.imwrite(save_path, output) - except: - print('cartoonize {} failed'.format(load_path)) - - def run_sigle(self, load_path, save_path): - try: - image = cv2.imread(load_path) - image = resize_crop(image) - batch_image = image.astype(np.float32) / 127.5 - 1 - batch_image = np.expand_dims(batch_image, axis=0) - output = self.sess.run(self.final_out, feed_dict={self.input_photo: batch_image}) - output = (np.squeeze(output) + 1) * 127.5 - output = np.clip(output, 0, 255).astype(np.uint8) - cv2.imwrite(save_path, output) - except: - print('cartoonize {} failed'.format(load_path)) - - -if __name__ == '__main__': - model_path = 'saved_models' - load_folder = 'test_images' - save_folder = 'cartoonized_images' - if not os.path.exists(save_folder): - os.mkdir(save_folder) - cartoonize(load_folder, save_folder, model_path) diff --git a/spaces/Purple11/Grounded-Diffusion/src/taming-transformers/taming/data/annotated_objects_coco.py b/spaces/Purple11/Grounded-Diffusion/src/taming-transformers/taming/data/annotated_objects_coco.py deleted file mode 100644 index af000ecd943d7b8a85d7eb70195c9ecd10ab5edc..0000000000000000000000000000000000000000 --- a/spaces/Purple11/Grounded-Diffusion/src/taming-transformers/taming/data/annotated_objects_coco.py +++ /dev/null @@ -1,139 +0,0 @@ -import json -from itertools import chain -from pathlib import Path -from typing import Iterable, Dict, List, Callable, Any -from collections import defaultdict - -from tqdm import tqdm - -from taming.data.annotated_objects_dataset import AnnotatedObjectsDataset -from taming.data.helper_types import Annotation, ImageDescription, Category - -COCO_PATH_STRUCTURE = { - 'train': { - 'top_level': '', - 'instances_annotations': 'annotations/instances_train2017.json', - 'stuff_annotations': 'annotations/stuff_train2017.json', - 'files': 'train2017' - }, - 'validation': { - 'top_level': '', - 'instances_annotations': 'annotations/instances_val2017.json', - 'stuff_annotations': 'annotations/stuff_val2017.json', - 'files': 'val2017' - } -} - - -def load_image_descriptions(description_json: List[Dict]) -> Dict[str, ImageDescription]: - return { - str(img['id']): ImageDescription( - id=img['id'], - license=img.get('license'), - file_name=img['file_name'], - coco_url=img['coco_url'], - original_size=(img['width'], img['height']), - date_captured=img.get('date_captured'), - flickr_url=img.get('flickr_url') - ) - for img in description_json - } - - -def load_categories(category_json: Iterable) -> Dict[str, Category]: - return {str(cat['id']): Category(id=str(cat['id']), super_category=cat['supercategory'], name=cat['name']) - for cat in category_json if cat['name'] != 'other'} - - -def load_annotations(annotations_json: List[Dict], image_descriptions: Dict[str, ImageDescription], - category_no_for_id: Callable[[str], int], split: str) -> Dict[str, List[Annotation]]: - annotations = defaultdict(list) - total = sum(len(a) for a in annotations_json) - for ann in tqdm(chain(*annotations_json), f'Loading {split} annotations', total=total): - 
image_id = str(ann['image_id']) - if image_id not in image_descriptions: - raise ValueError(f'image_id [{image_id}] has no image description.') - category_id = ann['category_id'] - try: - category_no = category_no_for_id(str(category_id)) - except KeyError: - continue - - width, height = image_descriptions[image_id].original_size - bbox = (ann['bbox'][0] / width, ann['bbox'][1] / height, ann['bbox'][2] / width, ann['bbox'][3] / height) - - annotations[image_id].append( - Annotation( - id=ann['id'], - area=bbox[2]*bbox[3], # use bbox area - is_group_of=ann['iscrowd'], - image_id=ann['image_id'], - bbox=bbox, - category_id=str(category_id), - category_no=category_no - ) - ) - return dict(annotations) - - -class AnnotatedObjectsCoco(AnnotatedObjectsDataset): - def __init__(self, use_things: bool = True, use_stuff: bool = True, **kwargs): - """ - @param data_path: is the path to the following folder structure: - coco/ - ├── annotations - │ ├── instances_train2017.json - │ ├── instances_val2017.json - │ ├── stuff_train2017.json - │ └── stuff_val2017.json - ├── train2017 - │ ├── 000000000009.jpg - │ ├── 000000000025.jpg - │ └── ... - ├── val2017 - │ ├── 000000000139.jpg - │ ├── 000000000285.jpg - │ └── ... - @param: split: one of 'train' or 'validation' - @param: desired image size (give square images) - """ - super().__init__(**kwargs) - self.use_things = use_things - self.use_stuff = use_stuff - - with open(self.paths['instances_annotations']) as f: - inst_data_json = json.load(f) - with open(self.paths['stuff_annotations']) as f: - stuff_data_json = json.load(f) - - category_jsons = [] - annotation_jsons = [] - if self.use_things: - category_jsons.append(inst_data_json['categories']) - annotation_jsons.append(inst_data_json['annotations']) - if self.use_stuff: - category_jsons.append(stuff_data_json['categories']) - annotation_jsons.append(stuff_data_json['annotations']) - - self.categories = load_categories(chain(*category_jsons)) - self.filter_categories() - self.setup_category_id_and_number() - - self.image_descriptions = load_image_descriptions(inst_data_json['images']) - annotations = load_annotations(annotation_jsons, self.image_descriptions, self.get_category_number, self.split) - self.annotations = self.filter_object_number(annotations, self.min_object_area, - self.min_objects_per_image, self.max_objects_per_image) - self.image_ids = list(self.annotations.keys()) - self.clean_up_annotations_and_image_descriptions() - - def get_path_structure(self) -> Dict[str, str]: - if self.split not in COCO_PATH_STRUCTURE: - raise ValueError(f'Split [{self.split} does not exist for COCO data.]') - return COCO_PATH_STRUCTURE[self.split] - - def get_image_path(self, image_id: str) -> Path: - return self.paths['files'].joinpath(self.image_descriptions[str(image_id)].file_name) - - def get_image_description(self, image_id: str) -> Dict[str, Any]: - # noinspection PyProtectedMember - return self.image_descriptions[image_id]._asdict() diff --git a/spaces/QianFeng/White-box-Cartoonization2308/wbc/guided_filter.py b/spaces/QianFeng/White-box-Cartoonization2308/wbc/guided_filter.py deleted file mode 100644 index fd019d145efc7f308cd96de90f4e7b648f6820b4..0000000000000000000000000000000000000000 --- a/spaces/QianFeng/White-box-Cartoonization2308/wbc/guided_filter.py +++ /dev/null @@ -1,87 +0,0 @@ -import tensorflow as tf -import numpy as np - - - - -def tf_box_filter(x, r): - k_size = int(2*r+1) - ch = x.get_shape().as_list()[-1] - weight = 1/(k_size**2) - box_kernel = weight*np.ones((k_size, k_size, ch, 1)) 
- box_kernel = np.array(box_kernel).astype(np.float32) - output = tf.nn.depthwise_conv2d(x, box_kernel, [1, 1, 1, 1], 'SAME') - return output - - - -def guided_filter(x, y, r, eps=1e-2): - - x_shape = tf.shape(x) - #y_shape = tf.shape(y) - - N = tf_box_filter(tf.ones((1, x_shape[1], x_shape[2], 1), dtype=x.dtype), r) - - mean_x = tf_box_filter(x, r) / N - mean_y = tf_box_filter(y, r) / N - cov_xy = tf_box_filter(x * y, r) / N - mean_x * mean_y - var_x = tf_box_filter(x * x, r) / N - mean_x * mean_x - - A = cov_xy / (var_x + eps) - b = mean_y - A * mean_x - - mean_A = tf_box_filter(A, r) / N - mean_b = tf_box_filter(b, r) / N - - output = mean_A * x + mean_b - - return output - - - -def fast_guided_filter(lr_x, lr_y, hr_x, r=1, eps=1e-8): - - #assert lr_x.shape.ndims == 4 and lr_y.shape.ndims == 4 and hr_x.shape.ndims == 4 - - lr_x_shape = tf.shape(lr_x) - #lr_y_shape = tf.shape(lr_y) - hr_x_shape = tf.shape(hr_x) - - N = tf_box_filter(tf.ones((1, lr_x_shape[1], lr_x_shape[2], 1), dtype=lr_x.dtype), r) - - mean_x = tf_box_filter(lr_x, r) / N - mean_y = tf_box_filter(lr_y, r) / N - cov_xy = tf_box_filter(lr_x * lr_y, r) / N - mean_x * mean_y - var_x = tf_box_filter(lr_x * lr_x, r) / N - mean_x * mean_x - - A = cov_xy / (var_x + eps) - b = mean_y - A * mean_x - - mean_A = tf.image.resize_images(A, hr_x_shape[1: 3]) - mean_b = tf.image.resize_images(b, hr_x_shape[1: 3]) - - output = mean_A * hr_x + mean_b - - return output - - -if __name__ == '__main__': - import cv2 - from tqdm import tqdm - - input_photo = tf.placeholder(tf.float32, [1, None, None, 3]) - #input_superpixel = tf.placeholder(tf.float32, [16, 256, 256, 3]) - output = guided_filter(input_photo, input_photo, 5, eps=1) - image = cv2.imread('output_figure1/cartoon2.jpg') - image = image/127.5 - 1 - image = np.expand_dims(image, axis=0) - - config = tf.ConfigProto() - config.gpu_options.allow_growth = True - sess = tf.Session(config=config) - sess.run(tf.global_variables_initializer()) - - out = sess.run(output, feed_dict={input_photo: image}) - out = (np.squeeze(out)+1)*127.5 - out = np.clip(out, 0, 255).astype(np.uint8) - cv2.imwrite('output_figure1/cartoon2_filter.jpg', out) diff --git a/spaces/RamAnanth1/T2I-Adapter/gradio_pose.py b/spaces/RamAnanth1/T2I-Adapter/gradio_pose.py deleted file mode 100644 index 3b0f50b5599431173444c3f070787e695c4c0ef3..0000000000000000000000000000000000000000 --- a/spaces/RamAnanth1/T2I-Adapter/gradio_pose.py +++ /dev/null @@ -1,32 +0,0 @@ -import gradio as gr - -def create_demo(process): - block = gr.Blocks().queue() - with block: - with gr.Row(): - with gr.Column(): - input_img = gr.Image(source='upload', type="numpy") - prompt = gr.Textbox(label="Prompt") - neg_prompt = gr.Textbox(label="Negative Prompt", - value='ugly, tiling, poorly drawn hands, poorly drawn feet, poorly drawn face, out of frame, extra limbs, disfigured, deformed, body out of frame, bad anatomy, watermark, signature, cut off, low contrast, underexposed, overexposed, bad art, beginner, amateur, distorted face') - run_button = gr.Button(label="Run") - with gr.Accordion("Advanced options", open=False): - con_strength = gr.Slider(label="Controling Strength (The guidance strength of the sketch to the result)", minimum=0, maximum=1, value=0.4, step=0.1) - scale = gr.Slider(label="Guidance Scale (Classifier free guidance)", minimum=0.1, maximum=30.0, value=7.5, step=0.1) - fix_sample = gr.inputs.Radio(['True', 'False'], type="value", default='False', label='Fix Sampling\n (Fix the random seed)') - base_model = 
gr.inputs.Radio(['sd-v1-4.ckpt', 'anything-v4.0-pruned.ckpt'], type="value", default='sd-v1-4.ckpt', label='The base model you want to use') - with gr.Column(): - result = gr.Gallery(label='Output', show_label=False, elem_id="gallery").style(grid=2, height='auto') - ips = [input_img,prompt, neg_prompt, fix_sample, scale, con_strength, base_model] - run_button.click(fn=process, inputs=ips, outputs=[result]) - - examples_list = [["human.png", "beautiful girl", - "ugly, tiling, poorly drawn hands, poorly drawn feet, poorly drawn face, out of frame, extra limbs, disfigured, deformed, body out of frame, bad anatomy, watermark, signature, cut off, low contrast, underexposed, overexposed, bad art, beginner, amateur, distorted face", - 'True', - 7.5, - 0.4, - 'anything-v4.0-pruned.ckpt']] - - examples = gr.Examples(examples=examples_list,inputs = [input_img, prompt,neg_prompt, fix_sample, scale, con_strength,base_model], outputs = [result], cache_examples = True, fn = process) - - return block \ No newline at end of file diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/requests/utils.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/requests/utils.py deleted file mode 100644 index 33f394d265d5da17dd5b3c2467e2e4e71af1395d..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/requests/utils.py +++ /dev/null @@ -1,1086 +0,0 @@ -""" -requests.utils -~~~~~~~~~~~~~~ - -This module provides utility functions that are used within Requests -that are also useful for external consumption. -""" - -import codecs -import contextlib -import io -import os -import re -import socket -import struct -import sys -import tempfile -import warnings -import zipfile -from collections import OrderedDict - -from pip._vendor.urllib3.util import make_headers, parse_url - -from . import certs -from .__version__ import __version__ - -# to_native_string is unused here, but imported here for backwards compatibility -from ._internal_utils import HEADER_VALIDATORS, to_native_string # noqa: F401 -from .compat import ( - Mapping, - basestring, - bytes, - getproxies, - getproxies_environment, - integer_types, -) -from .compat import parse_http_list as _parse_list_header -from .compat import ( - proxy_bypass, - proxy_bypass_environment, - quote, - str, - unquote, - urlparse, - urlunparse, -) -from .cookies import cookiejar_from_dict -from .exceptions import ( - FileModeWarning, - InvalidHeader, - InvalidURL, - UnrewindableBodyError, -) -from .structures import CaseInsensitiveDict - -NETRC_FILES = (".netrc", "_netrc") - -DEFAULT_CA_BUNDLE_PATH = certs.where() - -DEFAULT_PORTS = {"http": 80, "https": 443} - -# Ensure that ', ' is used to preserve previous delimiter behavior. 
-DEFAULT_ACCEPT_ENCODING = ", ".join( - re.split(r",\s*", make_headers(accept_encoding=True)["accept-encoding"]) -) - - -if sys.platform == "win32": - # provide a proxy_bypass version on Windows without DNS lookups - - def proxy_bypass_registry(host): - try: - import winreg - except ImportError: - return False - - try: - internetSettings = winreg.OpenKey( - winreg.HKEY_CURRENT_USER, - r"Software\Microsoft\Windows\CurrentVersion\Internet Settings", - ) - # ProxyEnable could be REG_SZ or REG_DWORD, normalizing it - proxyEnable = int(winreg.QueryValueEx(internetSettings, "ProxyEnable")[0]) - # ProxyOverride is almost always a string - proxyOverride = winreg.QueryValueEx(internetSettings, "ProxyOverride")[0] - except (OSError, ValueError): - return False - if not proxyEnable or not proxyOverride: - return False - - # make a check value list from the registry entry: replace the - # '' string by the localhost entry and the corresponding - # canonical entry. - proxyOverride = proxyOverride.split(";") - # now check if we match one of the registry values. - for test in proxyOverride: - if test == "": - if "." not in host: - return True - test = test.replace(".", r"\.") # mask dots - test = test.replace("*", r".*") # change glob sequence - test = test.replace("?", r".") # change glob char - if re.match(test, host, re.I): - return True - return False - - def proxy_bypass(host): # noqa - """Return True, if the host should be bypassed. - - Checks proxy settings gathered from the environment, if specified, - or the registry. - """ - if getproxies_environment(): - return proxy_bypass_environment(host) - else: - return proxy_bypass_registry(host) - - -def dict_to_sequence(d): - """Returns an internal sequence dictionary update.""" - - if hasattr(d, "items"): - d = d.items() - - return d - - -def super_len(o): - total_length = None - current_position = 0 - - if hasattr(o, "__len__"): - total_length = len(o) - - elif hasattr(o, "len"): - total_length = o.len - - elif hasattr(o, "fileno"): - try: - fileno = o.fileno() - except (io.UnsupportedOperation, AttributeError): - # AttributeError is a surprising exception, seeing as how we've just checked - # that `hasattr(o, 'fileno')`. It happens for objects obtained via - # `Tarfile.extractfile()`, per issue 5229. - pass - else: - total_length = os.fstat(fileno).st_size - - # Having used fstat to determine the file length, we need to - # confirm that this file was opened up in binary mode. - if "b" not in o.mode: - warnings.warn( - ( - "Requests has determined the content-length for this " - "request using the binary size of the file: however, the " - "file has been opened in text mode (i.e. without the 'b' " - "flag in the mode). This may lead to an incorrect " - "content-length. In Requests 3.0, support will be removed " - "for files in text mode." - ), - FileModeWarning, - ) - - if hasattr(o, "tell"): - try: - current_position = o.tell() - except OSError: - # This can happen in some weird situations, such as when the file - # is actually a special file descriptor like stdin. In this - # instance, we don't know what the length is, so set it to zero and - # let requests chunk it instead. 
- if total_length is not None: - current_position = total_length - else: - if hasattr(o, "seek") and total_length is None: - # StringIO and BytesIO have seek but no usable fileno - try: - # seek to end of file - o.seek(0, 2) - total_length = o.tell() - - # seek back to current position to support - # partially read file-like objects - o.seek(current_position or 0) - except OSError: - total_length = 0 - - if total_length is None: - total_length = 0 - - return max(0, total_length - current_position) - - -def get_netrc_auth(url, raise_errors=False): - """Returns the Requests tuple auth for a given url from netrc.""" - - netrc_file = os.environ.get("NETRC") - if netrc_file is not None: - netrc_locations = (netrc_file,) - else: - netrc_locations = (f"~/{f}" for f in NETRC_FILES) - - try: - from netrc import NetrcParseError, netrc - - netrc_path = None - - for f in netrc_locations: - try: - loc = os.path.expanduser(f) - except KeyError: - # os.path.expanduser can fail when $HOME is undefined and - # getpwuid fails. See https://bugs.python.org/issue20164 & - # https://github.com/psf/requests/issues/1846 - return - - if os.path.exists(loc): - netrc_path = loc - break - - # Abort early if there isn't one. - if netrc_path is None: - return - - ri = urlparse(url) - - # Strip port numbers from netloc. This weird `if...encode`` dance is - # used for Python 3.2, which doesn't support unicode literals. - splitstr = b":" - if isinstance(url, str): - splitstr = splitstr.decode("ascii") - host = ri.netloc.split(splitstr)[0] - - try: - _netrc = netrc(netrc_path).authenticators(host) - if _netrc: - # Return with login / password - login_i = 0 if _netrc[0] else 1 - return (_netrc[login_i], _netrc[2]) - except (NetrcParseError, OSError): - # If there was a parsing error or a permissions issue reading the file, - # we'll just skip netrc auth unless explicitly asked to raise errors. - if raise_errors: - raise - - # App Engine hackiness. - except (ImportError, AttributeError): - pass - - -def guess_filename(obj): - """Tries to guess the filename of the given object.""" - name = getattr(obj, "name", None) - if name and isinstance(name, basestring) and name[0] != "<" and name[-1] != ">": - return os.path.basename(name) - - -def extract_zipped_paths(path): - """Replace nonexistent paths that look like they refer to a member of a zip - archive with the location of an extracted copy of the target, or else - just return the provided path unchanged. 
- """ - if os.path.exists(path): - # this is already a valid path, no need to do anything further - return path - - # find the first valid part of the provided path and treat that as a zip archive - # assume the rest of the path is the name of a member in the archive - archive, member = os.path.split(path) - while archive and not os.path.exists(archive): - archive, prefix = os.path.split(archive) - if not prefix: - # If we don't check for an empty prefix after the split (in other words, archive remains unchanged after the split), - # we _can_ end up in an infinite loop on a rare corner case affecting a small number of users - break - member = "/".join([prefix, member]) - - if not zipfile.is_zipfile(archive): - return path - - zip_file = zipfile.ZipFile(archive) - if member not in zip_file.namelist(): - return path - - # we have a valid zip archive and a valid member of that archive - tmp = tempfile.gettempdir() - extracted_path = os.path.join(tmp, member.split("/")[-1]) - if not os.path.exists(extracted_path): - # use read + write to avoid the creating nested folders, we only want the file, avoids mkdir racing condition - with atomic_open(extracted_path) as file_handler: - file_handler.write(zip_file.read(member)) - return extracted_path - - -@contextlib.contextmanager -def atomic_open(filename): - """Write a file to the disk in an atomic fashion""" - tmp_descriptor, tmp_name = tempfile.mkstemp(dir=os.path.dirname(filename)) - try: - with os.fdopen(tmp_descriptor, "wb") as tmp_handler: - yield tmp_handler - os.replace(tmp_name, filename) - except BaseException: - os.remove(tmp_name) - raise - - -def from_key_val_list(value): - """Take an object and test to see if it can be represented as a - dictionary. Unless it can not be represented as such, return an - OrderedDict, e.g., - - :: - - >>> from_key_val_list([('key', 'val')]) - OrderedDict([('key', 'val')]) - >>> from_key_val_list('string') - Traceback (most recent call last): - ... - ValueError: cannot encode objects that are not 2-tuples - >>> from_key_val_list({'key': 'val'}) - OrderedDict([('key', 'val')]) - - :rtype: OrderedDict - """ - if value is None: - return None - - if isinstance(value, (str, bytes, bool, int)): - raise ValueError("cannot encode objects that are not 2-tuples") - - return OrderedDict(value) - - -def to_key_val_list(value): - """Take an object and test to see if it can be represented as a - dictionary. If it can be, return a list of tuples, e.g., - - :: - - >>> to_key_val_list([('key', 'val')]) - [('key', 'val')] - >>> to_key_val_list({'key': 'val'}) - [('key', 'val')] - >>> to_key_val_list('string') - Traceback (most recent call last): - ... - ValueError: cannot encode objects that are not 2-tuples - - :rtype: list - """ - if value is None: - return None - - if isinstance(value, (str, bytes, bool, int)): - raise ValueError("cannot encode objects that are not 2-tuples") - - if isinstance(value, Mapping): - value = value.items() - - return list(value) - - -# From mitsuhiko/werkzeug (used with permission). -def parse_list_header(value): - """Parse lists as described by RFC 2068 Section 2. - - In particular, parse comma-separated lists where the elements of - the list may include quoted-strings. A quoted-string could - contain a comma. A non-quoted string could have quotes in the - middle. Quotes are removed automatically after parsing. - - It basically works like :func:`parse_set_header` just that items - may appear multiple times and case sensitivity is preserved. 
- - The return value is a standard :class:`list`: - - >>> parse_list_header('token, "quoted value"') - ['token', 'quoted value'] - - To create a header from the :class:`list` again, use the - :func:`dump_header` function. - - :param value: a string with a list header. - :return: :class:`list` - :rtype: list - """ - result = [] - for item in _parse_list_header(value): - if item[:1] == item[-1:] == '"': - item = unquote_header_value(item[1:-1]) - result.append(item) - return result - - -# From mitsuhiko/werkzeug (used with permission). -def parse_dict_header(value): - """Parse lists of key, value pairs as described by RFC 2068 Section 2 and - convert them into a python dict: - - >>> d = parse_dict_header('foo="is a fish", bar="as well"') - >>> type(d) is dict - True - >>> sorted(d.items()) - [('bar', 'as well'), ('foo', 'is a fish')] - - If there is no value for a key it will be `None`: - - >>> parse_dict_header('key_without_value') - {'key_without_value': None} - - To create a header from the :class:`dict` again, use the - :func:`dump_header` function. - - :param value: a string with a dict header. - :return: :class:`dict` - :rtype: dict - """ - result = {} - for item in _parse_list_header(value): - if "=" not in item: - result[item] = None - continue - name, value = item.split("=", 1) - if value[:1] == value[-1:] == '"': - value = unquote_header_value(value[1:-1]) - result[name] = value - return result - - -# From mitsuhiko/werkzeug (used with permission). -def unquote_header_value(value, is_filename=False): - r"""Unquotes a header value. (Reversal of :func:`quote_header_value`). - This does not use the real unquoting but what browsers are actually - using for quoting. - - :param value: the header value to unquote. - :rtype: str - """ - if value and value[0] == value[-1] == '"': - # this is not the real unquoting, but fixing this so that the - # RFC is met will result in bugs with internet explorer and - # probably some other browsers as well. IE for example is - # uploading files with "C:\foo\bar.txt" as filename - value = value[1:-1] - - # if this is a filename and the starting characters look like - # a UNC path, then just return the value without quotes. Using the - # replace sequence below on a UNC path has the effect of turning - # the leading double slash into a single slash and then - # _fix_ie_filename() doesn't work correctly. See #458. - if not is_filename or value[:2] != "\\\\": - return value.replace("\\\\", "\\").replace('\\"', '"') - return value - - -def dict_from_cookiejar(cj): - """Returns a key/value dictionary from a CookieJar. - - :param cj: CookieJar object to extract cookies from. - :rtype: dict - """ - - cookie_dict = {} - - for cookie in cj: - cookie_dict[cookie.name] = cookie.value - - return cookie_dict - - -def add_dict_to_cookiejar(cj, cookie_dict): - """Returns a CookieJar from a key/value dictionary. - - :param cj: CookieJar to insert cookies into. - :param cookie_dict: Dict of key/values to insert into CookieJar. - :rtype: CookieJar - """ - - return cookiejar_from_dict(cookie_dict, cj) - - -def get_encodings_from_content(content): - """Returns encodings from given content string. - - :param content: bytestring to extract encodings from. - """ - warnings.warn( - ( - "In requests 3.0, get_encodings_from_content will be removed. For " - "more information, please see the discussion on issue #2266. 
(This" - " warning should only appear once.)" - ), - DeprecationWarning, - ) - - charset_re = re.compile(r']', flags=re.I) - pragma_re = re.compile(r']', flags=re.I) - xml_re = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]') - - return ( - charset_re.findall(content) - + pragma_re.findall(content) - + xml_re.findall(content) - ) - - -def _parse_content_type_header(header): - """Returns content type and parameters from given header - - :param header: string - :return: tuple containing content type and dictionary of - parameters - """ - - tokens = header.split(";") - content_type, params = tokens[0].strip(), tokens[1:] - params_dict = {} - items_to_strip = "\"' " - - for param in params: - param = param.strip() - if param: - key, value = param, True - index_of_equals = param.find("=") - if index_of_equals != -1: - key = param[:index_of_equals].strip(items_to_strip) - value = param[index_of_equals + 1 :].strip(items_to_strip) - params_dict[key.lower()] = value - return content_type, params_dict - - -def get_encoding_from_headers(headers): - """Returns encodings from given HTTP Header Dict. - - :param headers: dictionary to extract encoding from. - :rtype: str - """ - - content_type = headers.get("content-type") - - if not content_type: - return None - - content_type, params = _parse_content_type_header(content_type) - - if "charset" in params: - return params["charset"].strip("'\"") - - if "text" in content_type: - return "ISO-8859-1" - - if "application/json" in content_type: - # Assume UTF-8 based on RFC 4627: https://www.ietf.org/rfc/rfc4627.txt since the charset was unset - return "utf-8" - - -def stream_decode_response_unicode(iterator, r): - """Stream decodes an iterator.""" - - if r.encoding is None: - yield from iterator - return - - decoder = codecs.getincrementaldecoder(r.encoding)(errors="replace") - for chunk in iterator: - rv = decoder.decode(chunk) - if rv: - yield rv - rv = decoder.decode(b"", final=True) - if rv: - yield rv - - -def iter_slices(string, slice_length): - """Iterate over slices of a string.""" - pos = 0 - if slice_length is None or slice_length <= 0: - slice_length = len(string) - while pos < len(string): - yield string[pos : pos + slice_length] - pos += slice_length - - -def get_unicode_from_response(r): - """Returns the requested content back in unicode. - - :param r: Response object to get unicode content from. - - Tried: - - 1. charset from content-type - 2. fall back and replace all unicode characters - - :rtype: str - """ - warnings.warn( - ( - "In requests 3.0, get_unicode_from_response will be removed. For " - "more information, please see the discussion on issue #2266. (This" - " warning should only appear once.)" - ), - DeprecationWarning, - ) - - tried_encodings = [] - - # Try charset from content-type - encoding = get_encoding_from_headers(r.headers) - - if encoding: - try: - return str(r.content, encoding) - except UnicodeError: - tried_encodings.append(encoding) - - # Fall back: - try: - return str(r.content, encoding, errors="replace") - except TypeError: - return r.content - - -# The unreserved URI characters (RFC 3986) -UNRESERVED_SET = frozenset( - "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + "0123456789-._~" -) - - -def unquote_unreserved(uri): - """Un-escape any percent-escape sequences in a URI that are unreserved - characters. This leaves all reserved, illegal and non-ASCII bytes encoded. 
- - :rtype: str - """ - parts = uri.split("%") - for i in range(1, len(parts)): - h = parts[i][0:2] - if len(h) == 2 and h.isalnum(): - try: - c = chr(int(h, 16)) - except ValueError: - raise InvalidURL(f"Invalid percent-escape sequence: '{h}'") - - if c in UNRESERVED_SET: - parts[i] = c + parts[i][2:] - else: - parts[i] = f"%{parts[i]}" - else: - parts[i] = f"%{parts[i]}" - return "".join(parts) - - -def requote_uri(uri): - """Re-quote the given URI. - - This function passes the given URI through an unquote/quote cycle to - ensure that it is fully and consistently quoted. - - :rtype: str - """ - safe_with_percent = "!#$%&'()*+,/:;=?@[]~" - safe_without_percent = "!#$&'()*+,/:;=?@[]~" - try: - # Unquote only the unreserved characters - # Then quote only illegal characters (do not quote reserved, - # unreserved, or '%') - return quote(unquote_unreserved(uri), safe=safe_with_percent) - except InvalidURL: - # We couldn't unquote the given URI, so let's try quoting it, but - # there may be unquoted '%'s in the URI. We need to make sure they're - # properly quoted so they do not cause issues elsewhere. - return quote(uri, safe=safe_without_percent) - - -def address_in_network(ip, net): - """This function allows you to check if an IP belongs to a network subnet - - Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24 - returns False if ip = 192.168.1.1 and net = 192.168.100.0/24 - - :rtype: bool - """ - ipaddr = struct.unpack("=L", socket.inet_aton(ip))[0] - netaddr, bits = net.split("/") - netmask = struct.unpack("=L", socket.inet_aton(dotted_netmask(int(bits))))[0] - network = struct.unpack("=L", socket.inet_aton(netaddr))[0] & netmask - return (ipaddr & netmask) == (network & netmask) - - -def dotted_netmask(mask): - """Converts mask from /xx format to xxx.xxx.xxx.xxx - - Example: if mask is 24 function returns 255.255.255.0 - - :rtype: str - """ - bits = 0xFFFFFFFF ^ (1 << 32 - mask) - 1 - return socket.inet_ntoa(struct.pack(">I", bits)) - - -def is_ipv4_address(string_ip): - """ - :rtype: bool - """ - try: - socket.inet_aton(string_ip) - except OSError: - return False - return True - - -def is_valid_cidr(string_network): - """ - Very simple check of the cidr format in no_proxy variable. - - :rtype: bool - """ - if string_network.count("/") == 1: - try: - mask = int(string_network.split("/")[1]) - except ValueError: - return False - - if mask < 1 or mask > 32: - return False - - try: - socket.inet_aton(string_network.split("/")[0]) - except OSError: - return False - else: - return False - return True - - -@contextlib.contextmanager -def set_environ(env_name, value): - """Set the environment variable 'env_name' to 'value' - - Save previous value, yield, and then restore the previous value stored in - the environment variable 'env_name'. - - If 'value' is None, do nothing""" - value_changed = value is not None - if value_changed: - old_value = os.environ.get(env_name) - os.environ[env_name] = value - try: - yield - finally: - if value_changed: - if old_value is None: - del os.environ[env_name] - else: - os.environ[env_name] = old_value - - -def should_bypass_proxies(url, no_proxy): - """ - Returns whether we should bypass proxies or not. - - :rtype: bool - """ - # Prioritize lowercase environment variables over uppercase - # to keep a consistent behaviour with other http projects (curl, wget). - def get_proxy(key): - return os.environ.get(key) or os.environ.get(key.upper()) - - # First check whether no_proxy is defined. 
If it is, check that the URL - # we're getting isn't in the no_proxy list. - no_proxy_arg = no_proxy - if no_proxy is None: - no_proxy = get_proxy("no_proxy") - parsed = urlparse(url) - - if parsed.hostname is None: - # URLs don't always have hostnames, e.g. file:/// urls. - return True - - if no_proxy: - # We need to check whether we match here. We need to see if we match - # the end of the hostname, both with and without the port. - no_proxy = (host for host in no_proxy.replace(" ", "").split(",") if host) - - if is_ipv4_address(parsed.hostname): - for proxy_ip in no_proxy: - if is_valid_cidr(proxy_ip): - if address_in_network(parsed.hostname, proxy_ip): - return True - elif parsed.hostname == proxy_ip: - # If no_proxy ip was defined in plain IP notation instead of cidr notation & - # matches the IP of the index - return True - else: - host_with_port = parsed.hostname - if parsed.port: - host_with_port += f":{parsed.port}" - - for host in no_proxy: - if parsed.hostname.endswith(host) or host_with_port.endswith(host): - # The URL does match something in no_proxy, so we don't want - # to apply the proxies on this URL. - return True - - with set_environ("no_proxy", no_proxy_arg): - # parsed.hostname can be `None` in cases such as a file URI. - try: - bypass = proxy_bypass(parsed.hostname) - except (TypeError, socket.gaierror): - bypass = False - - if bypass: - return True - - return False - - -def get_environ_proxies(url, no_proxy=None): - """ - Return a dict of environment proxies. - - :rtype: dict - """ - if should_bypass_proxies(url, no_proxy=no_proxy): - return {} - else: - return getproxies() - - -def select_proxy(url, proxies): - """Select a proxy for the url, if applicable. - - :param url: The url being for the request - :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs - """ - proxies = proxies or {} - urlparts = urlparse(url) - if urlparts.hostname is None: - return proxies.get(urlparts.scheme, proxies.get("all")) - - proxy_keys = [ - urlparts.scheme + "://" + urlparts.hostname, - urlparts.scheme, - "all://" + urlparts.hostname, - "all", - ] - proxy = None - for proxy_key in proxy_keys: - if proxy_key in proxies: - proxy = proxies[proxy_key] - break - - return proxy - - -def resolve_proxies(request, proxies, trust_env=True): - """This method takes proxy information from a request and configuration - input to resolve a mapping of target proxies. This will consider settings - such a NO_PROXY to strip proxy configurations. - - :param request: Request or PreparedRequest - :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs - :param trust_env: Boolean declaring whether to trust environment configs - - :rtype: dict - """ - proxies = proxies if proxies is not None else {} - url = request.url - scheme = urlparse(url).scheme - no_proxy = proxies.get("no_proxy") - new_proxies = proxies.copy() - - if trust_env and not should_bypass_proxies(url, no_proxy=no_proxy): - environ_proxies = get_environ_proxies(url, no_proxy=no_proxy) - - proxy = environ_proxies.get(scheme, environ_proxies.get("all")) - - if proxy: - new_proxies.setdefault(scheme, proxy) - return new_proxies - - -def default_user_agent(name="python-requests"): - """ - Return a string representing the default user agent. 
- - :rtype: str - """ - return f"{name}/{__version__}" - - -def default_headers(): - """ - :rtype: requests.structures.CaseInsensitiveDict - """ - return CaseInsensitiveDict( - { - "User-Agent": default_user_agent(), - "Accept-Encoding": DEFAULT_ACCEPT_ENCODING, - "Accept": "*/*", - "Connection": "keep-alive", - } - ) - - -def parse_header_links(value): - """Return a list of parsed link headers proxies. - - i.e. Link: ; rel=front; type="image/jpeg",; rel=back;type="image/jpeg" - - :rtype: list - """ - - links = [] - - replace_chars = " '\"" - - value = value.strip(replace_chars) - if not value: - return links - - for val in re.split(", *<", value): - try: - url, params = val.split(";", 1) - except ValueError: - url, params = val, "" - - link = {"url": url.strip("<> '\"")} - - for param in params.split(";"): - try: - key, value = param.split("=") - except ValueError: - break - - link[key.strip(replace_chars)] = value.strip(replace_chars) - - links.append(link) - - return links - - -# Null bytes; no need to recreate these on each call to guess_json_utf -_null = "\x00".encode("ascii") # encoding to ASCII for Python 3 -_null2 = _null * 2 -_null3 = _null * 3 - - -def guess_json_utf(data): - """ - :rtype: str - """ - # JSON always starts with two ASCII characters, so detection is as - # easy as counting the nulls and from their location and count - # determine the encoding. Also detect a BOM, if present. - sample = data[:4] - if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE): - return "utf-32" # BOM included - if sample[:3] == codecs.BOM_UTF8: - return "utf-8-sig" # BOM included, MS style (discouraged) - if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE): - return "utf-16" # BOM included - nullcount = sample.count(_null) - if nullcount == 0: - return "utf-8" - if nullcount == 2: - if sample[::2] == _null2: # 1st and 3rd are null - return "utf-16-be" - if sample[1::2] == _null2: # 2nd and 4th are null - return "utf-16-le" - # Did not detect 2 valid UTF-16 ascii-range characters - if nullcount == 3: - if sample[:3] == _null3: - return "utf-32-be" - if sample[1:] == _null3: - return "utf-32-le" - # Did not detect a valid UTF-32 ascii-range character - return None - - -def prepend_scheme_if_needed(url, new_scheme): - """Given a URL that may or may not have a scheme, prepend the given scheme. - Does not replace a present scheme with the one provided as an argument. - - :rtype: str - """ - parsed = parse_url(url) - scheme, auth, host, port, path, query, fragment = parsed - - # A defect in urlparse determines that there isn't a netloc present in some - # urls. We previously assumed parsing was overly cautious, and swapped the - # netloc and path. Due to a lack of tests on the original defect, this is - # maintained with parse_url for backwards compatibility. - netloc = parsed.netloc - if not netloc: - netloc, path = path, netloc - - if auth: - # parse_url doesn't provide the netloc with auth - # so we'll add it ourselves. - netloc = "@".join([auth, netloc]) - if scheme is None: - scheme = new_scheme - if path is None: - path = "" - - return urlunparse((scheme, netloc, path, "", query, fragment)) - - -def get_auth_from_url(url): - """Given a url with authentication components, extract them into a tuple of - username,password. 
- - :rtype: (str,str) - """ - parsed = urlparse(url) - - try: - auth = (unquote(parsed.username), unquote(parsed.password)) - except (AttributeError, TypeError): - auth = ("", "") - - return auth - - -def check_header_validity(header): - """Verifies that header parts don't contain leading whitespace - reserved characters, or return characters. - - :param header: tuple, in the format (name, value). - """ - name, value = header - - for part in header: - if type(part) not in HEADER_VALIDATORS: - raise InvalidHeader( - f"Header part ({part!r}) from {{{name!r}: {value!r}}} must be " - f"of type str or bytes, not {type(part)}" - ) - - _validate_header_part(name, "name", HEADER_VALIDATORS[type(name)][0]) - _validate_header_part(value, "value", HEADER_VALIDATORS[type(value)][1]) - - -def _validate_header_part(header_part, header_kind, validator): - if not validator.match(header_part): - raise InvalidHeader( - f"Invalid leading whitespace, reserved character(s), or return" - f"character(s) in header {header_kind}: {header_part!r}" - ) - - -def urldefragauth(url): - """ - Given a url remove the fragment and the authentication part. - - :rtype: str - """ - scheme, netloc, path, params, query, fragment = urlparse(url) - - # see func:`prepend_scheme_if_needed` - if not netloc: - netloc, path = path, netloc - - netloc = netloc.rsplit("@", 1)[-1] - - return urlunparse((scheme, netloc, path, params, query, "")) - - -def rewind_body(prepared_request): - """Move file pointer back to its recorded starting position - so it can be read again on redirect. - """ - body_seek = getattr(prepared_request.body, "seek", None) - if body_seek is not None and isinstance( - prepared_request._body_position, integer_types - ): - try: - body_seek(prepared_request._body_position) - except OSError: - raise UnrewindableBodyError( - "An error occurred when rewinding request body for redirect." 
- ) - else: - raise UnrewindableBodyError("Unable to rewind request body for redirect.") diff --git a/spaces/Realcat/image-matching-webui/third_party/DarkFeat/trainer_single_norel.py b/spaces/Realcat/image-matching-webui/third_party/DarkFeat/trainer_single_norel.py deleted file mode 100644 index 5447a37dabba339183f4e50ef44381ebc7a34998..0000000000000000000000000000000000000000 --- a/spaces/Realcat/image-matching-webui/third_party/DarkFeat/trainer_single_norel.py +++ /dev/null @@ -1,336 +0,0 @@ -import os -import cv2 -import time -import yaml -import torch -import datetime -from tensorboardX import SummaryWriter -import torchvision.transforms as tvf -import torch.nn as nn -import torch.nn.functional as F -import numpy as np - -from nets.l2net import Quad_L2Net -from nets.geom import getK, getWarp, _grid_positions -from nets.loss import make_detector_loss -from nets.score import extract_kpts -from datasets.noise_simulator import NoiseSimulator -from nets.l2net import Quad_L2Net - - -class SingleTrainerNoRel: - def __init__(self, config, device, loader, job_name, start_cnt): - self.config = config - self.device = device - self.loader = loader - - # tensorboard writer construction - os.makedirs("./runs/", exist_ok=True) - if job_name != "": - self.log_dir = f"runs/{job_name}" - else: - self.log_dir = f'runs/{datetime.datetime.now().strftime("%m-%d-%H%M%S")}' - - self.writer = SummaryWriter(self.log_dir) - with open(f"{self.log_dir}/config.yaml", "w") as f: - yaml.dump(config, f) - - if ( - config["network"]["input_type"] == "gray" - or config["network"]["input_type"] == "raw-gray" - ): - self.model = eval(f'{config["network"]["model"]}(inchan=1)').to(device) - elif ( - config["network"]["input_type"] == "rgb" - or config["network"]["input_type"] == "raw-demosaic" - ): - self.model = eval(f'{config["network"]["model"]}(inchan=3)').to(device) - elif config["network"]["input_type"] == "raw": - self.model = eval(f'{config["network"]["model"]}(inchan=4)').to(device) - else: - raise NotImplementedError() - - # noise maker - self.noise_maker = NoiseSimulator(device) - - # load model - self.cnt = 0 - if start_cnt != 0: - self.model.load_state_dict( - torch.load(f"{self.log_dir}/model_{start_cnt:06d}.pth") - ) - self.cnt = start_cnt + 1 - - # optimizer and scheduler - if self.config["training"]["optimizer"] == "SGD": - self.optimizer = torch.optim.SGD( - [ - { - "params": self.model.parameters(), - "initial_lr": self.config["training"]["lr"], - } - ], - lr=self.config["training"]["lr"], - momentum=self.config["training"]["momentum"], - weight_decay=self.config["training"]["weight_decay"], - ) - elif self.config["training"]["optimizer"] == "Adam": - self.optimizer = torch.optim.Adam( - [ - { - "params": self.model.parameters(), - "initial_lr": self.config["training"]["lr"], - } - ], - lr=self.config["training"]["lr"], - weight_decay=self.config["training"]["weight_decay"], - ) - else: - raise NotImplementedError() - - self.lr_scheduler = torch.optim.lr_scheduler.StepLR( - self.optimizer, - step_size=self.config["training"]["lr_step"], - gamma=self.config["training"]["lr_gamma"], - last_epoch=start_cnt, - ) - for param_tensor in self.model.state_dict(): - print(param_tensor, "\t", self.model.state_dict()[param_tensor].size()) - - def save(self, iter_num): - torch.save(self.model.state_dict(), f"{self.log_dir}/model_{iter_num:06d}.pth") - - def load(self, path): - self.model.load_state_dict(torch.load(path)) - - def train(self): - self.model.train() - - for epoch in range(2): - for batch_idx, inputs in 
enumerate(self.loader): - self.optimizer.zero_grad() - t = time.time() - - # preprocess and add noise - img0_ori, noise_img0_ori = self.preprocess_noise_pair( - inputs["img0"], self.cnt - ) - img1_ori, noise_img1_ori = self.preprocess_noise_pair( - inputs["img1"], self.cnt - ) - - img0 = img0_ori.permute(0, 3, 1, 2).float().to(self.device) - img1 = img1_ori.permute(0, 3, 1, 2).float().to(self.device) - - if self.config["network"]["input_type"] == "rgb": - # 3-channel rgb - RGB_mean = [0.485, 0.456, 0.406] - RGB_std = [0.229, 0.224, 0.225] - norm_RGB = tvf.Normalize(mean=RGB_mean, std=RGB_std) - img0 = norm_RGB(img0) - img1 = norm_RGB(img1) - noise_img0 = norm_RGB(noise_img0) - noise_img1 = norm_RGB(noise_img1) - - elif self.config["network"]["input_type"] == "gray": - # 1-channel - img0 = torch.mean(img0, dim=1, keepdim=True) - img1 = torch.mean(img1, dim=1, keepdim=True) - noise_img0 = torch.mean(noise_img0, dim=1, keepdim=True) - noise_img1 = torch.mean(noise_img1, dim=1, keepdim=True) - norm_gray0 = tvf.Normalize(mean=img0.mean(), std=img0.std()) - norm_gray1 = tvf.Normalize(mean=img1.mean(), std=img1.std()) - img0 = norm_gray0(img0) - img1 = norm_gray1(img1) - noise_img0 = norm_gray0(noise_img0) - noise_img1 = norm_gray1(noise_img1) - - elif self.config["network"]["input_type"] == "raw": - # 4-channel - pass - - elif self.config["network"]["input_type"] == "raw-demosaic": - # 3-channel - pass - - else: - raise NotImplementedError() - - desc0, score_map0, _, _ = self.model(img0) - desc1, score_map1, _, _ = self.model(img1) - - cur_feat_size0 = torch.tensor(score_map0.shape[2:]) - cur_feat_size1 = torch.tensor(score_map1.shape[2:]) - - desc0 = desc0.permute(0, 2, 3, 1) - desc1 = desc1.permute(0, 2, 3, 1) - score_map0 = score_map0.permute(0, 2, 3, 1) - score_map1 = score_map1.permute(0, 2, 3, 1) - - r_K0 = getK(inputs["ori_img_size0"], cur_feat_size0, inputs["K0"]).to( - self.device - ) - r_K1 = getK(inputs["ori_img_size1"], cur_feat_size1, inputs["K1"]).to( - self.device - ) - - pos0 = _grid_positions( - cur_feat_size0[0], cur_feat_size0[1], img0.shape[0] - ).to(self.device) - - pos0, pos1, _ = getWarp( - pos0, - inputs["rel_pose"].to(self.device), - inputs["depth0"].to(self.device), - r_K0, - inputs["depth1"].to(self.device), - r_K1, - img0.shape[0], - ) - - det_structured_loss, det_accuracy = make_detector_loss( - pos0, - pos1, - desc0, - desc1, - score_map0, - score_map1, - img0.shape[0], - self.config["network"]["use_corr_n"], - self.config["network"]["loss_type"], - self.config, - ) - - total_loss = det_structured_loss - - self.writer.add_scalar("acc/normal_acc", det_accuracy, self.cnt) - self.writer.add_scalar("loss/total_loss", total_loss, self.cnt) - self.writer.add_scalar( - "loss/det_loss_normal", det_structured_loss, self.cnt - ) - print( - "iter={},\tloss={:.4f},\tacc={:.4f},\t{:.4f}s/iter".format( - self.cnt, total_loss, det_accuracy, time.time() - t - ) - ) - - if det_structured_loss != 0: - total_loss.backward() - self.optimizer.step() - self.lr_scheduler.step() - - if self.cnt % 100 == 0: - indices0, scores0 = extract_kpts( - score_map0.permute(0, 3, 1, 2), - k=self.config["network"]["det"]["kpt_n"], - score_thld=self.config["network"]["det"]["score_thld"], - nms_size=self.config["network"]["det"]["nms_size"], - eof_size=self.config["network"]["det"]["eof_size"], - edge_thld=self.config["network"]["det"]["edge_thld"], - ) - indices1, scores1 = extract_kpts( - score_map1.permute(0, 3, 1, 2), - k=self.config["network"]["det"]["kpt_n"], - 
score_thld=self.config["network"]["det"]["score_thld"], - nms_size=self.config["network"]["det"]["nms_size"], - eof_size=self.config["network"]["det"]["eof_size"], - edge_thld=self.config["network"]["det"]["edge_thld"], - ) - - if self.config["network"]["input_type"] == "raw": - kpt_img0 = self.showKeyPoints( - img0_ori[0][..., :3] * 255.0, indices0[0] - ) - kpt_img1 = self.showKeyPoints( - img1_ori[0][..., :3] * 255.0, indices1[0] - ) - else: - kpt_img0 = self.showKeyPoints(img0_ori[0] * 255.0, indices0[0]) - kpt_img1 = self.showKeyPoints(img1_ori[0] * 255.0, indices1[0]) - - self.writer.add_image( - "img0/kpts", kpt_img0, self.cnt, dataformats="HWC" - ) - self.writer.add_image( - "img1/kpts", kpt_img1, self.cnt, dataformats="HWC" - ) - self.writer.add_image( - "img0/score_map", score_map0[0], self.cnt, dataformats="HWC" - ) - self.writer.add_image( - "img1/score_map", score_map1[0], self.cnt, dataformats="HWC" - ) - - if self.cnt % 10000 == 0: - self.save(self.cnt) - - self.cnt += 1 - - def showKeyPoints(self, img, indices): - key_points = cv2.KeyPoint_convert(indices.cpu().float().numpy()[:, ::-1]) - img = img.numpy().astype("uint8") - img = cv2.drawKeypoints(img, key_points, None, color=(0, 255, 0)) - return img - - def preprocess(self, img, iter_idx): - if ( - not self.config["network"]["noise"] - and "raw" not in self.config["network"]["input_type"] - ): - return img - - raw = self.noise_maker.rgb2raw(img, batched=True) - - if self.config["network"]["noise"]: - ratio_dec = ( - min(self.config["network"]["noise_maxstep"], iter_idx) - / self.config["network"]["noise_maxstep"] - ) - raw = self.noise_maker.raw2noisyRaw(raw, ratio_dec=ratio_dec, batched=True) - - if self.config["network"]["input_type"] == "raw": - return torch.tensor(self.noise_maker.raw2packedRaw(raw, batched=True)) - - if self.config["network"]["input_type"] == "raw-demosaic": - return torch.tensor(self.noise_maker.raw2demosaicRaw(raw, batched=True)) - - rgb = self.noise_maker.raw2rgb(raw, batched=True) - if ( - self.config["network"]["input_type"] == "rgb" - or self.config["network"]["input_type"] == "gray" - ): - return torch.tensor(rgb) - - raise NotImplementedError() - - def preprocess_noise_pair(self, img, iter_idx): - assert self.config["network"]["noise"] - - raw = self.noise_maker.rgb2raw(img, batched=True) - - ratio_dec = ( - min(self.config["network"]["noise_maxstep"], iter_idx) - / self.config["network"]["noise_maxstep"] - ) - noise_raw = self.noise_maker.raw2noisyRaw( - raw, ratio_dec=ratio_dec, batched=True - ) - - if self.config["network"]["input_type"] == "raw": - return torch.tensor( - self.noise_maker.raw2packedRaw(raw, batched=True) - ), torch.tensor(self.noise_maker.raw2packedRaw(noise_raw, batched=True)) - - if self.config["network"]["input_type"] == "raw-demosaic": - return torch.tensor( - self.noise_maker.raw2demosaicRaw(raw, batched=True) - ), torch.tensor(self.noise_maker.raw2demosaicRaw(noise_raw, batched=True)) - - noise_rgb = self.noise_maker.raw2rgb(noise_raw, batched=True) - if ( - self.config["network"]["input_type"] == "rgb" - or self.config["network"]["input_type"] == "gray" - ): - return img, torch.tensor(noise_rgb) - - raise NotImplementedError() diff --git a/spaces/Reeve/Ohayou_Face/training/stylegan2_multi.py b/spaces/Reeve/Ohayou_Face/training/stylegan2_multi.py deleted file mode 100644 index 23b003d9003c47c0095ea00b02a0f6e1c987a789..0000000000000000000000000000000000000000 --- a/spaces/Reeve/Ohayou_Face/training/stylegan2_multi.py +++ /dev/null @@ -1,414 +0,0 @@ -# Copyright (c) 
2021, NVIDIA CORPORATION. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -import numpy as np -import torch -from torch_utils import misc -from torch_utils import persistence -from torch_utils.ops import conv2d_resample -from torch_utils.ops import upfirdn2d -from torch_utils.ops import bias_act -from torch_utils.ops import fma - -from .networks import FullyConnectedLayer, Conv2dLayer, ToRGBLayer, MappingNetwork - -from util.utilgan import hw_scales, fix_size, multimask - -@misc.profiled_function -def modulated_conv2d( - x, # Input tensor of shape [batch_size, in_channels, in_height, in_width]. - weight, # Weight tensor of shape [out_channels, in_channels, kernel_height, kernel_width]. - styles, # Modulation coefficients of shape [batch_size, in_channels]. -# !!! custom - # latmask, # mask for split-frame latents blending - countHW = [1,1], # frame split count by height,width - splitfine = 0., # frame split edge fineness (float from 0+) - size = None, # custom size - scale_type = None, # scaling way: fit, centr, side, pad, padside - noise = None, # Optional noise tensor to add to the output activations. - up = 1, # Integer upsampling factor. - down = 1, # Integer downsampling factor. - padding = 0, # Padding with respect to the upsampled image. - resample_filter = None, # Low-pass filter to apply when resampling activations. Must be prepared beforehand by calling upfirdn2d.setup_filter(). - demodulate = True, # Apply weight demodulation? - flip_weight = True, # False = convolution, True = correlation (matches torch.nn.functional.conv2d). - fused_modconv = True, # Perform modulation, convolution, and demodulation as a single fused operation? -): - batch_size = x.shape[0] - out_channels, in_channels, kh, kw = weight.shape - misc.assert_shape(weight, [out_channels, in_channels, kh, kw]) # [OIkk] - misc.assert_shape(x, [batch_size, in_channels, None, None]) # [NIHW] - misc.assert_shape(styles, [batch_size, in_channels]) # [NI] - - # Pre-normalize inputs to avoid FP16 overflow. - if x.dtype == torch.float16 and demodulate: - weight = weight * (1 / np.sqrt(in_channels * kh * kw) / weight.norm(float('inf'), dim=[1,2,3], keepdim=True)) # max_Ikk - styles = styles / styles.norm(float('inf'), dim=1, keepdim=True) # max_I - - # Calculate per-sample weights and demodulation coefficients. - w = None - dcoefs = None - if demodulate or fused_modconv: - w = weight.unsqueeze(0) # [NOIkk] - w = w * styles.reshape(batch_size, 1, -1, 1, 1) # [NOIkk] - if demodulate: - dcoefs = (w.square().sum(dim=[2,3,4]) + 1e-8).rsqrt() # [NO] - if demodulate and fused_modconv: - w = w * dcoefs.reshape(batch_size, -1, 1, 1, 1) # [NOIkk] - - # Execute by scaling the activations before and after the convolution. - if not fused_modconv: - x = x * styles.to(x.dtype).reshape(batch_size, -1, 1, 1) - x = conv2d_resample.conv2d_resample(x=x, w=weight.to(x.dtype), f=resample_filter, up=up, down=down, padding=padding, flip_weight=flip_weight) -# !!! 
custom size & multi latent blending - if size is not None and up==2: - x = fix_size(x, size, scale_type) - # x = multimask(x, size, latmask, countHW, splitfine) - if demodulate and noise is not None: - x = fma.fma(x, dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1), noise.to(x.dtype)) - elif demodulate: - x = x * dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1) - elif noise is not None: - x = x.add_(noise.to(x.dtype)) - return x - - # Execute as one fused op using grouped convolution. - with misc.suppress_tracer_warnings(): # this value will be treated as a constant - batch_size = int(batch_size) - misc.assert_shape(x, [batch_size, in_channels, None, None]) - x = x.reshape(1, -1, *x.shape[2:]) - w = w.reshape(-1, in_channels, kh, kw) - x = conv2d_resample.conv2d_resample(x=x, w=w.to(x.dtype), f=resample_filter, up=up, down=down, padding=padding, groups=batch_size, flip_weight=flip_weight) - x = x.reshape(batch_size, -1, *x.shape[2:]) -# !!! custom size & multi latent blending - if size is not None and up==2: - x = fix_size(x, size, scale_type) - # x = multimask(x, size, latmask, countHW, splitfine) - if noise is not None: - x = x.add_(noise) - return x - -#---------------------------------------------------------------------------- - -@persistence.persistent_class -class SynthesisLayer(torch.nn.Module): - def __init__(self, - in_channels, # Number of input channels. - out_channels, # Number of output channels. - w_dim, # Intermediate latent (W) dimensionality. - resolution, # Resolution of this layer. -# !!! custom - countHW = [1,1], # frame split count by height,width - splitfine = 0., # frame split edge fineness (float from 0+) - size = None, # custom size - scale_type = None, # scaling way: fit, centr, side, pad, padside - init_res = [4,4], # Initial (minimal) resolution for progressive training - kernel_size = 3, # Convolution kernel size. - up = 1, # Integer upsampling factor. - use_noise = True, # Enable noise input? - activation = 'lrelu', # Activation function: 'relu', 'lrelu', etc. - resample_filter = [1,3,3,1], # Low-pass filter to apply when resampling activations. - conv_clamp = None, # Clamp the output of convolution layers to +-X, None = disable clamping. - channels_last = False, # Use channels_last format for the weights? - ): - super().__init__() - self.resolution = resolution - self.countHW = countHW # !!! custom - self.splitfine = splitfine # !!! custom - self.size = size # !!! custom - self.scale_type = scale_type # !!! custom - self.init_res = init_res # !!! custom - self.up = up - self.use_noise = use_noise - self.activation = activation - self.conv_clamp = conv_clamp - self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter)) - self.padding = kernel_size // 2 - self.act_gain = bias_act.activation_funcs[activation].def_gain - - self.affine = FullyConnectedLayer(w_dim, in_channels, bias_init=1) - memory_format = torch.channels_last if channels_last else torch.contiguous_format - self.weight = torch.nn.Parameter(torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=memory_format)) - if use_noise: -# !!! custom - self.register_buffer('noise_const', torch.randn([resolution * init_res[0]//4, resolution * init_res[1]//4])) - # self.register_buffer('noise_const', torch.randn([resolution, resolution])) - self.noise_strength = torch.nn.Parameter(torch.zeros([])) - self.bias = torch.nn.Parameter(torch.zeros([out_channels])) - -# !!! 
custom - # def forward(self, x, latmask, w, noise_mode='random', fused_modconv=True, gain=1): - def forward(self, x, w, noise_mode='random', fused_modconv=True, gain=1): - assert noise_mode in ['random', 'const', 'none'] - in_resolution = self.resolution // self.up - # misc.assert_shape(x, [None, self.weight.shape[1], in_resolution, in_resolution]) - styles = self.affine(w) - - noise = None - if self.use_noise and noise_mode == 'random': -# !!! custom - sz = self.size if self.up==2 and self.size is not None else x.shape[2:] - noise = torch.randn([x.shape[0], 1, *sz], device=x.device) * self.noise_strength - # noise = torch.randn([x.shape[0], 1, self.resolution, self.resolution], device=x.device) * self.noise_strength - if self.use_noise and noise_mode == 'const': - noise = self.noise_const * self.noise_strength -# !!! custom noise size - noise_size = self.size if self.up==2 and self.size is not None and self.resolution > 4 else x.shape[2:] - noise = fix_size(noise.unsqueeze(0).unsqueeze(0), noise_size, scale_type=self.scale_type)[0][0] - - # print(x.shape, noise.shape, self.size, self.up) - - flip_weight = (self.up == 1) # slightly faster - # x = modulated_conv2d(x=x, weight=self.weight, styles=styles, noise=noise, up=self.up, - # latmask=latmask, countHW=self.countHW, splitfine=self.splitfine, size=self.size, scale_type=self.scale_type, # !!! custom - # padding=self.padding, resample_filter=self.resample_filter, flip_weight=flip_weight, fused_modconv=fused_modconv) - - x = modulated_conv2d(x=x, weight=self.weight, styles=styles, noise=noise, up=self.up, - countHW=self.countHW, splitfine=self.splitfine, size=self.size, scale_type=self.scale_type, # !!! custom - padding=self.padding, resample_filter=self.resample_filter, flip_weight=flip_weight, fused_modconv=fused_modconv) - - - act_gain = self.act_gain * gain - act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None - x = bias_act.bias_act(x, self.bias.to(x.dtype), act=self.activation, gain=act_gain, clamp=act_clamp) - return x - -#---------------------------------------------------------------------------- - -@persistence.persistent_class -class SynthesisBlock(torch.nn.Module): - def __init__(self, - in_channels, # Number of input channels, 0 = first block. - out_channels, # Number of output channels. - w_dim, # Intermediate latent (W) dimensionality. - resolution, # Resolution of this block. - img_channels, # Number of output color channels. - is_last, # Is this the last block? -# !!! custom - size = None, # custom size - scale_type = None, # scaling way: fit, centr, side, pad, padside - init_res = [4,4], # Initial (minimal) resolution for progressive training - architecture = 'skip', # Architecture: 'orig', 'skip', 'resnet'. - resample_filter = [1,3,3,1], # Low-pass filter to apply when resampling activations. - conv_clamp = None, # Clamp the output of convolution layers to +-X, None = disable clamping. - use_fp16 = False, # Use FP16 for this block? - fp16_channels_last = False, # Use channels-last memory format with FP16? - **layer_kwargs, # Arguments for SynthesisLayer. - ): - assert architecture in ['orig', 'skip', 'resnet'] - super().__init__() - self.in_channels = in_channels - self.w_dim = w_dim - self.resolution = resolution - self.size = size # !!! custom - self.scale_type = scale_type # !!! custom - self.init_res = init_res # !!! 
custom - self.img_channels = img_channels - self.is_last = is_last - self.architecture = architecture - self.use_fp16 = use_fp16 - self.channels_last = (use_fp16 and fp16_channels_last) - self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter)) - self.num_conv = 0 - self.num_torgb = 0 - - if in_channels == 0: -# !!! custom - self.const = torch.nn.Parameter(torch.randn([out_channels, *init_res])) - # self.const = torch.nn.Parameter(torch.randn([out_channels, resolution, resolution])) - - if in_channels != 0: - self.conv0 = SynthesisLayer(in_channels, out_channels, w_dim=w_dim, resolution=resolution, up=2, - init_res=init_res, scale_type=scale_type, size=size, # !!! custom - resample_filter=resample_filter, conv_clamp=conv_clamp, channels_last=self.channels_last, **layer_kwargs) - self.num_conv += 1 - - self.conv1 = SynthesisLayer(out_channels, out_channels, w_dim=w_dim, resolution=resolution, - init_res=init_res, scale_type=scale_type, size=size, # !!! custom - conv_clamp=conv_clamp, channels_last=self.channels_last, **layer_kwargs) - self.num_conv += 1 - - if is_last or architecture == 'skip': - self.torgb = ToRGBLayer(out_channels, img_channels, w_dim=w_dim, - conv_clamp=conv_clamp, channels_last=self.channels_last) - self.num_torgb += 1 - - if in_channels != 0 and architecture == 'resnet': - self.skip = Conv2dLayer(in_channels, out_channels, kernel_size=1, bias=False, up=2, - resample_filter=resample_filter, channels_last=self.channels_last) - -# !!! custom - # def forward(self, x, img, ws, latmask, dconst, force_fp32=False, fused_modconv=None, **layer_kwargs): - def forward(self, x, img, ws, force_fp32=False, fused_modconv=None, **layer_kwargs): - misc.assert_shape(ws, [None, self.num_conv + self.num_torgb, self.w_dim]) - w_iter = iter(ws.unbind(dim=1)) - dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32 - memory_format = torch.channels_last if self.channels_last and not force_fp32 else torch.contiguous_format - if fused_modconv is None: - with misc.suppress_tracer_warnings(): # this value will be treated as a constant - fused_modconv = (not self.training) and (dtype == torch.float32 or int(x.shape[0]) == 1) - - # Input. - if self.in_channels == 0: - x = self.const.to(dtype=dtype, memory_format=memory_format) - x = x.unsqueeze(0).repeat([ws.shape[0], 1, 1, 1]) -# !!! custom const size - if 'side' in self.scale_type and 'symm' in self.scale_type: # looks better - const_size = self.init_res if self.size is None else self.size - x = fix_size(x, const_size, self.scale_type) -# distortion technique from Aydao - # x += dconst - else: - # misc.assert_shape(x, [None, self.in_channels, self.resolution // 2, self.resolution // 2]) - x = x.to(dtype=dtype, memory_format=memory_format) - - # Main layers. - if self.in_channels == 0: -# !!! custom latmask - # x = self.conv1(x, None, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs) - x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs) - elif self.architecture == 'resnet': - y = self.skip(x, gain=np.sqrt(0.5)) -# !!! custom latmask - # x = self.conv0(x, latmask, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs) - # x = self.conv1(x, None, next(w_iter), fused_modconv=fused_modconv, gain=np.sqrt(0.5), **layer_kwargs) - x = self.conv0(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs) - x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, gain=np.sqrt(0.5), **layer_kwargs) - x = y.add_(x) - else: -# !!! 
custom latmask - # x = self.conv0(x, latmask, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs) - # x = self.conv1(x, None, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs) - x = self.conv0(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs) - x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs) - - # ToRGB. - if img is not None: -# !!! custom img size - # misc.assert_shape(img, [None, self.img_channels, self.resolution // 2, self.resolution // 2]) - img = upfirdn2d.upsample2d(img, self.resample_filter) - img = fix_size(img, self.size, scale_type=self.scale_type) - - if self.is_last or self.architecture == 'skip': - y = self.torgb(x, next(w_iter), fused_modconv=fused_modconv) - y = y.to(dtype=torch.float32, memory_format=torch.contiguous_format) - img = img.add_(y) if img is not None else y - - assert x.dtype == dtype - assert img is None or img.dtype == torch.float32 - return x, img - -#---------------------------------------------------------------------------- - -@persistence.persistent_class -class SynthesisNetwork(torch.nn.Module): - def __init__(self, - w_dim, # Intermediate latent (W) dimensionality. - img_resolution, # Output image resolution. - img_channels, # Number of color channels. -# !!! custom - init_res = [4,4], # Initial (minimal) resolution for progressive training - size = None, # Output size - scale_type = None, # scaling way: fit, centr, side, pad, padside - channel_base = 32768, # Overall multiplier for the number of channels. - channel_max = 512, # Maximum number of channels in any layer. - num_fp16_res = 0, # Use FP16 for the N highest resolutions. - verbose = False, # - **block_kwargs, # Arguments for SynthesisBlock. - ): - assert img_resolution >= 4 and img_resolution & (img_resolution - 1) == 0 - super().__init__() - self.w_dim = w_dim - self.img_resolution = img_resolution - self.res_log2 = int(np.log2(img_resolution)) - self.img_channels = img_channels - self.fmap_base = channel_base - self.block_resolutions = [2 ** i for i in range(2, self.res_log2 + 1)] - channels_dict = {res: min(channel_base // res, channel_max) for res in self.block_resolutions} - fp16_resolution = max(2 ** (self.res_log2 + 1 - num_fp16_res), 8) - - # calculate intermediate layers sizes for arbitrary output resolution - custom_res = (img_resolution * init_res[0] // 4, img_resolution * init_res[1] // 4) - if size is None: size = custom_res - if init_res != [4,4] and verbose: - print(' .. init res', init_res, size) - keep_first_layers = 2 if scale_type == 'fit' else None - hws = hw_scales(size, custom_res, self.res_log2 - 2, keep_first_layers, verbose) - if verbose: print(hws, '..', custom_res, self.res_log2-1) - - self.num_ws = 0 - for i, res in enumerate(self.block_resolutions): - in_channels = channels_dict[res // 2] if res > 4 else 0 - out_channels = channels_dict[res] - use_fp16 = (res >= fp16_resolution) - is_last = (res == self.img_resolution) - block = SynthesisBlock(in_channels, out_channels, w_dim=w_dim, resolution=res, - init_res=init_res, scale_type=scale_type, size=hws[i], # !!! 
custom - img_channels=img_channels, is_last=is_last, use_fp16=use_fp16, **block_kwargs) - self.num_ws += block.num_conv - if is_last: - self.num_ws += block.num_torgb - setattr(self, f'b{res}', block) - - # def forward(self, ws, latmask, dconst, **block_kwargs): - def forward(self, ws, **block_kwargs): - block_ws = [] - with torch.autograd.profiler.record_function('split_ws'): - misc.assert_shape(ws, [None, self.num_ws, self.w_dim]) - ws = ws.to(torch.float32) - w_idx = 0 - for res in self.block_resolutions: - block = getattr(self, f'b{res}') - block_ws.append(ws.narrow(1, w_idx, block.num_conv + block.num_torgb)) - w_idx += block.num_conv - - x = img = None - for res, cur_ws in zip(self.block_resolutions, block_ws): - block = getattr(self, f'b{res}') -# !!! custom - # x, img = block(x, img, cur_ws, latmask, dconst, **block_kwargs) - x, img = block(x, img, cur_ws, **block_kwargs) - return img - -#---------------------------------------------------------------------------- - -@persistence.persistent_class -class Generator(torch.nn.Module): - def __init__(self, - z_dim, # Input latent (Z) dimensionality. - c_dim, # Conditioning label (C) dimensionality. - w_dim, # Intermediate latent (W) dimensionality. - img_resolution, # Output resolution. - img_channels, # Number of output color channels. -# !!! custom - init_res = [4,4], # Initial (minimal) resolution for progressive training - mapping_kwargs = {}, # Arguments for MappingNetwork. - synthesis_kwargs = {}, # Arguments for SynthesisNetwork. - ): - super().__init__() - self.z_dim = z_dim - self.c_dim = c_dim - self.w_dim = w_dim - self.img_resolution = img_resolution - self.init_res = init_res # !!! custom - self.img_channels = img_channels -# !!! custom - self.synthesis = SynthesisNetwork(w_dim=w_dim, img_resolution=img_resolution, init_res=init_res, img_channels=img_channels, **synthesis_kwargs) # !!! custom - self.num_ws = self.synthesis.num_ws - self.mapping = MappingNetwork(z_dim=z_dim, c_dim=c_dim, w_dim=w_dim, num_ws=self.num_ws, **mapping_kwargs) -# !!! custom - self.output_shape = [1, img_channels, img_resolution * init_res[0] // 4, img_resolution * init_res[1] // 4] - -# !!! custom - # def forward(self, z, c, latmask, dconst, truncation_psi=1, truncation_cutoff=None, **synthesis_kwargs): - def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, **synthesis_kwargs): - # def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, **synthesis_kwargs): - ws = self.mapping(z, c, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff) - # img = self.synthesis(ws, latmask, dconst, **synthesis_kwargs) # !!! custom - img = self.synthesis(ws, **synthesis_kwargs) # !!! 
custom - return img diff --git a/spaces/Rifd/Sdallmodels/app.py b/spaces/Rifd/Sdallmodels/app.py deleted file mode 100644 index f397cd755aebfbccab9ee9d1a0491461aec2766d..0000000000000000000000000000000000000000 --- a/spaces/Rifd/Sdallmodels/app.py +++ /dev/null @@ -1,1280 +0,0 @@ -import gradio as gr -import os -import sys -from pathlib import Path - -models = [ - "Yntec/Reanimate", - "Yntec/Deliberate2", - "Yntec/AgarthaChadstyle", - "Yntec/526", - "Yntec/526Mix", - "Yntec/UberRealisticLegacy", - "Yntec/fennPhoto", - "Yntec/makeitdoubleplz", - "Yntec/ChiliConCarne", - "Yntec/m0nst3rfy3", - "Yntec/DucHaitenLofi", - "Yntec/DreamWorks", - "Yntec/SillySymphonies", - "Yntec/AnythingV3-768", - "Yntec/MeinaAlter", - "Yntec/YiffyMix", - "Yntec/LuckyStrike", - "Yntec/Crayon", - "Yntec/Yuzu", - "Yntec/WoopWoopAnime", - "Yntec/PotaytoPotahto", - "Yntec/Protogen", - "Yntec/Deliberate", #1K - "Yntec/DeliberateRealisticWoop", #1K - "Yntec/EstheticRetroAnime", #1K - "Yntec/DucHaiten-GoldenLife", - "Yntec/3DCuteWave", - "Yntec/GoldenEra", #1K - "Yntec/ClassicEra", #1K - "Yntec/GoodLife", #1K - "Yntec/Hassanim", #1K - "Yntec/DeliberateRemix", #1K - "Yntec/3DCute", #2K - "Yntec/SuperCuteRemix", #2K - "Yntec/Trending", #2K - "Yntec/DreamWorld", #3K - "Yntec/MGM", #3K - "Yntec/3DKX/", #3K - "Yntec/3DKXv11", #3K - "Yntec/Cute", #3K - "Yntec/DreamFulV2", #3K - "Yntec/DucHaitenDarkside4", #3K - "Yntec/Citrus", #3K - "Yntec/Classic", #3K - "Yntec/BasilRemix", #3K - "Yntec/BeautyFool", #4K - "Yntec/CyberRealistic", #4K - "Yntec/Lyriel", #4K - "Yntec/3DRendering", #4K - "Yntec/aMovieTrend", #2K - "Yntec/Dreamscape", #2K - "Yntec/elldrethSVividMix", #2K - "Yntec/elldrethSLucidMix", #2K - "Yntec/CitrineDreamMix", #2K - "Yntec/elldrethsImagination", #2K - "Yntec/ReVAnimated768", #2K - "Yntec/OpenNijiRemix", #2K - "Yntec/Dreamful3", #5K - "Yntec/BrandiMilne", #6K - "Yntec/dosmixVAE", #3K - "Yntec/aPhotographicTrend", #3K - "Yntec/BeenYou", #3K - "Yntec/level4", #3K - "Yntec/AbsoluteRemix", #7K - "Yntec/mistoonAnime2", #7K - "Yntec/DucHaiten-FANCYxFANCY",#7K - "Yntec/3Danimation", #4K - "Yntec/DucHaitenNiji", #4K - "Yntec/Darkside", #4K - "Yntec/animeTEN", #4K - "Yntec/Dreamscapes_n_Dragonfire_v2", #4K - "Yntec/Cetus", #4K - "Yntec/lamettaRemix", #5K - "Yntec/lametta", #2K - "Yntec/RadiantCinemagic", #5K - "Yntec/RadiantVibes", #3K - "Yntec/NeverEndingDream768", #3K - "Yntec/Dreamlike", #3K - "Yntec/LAMEanime", #10K - "Yntec/Dreamshaper8", #12K - "Yntec/Oiran", #6K - "Yntec/RealCartoon3D", #6K - "Yntec/animeTWO", #6K - "Yntec/lamettaNightly", #6K - "Yntec/REV", #6K - "Yntec/Thriller", #13K - "Yntec/Splash", #7K - "Yntec/OpenGenDiffusers", #7K - "Yntec/epiCRealismVAE", #8K - "Yntec/LehinaModel", #8K - "Yntec/NaughtyChildren", #9K - "Yntec/vividicAnime", #9K - "Yntec/HassanBlend12", #10 - "Yntec/HassanBlend1512VAE", #9K - "Yntec/animeSEXTILLION/", #15K - "Yntec/AbsoluteReality", #15K - "Yntec/CetusRemix", #16K - "Yntec/edgeOfRealism", #25K - "Yntec/aMovieX/", #28K - "Yntec/photoMovieXFinal", #31K - "Yntec/nuipenimix2", #34K - "Yntec/epiCPhotoGasm", #40K - "Yntec/HitenDiffusion", #2K - "Yntec/DreamShaperRemix", - "Yntec/DeliShaper", - "Yntec/dreamlike-photoreal-remix", - "Yntec/epiCVision", - "Yntec/realistic-vision-v12", - "Yntec/MangledMerge3_768", - "Yntec/OpenLexica", - "Yntec/DreamLikeRemix", - "Yntec/humu", - "Linaqruf/animagine-xl", - "nerijs/pixel-art-xl", - "Yntec/MapleSyrup", - "Yntec/WoopWoopRemix", - "Yntec/ArcticFowl", - "Yntec/iComixRemix", - "Yntec/SamaritanDoesArt", - "Yntec/samaritan3dCartoon2MVAE", - 
"Yntec/CartoonStyleClassic", - "Yntec/CultClassic", - "Yntec/CinemaE", - "Yntec/GalenaVAE", - "Yntec/a-ZovyaRemix", - "Yntec/a-ZovyaRPGV3VAE", - "Yntec/Infinite80s", - "Yntec/a-ZoviaRPGArtistV2VAE", - "Yntec/GameAssetsDigitalUnitsCreationKit", - "Yntec/QToriReloaded", - "Yntec/Toonify2", - "Yntec/LunarLuma", - "Yntec/Lunar", - "Yntec/Chik2", - "Yntec/photoMovieRealistic", - "Yntec/DucHaiten-StyleLikeMeVAE", - "Yntec/InsaneRealisticCVAE", - "Yntec/Noosphere_v3_CVAE", - "Yntec/RealRainbows", - "Yntec/InsaneM3U", - "Yntec/ChildrenStoriesAnime", - "Yntec/theallysMixIV-verisimilar", - "Yntec/DucHaitenAnime768", - "Yntec/RainbowClassicAnime", - "Yntec/DucHaitenClassicAnime768", - "Yntec/Luma", - "Yntec/WesternAnimation", - "Yntec/NeverExisted", - "Yntec/Rainbowsphere", - "Yntec/Ninja-Diffusers", - "Yntec/GOLDFish", - "Yntec/DreamAnything", - "Yntec/Dreamsphere", - "Yntec/Photosphere", - "Yntec/yabalMixTrue25D_v2_VAE", - "dreamlike-art/dreamlike-anime-1.0", - "Yntec/RainbowDreams", - "Yntec/rainbowpatch", - "Yntec/DucHaiten-Retro-Diffusers", - "Yntec/ElldrethsRetroMix_Diffusers", - "Yntec/sexyToons", - "Yntec/photoMovieX/", - "dreamlike-art/dreamlike-photoreal-2.0", - "dreamlike-art/dreamlike-diffusion-1.0", - "Yntec/CuteYuki2", - "Yntec/KIDSILLUSTRATIONS", - "Yntec/COOLKIDSV2", - "Yntec/Pavo-Mix-Diffusers", - "Yntec/RPG_Remix", - "Yntec/OrangeRemix", - "Yntec/PeachMix3", - "Yntec/DucHaitenAIart-beta", - "Yntec/samdoesartsUlt", - "Yntec/NovelAI", - "Yntec/NovelAIRemix", - "Yntec/Hiten", - "AIARTCHAN/AbyssHellHero", - "digiplay/VersaMix_base_diffusers", - "digiplay/OldFish_fix1.1.997_diffusers", - "digiplay/VoidnoiseCore_R0829", - "digiplay/OldFish_v1.1", - "digiplay/AI-infinity-V1-fp16", - "digiplay/wantan25D_prototype", - "digiplay/PotoPhotoRealism_v1", - "digiplay/LunarDiffusion_v1.27", - "digiplay/insaneRealistic_v1", - "digiplay/OLDFish_2348_diffusers", - "digiplay/OldFish_v1.1_diffusers_recover", - "digiplay/OldFish_v1.1mix_hello", - "digiplay/OldFish_v1.1_personal_HDmix", - "digiplay/FishMix_v1", - "DucHaiten/DucHaitenDreamWorld", - "digiplay/LemonteaMixPainterly2_v1", - "digiplay/SweetMuse_diffusers", - "digiplay/Realisian_v1", - "Hius/DreamFul-V2", - "digiplay/m3u", #263 - "digiplay/RMHF_2.5D_v2", - "digiplay/FishMix_v1.1", - "stablediffusionapi/icomix-2", - "digiplay/Remedy", - "Hemlok/QuinceMix", - "digiplay/K-main", - "digiplay/LusterMix_v1.5_safetensors", #256 - "digiplay/perfectLewdFantasy_v1.01", - "digiplay/Opiate_v2", - "digiplay/PhotoSomnia_vFinal", - "digiplay/polla_mix_2.5D", - "stablediffusionapi/all-526-animated", - "AstraliteHeart/pony-diffusion", - "stablediffusionapi/chilloutmixsf", - "Masagin/Deliberate", #235 - "DucHaiten/DucHaitenSuperCute", - "stablediffusionapi/all-526", - "theintuitiveye/HARDblend", - "stablediffusionapi/cyberrealistic", - "stablediffusionapi/cusp-of-serenity", - "SG161222/Realistic_Vision_V1.4", - "digiplay/paulEberSRealismMix_v1", - "Ojimi/anime-kawai-diffusion", - "hassanblend/hassanblend1.4", - "digiplay/zodiac_eclipse_DAY1", - "claudfuen/photorealistic-fuen-v1", - "stablediffusionapi/chillout-app-factory", - "DucHaiten/DucHaitenJourney", - "robotjung/SemiRealMix", - "Joeythemonster/anything-midjourney-v-4-1", - "prompthero/midjourney-v4-diffusion", - "prompthero/openjourney-v4", - "x67/shortjourney", - "FredZhang7/paint-journey-v2", - "digiplay/PersonaStyleCheckpoint", - "darkstorm2150/Protogen_Infinity_Official_Release", - "PeggyWang/openjourney-v2", - "darkstorm2150/Protogen_x3.4_Official_Release", - "stablediffusionapi/deliberateappfactory", 
#236 - "digiplay/CrossoverMix_v2", - "stablediffusionapi/spybg", - "stablediffusionapi/dreamshaper-v6", #239 - "stablediffusionapi/the-ally", - "darkstorm2150/Protogen_x5.8_Official_Release", - "coreco/seek.art_MEGA", - "digiplay/BlankCanvas_v1", #07.11 - "digiplay/OnlyAnime_v2.3", - "Korakoe/OpenNiji", - "digiplay/Photon_v1", - "digiplay/Pika_v2", - "digiplay/RealCartoon3D_F16full_v3.1", #254 - "digiplay/realidefmix_3.5VAE", - "digiplay/realmixUnrealjourney_v1", - "digiplay/SyncMix_v1.5", - "digiplay/TWingshadow_v1.2", - "digiplay/V3_by_Hans_Asian", - "digiplay/whatamix_v1", - - "digiplay/2K", #216 - "digiplay/AIGEN_v1.4_diffusers", - "digiplay/asyncsMIX_v2", - "digiplay/BrickAndMortarMix_v2.0_diffusers", #224 - "digiplay/BeautyFool_v1.2VAE_pruned", - "digiplay/breakdomainrealistic_R2333", - "digiplay/CCTV2.5d_v1", #219 - "digiplay/ChikMix_V3", #253 - "stablediffusionapi/chilledremixsazyou-r", #195 - "digiplay/CityEdge_StyleMix_v1.44", - "stablediffusionapi/dalcefopainting2", #199 - "digiplay/EdisonNilMix_v1", #07.10 - "digiplay/DiamondCoalMix_v2_pruned_diffusers", - "digiplay/DreamShaper_7", #259 - "digiplay/elegantEntropy_v1.1", #221 - "digiplay/EtherRealMix_LUX2", - "digiplay/KawaiiRealisticAnimeMix_A0.3", - "digiplay/highQualityCGMIX_v1", - "digiplay/HIMAWARI_v1", - "digiplay/Hodgepodge_v2.1", #217 - "digiplay/illustro1stEdition_illustroV1", #214 - "digiplay/Juggernaut_final", #07.11 - "digiplay/Landscape_PhotoReal_v1", - "digiplay/LuckyStrikeMix0.2Realistic", #07.10 - "digiplay/Matrix_Stellar_VAE_v1", - "digiplay/PrefixRealisticMix_v1", - "digiplay/RealEpicMajicRevolution_v1", #07.11 - "digiplay/ShampooMix_4", #252 - "digiplay/ShowmakerMix_v1", - "digiplay/SoapMix2.5D_v1", - "digiplay/ZemiHR_v2_diffusers", - - "Redamancy2299/dreambooth", - "Lykon/DreamShaper", #240 - "trysem/DreamShaper-3.3", - "HusseinHE/hussein-deliberate-1000steps", #237 - "stablediffusionapi/majicmixfantasy", - "stablediffusionapi/majicmixsombre", #247 - "wavymulder/modelshoot", - "digiplay/ChillyMix_v1", #215 - "stablediffusionapi/foto-assisted-diffusion", #197 - "wavymulder/portraitplus", - "stablediffusionapi/chilloutmix-4264", - "stablediffusionapi/product-design", #194 - "kandinsky-community/kandinsky-2-1", #251 - - "digiplay/2.5DSET_diffusers", #227 - "digiplay/2-KWI", #213 - "digiplay/alstroemeriaMix_v1", - "wavymulder/Analog-Diffusion", - "digiplay/AniRealityMix_v1", #257 - "digiplay/ARRealVX1.1", - "digiplay/BadAnime_v1", - "digiplay/BasilKorea_v2", #07.11 - "digiplay/bluePencilRealistic_v01", - "digiplay/bra_v40_diffusers", - "digiplay/Burger_Mix_semiR2Lite", #222 - "digiplay/calicomixreal_v2.0_diffusers", - "digiplay/CampurSari_Gen1", - "digiplay/cocotifacute_v1", #07.10 - "digiplay/cosfMix_v1", #223 - "digiplay/CounterMix_v2", #211 - "digiplay/CuriousMerge2.5D_v5", - "digiplay/dosmix", - "digiplay/epi_2.5Dphotogodess_diffusers", - "stablediffusionapi/droodlyrielv15", - "digiplay/fantexi_v0.7", - "digiplay/fishmix_other_v1", - "digiplay/FormCleansingMix_v1", #228 - "digiplay/FumizukiMix_v1", - "digiplay/helloworld_v3", - "digiplay/HenmixArt_v1", - "digiplay/ISOmix_v3.22", - "digiplay/JF-Cu_v1", - "digiplay/kencanmix_v2.0beta", - "wavymulder/lomo-diffusion", - "stablediffusionapi/majicmixv5", #192 - "digiplay/mecha_musume_vivid_soft", - "digiplay/MGM", - "digiplay/MiracleMixGlitter_v1", - "digiplay/MixTape_RocknRoll_v3punk_bake_fp16", - "digiplay/NextPhoto_v1", - "digiplay/Noosphere_v3", - "digiplay/nk15_diffusers", #230 - "digiplay/PeachMixsRelistic_R0", #262 - "wavymulder/timeless-diffusion", - 
"digiplay/WhiteDreamyHillMix_v1", #220 - "digiplay/ya3p_VAE", #258 - - "DucHaiten/DucHaitenAnime", - "DucHaiten/DucHaitenAIart", - "digiplay/BeenYouLiteL11_diffusers", - "Manseo/Colorful-v4.5-Plus", #244 - "Guizmus/SDArt_ChaosAndOrder", - "DucHaiten/DH_ClassicAnime", - "stablediffusionapi/disneypixar", - "johnslegers/epic-diffusion-v1.1", - "emilianJR/epiCRealism", - "johnslegers/epic-diffusion", - "digiplay/endlessMixRenatus_v1.1", #07.10 - "digiplay/fantasticAnime_diffusers", - "stablediffusionapi/ghostmix", - "Duskfallcrew/EpicMix_Realism", - "nitrosocke/Nitro-Diffusion", - "prompthero/openjourney", - "Guizmus/SDArt_something", - "DucHaiten/DucHaiten-StyleLikeMe", - "ddPn08/subtly", #250 - "22h/vintedois-diffusion-v0-1", - - "circulus/sd-anireal-v2.7", - "0xJustin/Dungeons-and-Diffusion", - "darkstorm2150/Protogen_v2.2_Official_Release", - "Guizmus/SDArt_AliceInDiffusionLand", - "stablediffusionapi/realistic-vision-v20-2047", - "redstonehero/RPG-v5-itr17_A10T", - - "stablediffusionapi/camelliamix25d", - "Guizmus/SDArt_cosmichorrors", - "DGSpitzer/DGSpitzer-Art-Diffusion", - "stablediffusionapi/emotion-puppeteer-v2", - "stablediffusionapi/fengjing", - "stablediffusionapi/fuwafuwamix", - "Fred99774/girlnew1", - "stablediffusionapi/majicmixrealistic", - "badmonk/nxka", - "ItsJayQz/SynthwavePunk-v2", - "zhyemmmm/ToonYou", - "stablediffusionapi/uber-realistic-merge", - "stablediffusionapi/vne732h9dh4", - "stablediffusionapi/wand-magic2", - "stablediffusionapi/waifu-journey-2", - "stablediffusionapi/zovya", - - "Guizmus/SDArt_cosmichorrors768", - "stablediffusionapi/counterfeit-v30", - "stablediffusionapi/amireal", - #"JamesFlare/pastel-mix", #"andite/pastel-mix", - "stablediffusionapi/rev-anim", - "aipicasso/picasso-diffusion-1-1", - "xiaolxl/Gf_style2", - "circulus/sd-semireal-v2.8", - "Crosstyan/BPModel", #07.11 - - "digiplay/Dusk-1", - "ogkalu/Comic-Diffusion", - "Guizmus/SDArt_ChaosAndOrder768", - "gsdf/Counterfeit-V2.0", - "dwancin/memoji", #07.11 - "nousr/robo-diffusion-2-base", - - ##"hakurei/waifu-diffusion", - "WarriorMama777/AbyssOrangeMix2", - "stablediffusionapi/abyssorangemix2nsfw", #200 - "cag/anything-v3-1", - "iZELX1/Anything-V3-X", - "xyn-ai/anything-v4.0", #"andite/anything-v4.0", - "D1b4l4p/AsianMix", - #"Fred99774/chilloutvlara", - "aipicasso/cool-japan-diffusion-2-1-2", - "stablediffusionapi/corneos-7th-heaven-m", #196 - "DGSpitzer/Cyberpunk-Anime-Diffusion", - "stablediffusionapi/dark-sushi-mix", - "joachimsallstrom/Double-Exposure-Diffusion", - "eimiss/EimisAnimeDiffusion_1.0v", - "prompthero/funko-diffusion", - "nitrosocke/Ghibli-Diffusion", - ###"iZELX1/Grapefruit", - "xiaolxl/GuoFeng3", - "stablediffusionapi/tmnd-mix", - "coder119/Vectorartz_Diffusion", #203 - - "WarriorMama777/AbyssOrangeMix", - "AIARTCHAN/7pa", - "JosephusCheung/ACertainModel", - "JosephusCheung/ACertainThing", - "JosephusCheung/ACertainty", - "AIARTCHAN/AbyssHellVer3", - "AIARTCHAN/AbyssMapleVer3", - "stablediffusionapi/abyssorangemixsfw", - "AIARTCHAN/anidosmixV2", - "stablediffusionapi/anime-model-v2", - "kubanemil/AnyLORA", - "stablediffusionapi/hc-anything-v3-vae", #231 - "mm00/anything-v3.0-light", - "stablediffusionapi/anythingelse-v4", - "stablediffusionapi/anything-v45-fixed", - "stablediffusionapi/anything-v5", - "nitrosocke/Arcane-Diffusion", - "nitrosocke/archer-diffusion", - "stablediffusionapi/architecture-tuned-model", - "WarriorMama777/BloodOrangeMix", - "wavymulder/collage-diffusion", - "stablediffusionapi/camelliamixline", - "digiplay/chrysanthemumMix_v1", - 
"digiplay/CiderMix_ciderR", #260 - "Johnhex/Clam", #243 - "stablediffusionapi/cosmic-babes", - "digiplay/CoffeeDonut_v1", - "stablediffusionapi/dark-sushi-25d", - "digiplay/Defacta_v1_diffusers", #226 - ## "WarriorMama777/EerieOrangeMix", - "digiplay/DuelAnimeMix_v1", #225 - "Envvi/Inkpunk-Diffusion", - "digiplay/kotosmix_diffusers", #229 - "stablediffusionapi/meinaalter", - "Nacholmo/meinamixv7-diffusers", - "stablediffusionapi/meinapastel", - "AIARTCHAN/MIX-Pro-V4", - "stablediffusionapi/shirataki-mix", #191 - "NoCrypt/SomethingV2_2", - "NoCrypt/SomethingV2", - "badmonk/sxzumi", - ## "stablediffusionapi/three-delicacy", - ## "stablediffusionapi/three-delicacy-wonto", - "etherealxx/systemy-csrmodel-cutesexyrobutts", #"andite/cutesexyrobutts-diffusion", - "sd-dreambooth-library/true-guweiz-style", # "andite/guweiz-diffusion", - "stablediffusionapi/vector-art", #198 - "digiplay/xxMix_4", - ###"mio/hiten", #"andite/hiten-diffusion", - ### "andite/mashuu-diffusion", - ### "andite/mignon-diffusion", - ### "andite/mikapikazo-diffusion", - ### "andite/piromizu-diffusion", - "digiplay/Zevinemix_v1.0/", - - "digiplay/AnaMix_v2", #07.11 - "stablediffusionapi/animetestmodelv3", - "yulet1de/anything", #232 - "hakurei/artstation-diffusion", #07.11 - "Fictiverse/Stable_Diffusion_BalloonArt_Model", - "stablediffusionapi/bg-dream-irl", - "stablediffusionapi/bg-dream-model-b", #193 - "Rardilit/Ciffusion_v0.1", - "circulus/sd-anireal-2d-v2", - "circulus/sd-photoreal-v2.7", - "circulus/sd-photoreal-photo-v2", - "circulus/sd-anireal-2.5d-v2", - "circulus/sd-anireal-v2.5", - "circulus/sd-photoreal-semi-v2", - "circulus/sd-photoreal-real-v2", - "circulus/sd-photoreal-v2.5", - "circulus/sd-anireal-3d-v2", - "circulus/sd-anireal-v2.8", - "nitrosocke/classic-anim-diffusion", - "Conflictx/Complex-Lineart", #245 - "sayakpaul/da-vinci-sd-pokemon", - "nitrosocke/elden-ring-diffusion", - "digiplay/EtherBluMix_1", #07.11 - "digiplay/fantasticmix_v40_test", #261 - "theintuitiveye/FantasyMix", - "Fictiverse/Stable_Diffusion_FluidArt_Model", - "nitrosocke/Future-Diffusion", - "ItsJayQz/GTA5_Artwork_Diffusion", #205 - "digiplay/hellopure_v2.23", - "TheLastBen/hrrzg-style-768px", #246 - "nevernotsean/IllustratedPaperMini", #242 - "dallinmackay/JWST-Deep-Space-diffusion", - "prompthero/linkedin-diffusion", - "mann-e/mann-e_4_rev-0-1", #210 - "ItsJayQz/Marvel_WhatIf_Diffusion", #206 - "yuanbit/max-15-1e-6-1500", - "MyneFactory/MF-Base", #248 - "Fictiverse/Stable_Diffusion_Microscopic_model", #249 - "nitrosocke/mo-di-diffusion", - "luongphamit/NeverEnding-Dream2", #241 - "lambdalabs/sd-naruto-diffusers", #201 - "Vernon-2/output_test", - "Fictiverse/Stable_Diffusion_PaperCut_Model", - "bsuutari/path_to_saved_model", - "bsuutari/path_to_saved_model_rafa", - "digiplay/PlanetBumix_v1", - "lambdalabs/sd-pokemon-diffusers", #202 - "prompthero/poolsuite-diffusion", - "digiplay/RealismEngine_v1", - "nitrosocke/redshift-diffusion", - "nitrosocke/redshift-diffusion-768", - "nousr/robo-diffusion", - "digiplay/SDVN1-Real_v1", #255 - "nitrosocke/spider-verse-diffusion", - #"runwayml/stable-diffusion-v1-5", - "nicky007/stable-diffusion-logo-fine-tuned", - "stablediffusionapi/three-delicacy", #233 - "stablediffusionapi/three-delicacy-wonto", #234 - "naclbit/trinart_stable_diffusion_v2", - "dallinmackay/Tron-Legacy-diffusion", - "digiplay/unstableDiffusersYamerMIX_v3", - "dallinmackay/Van-Gogh-diffusion", - "ItsJayQz/Valorant_Diffusion", - "Fictiverse/Stable_Diffusion_VoxelArt_Model", #204 - "wavymulder/wavyfusion", - "Yntec/HassanRemix", - 
"Yntec/Reddit", - "Yntec/CinematicReality", - "Yntec/3DKX2", - "CompVis/stable-diffusion-v1-4", #530 - "CompVis/stable-diffusion-v1-3", #207 - "CompVis/stable-diffusion-v1-2", #208 - "CompVis/stable-diffusion-v1-1", #209 -] -current_model = models[0] - -text_gen1=gr.Interface.load("spaces/daspartho/prompt-extend") -#text_gen1=gr.Interface.load("spaces/Omnibus/MagicPrompt-Stable-Diffusion_link") - -models2=[ - gr.Interface.load(f"models/{models[0]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[1]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[2]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[3]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[4]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[5]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[6]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[7]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[8]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[9]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[10]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[11]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[12]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[13]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[14]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[15]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[16]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[17]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[18]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[19]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[20]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[21]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[22]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[23]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[24]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[25]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[26]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[27]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[28]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[29]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[30]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[31]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[32]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[33]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[34]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[35]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[36]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[37]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[38]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[39]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[40]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[41]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[42]}",live=True,preprocess=False), - 
gr.Interface.load(f"models/{models[43]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[44]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[45]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[46]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[47]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[48]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[49]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[50]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[51]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[52]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[53]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[54]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[55]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[56]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[57]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[58]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[59]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[60]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[61]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[62]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[63]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[64]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[65]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[66]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[67]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[68]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[69]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[70]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[71]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[72]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[73]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[74]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[75]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[76]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[77]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[78]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[79]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[80]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[81]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[82]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[83]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[84]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[85]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[86]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[87]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[88]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[89]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[90]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[91]}",live=True,preprocess=False), - 
gr.Interface.load(f"models/{models[92]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[93]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[94]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[95]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[96]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[97]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[98]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[99]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[100]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[101]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[102]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[103]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[104]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[105]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[106]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[107]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[108]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[109]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[110]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[111]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[112]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[113]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[114]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[115]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[116]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[117]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[118]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[119]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[120]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[121]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[122]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[123]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[124]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[125]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[126]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[127]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[128]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[129]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[130]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[131]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[132]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[133]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[134]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[135]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[136]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[137]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[138]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[139]}",live=True,preprocess=False), - 
gr.Interface.load(f"models/{models[140]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[141]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[142]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[143]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[144]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[145]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[146]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[147]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[148]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[149]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[150]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[151]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[152]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[153]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[154]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[155]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[156]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[157]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[158]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[159]}",live=True,preprocess=False), - - gr.Interface.load(f"models/{models[160]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[161]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[162]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[163]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[164]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[165]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[166]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[167]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[168]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[169]}",live=True,preprocess=False), - - gr.Interface.load(f"models/{models[170]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[171]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[172]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[173]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[174]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[175]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[176]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[177]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[178]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[179]}",live=True,preprocess=False), - - gr.Interface.load(f"models/{models[180]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[181]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[182]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[183]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[184]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[185]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[186]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[187]}",live=True,preprocess=False), - 
gr.Interface.load(f"models/{models[188]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[189]}",live=True,preprocess=False), - - gr.Interface.load(f"models/{models[190]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[191]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[192]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[193]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[194]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[195]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[196]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[197]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[198]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[199]}",live=True,preprocess=False), - - gr.Interface.load(f"models/{models[200]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[201]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[202]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[203]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[204]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[205]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[206]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[207]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[208]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[209]}",live=True,preprocess=False), - - gr.Interface.load(f"models/{models[210]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[211]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[212]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[213]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[214]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[215]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[216]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[217]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[218]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[219]}",live=True,preprocess=False), - - gr.Interface.load(f"models/{models[220]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[221]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[222]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[223]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[224]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[225]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[226]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[227]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[228]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[229]}",live=True,preprocess=False), - - gr.Interface.load(f"models/{models[230]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[231]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[232]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[233]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[234]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[235]}",live=True,preprocess=False), - 
gr.Interface.load(f"models/{models[236]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[237]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[238]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[239]}",live=True,preprocess=False), - - gr.Interface.load(f"models/{models[240]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[241]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[242]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[243]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[244]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[245]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[246]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[247]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[248]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[249]}",live=True,preprocess=False), - - gr.Interface.load(f"models/{models[250]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[251]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[252]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[253]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[254]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[255]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[256]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[257]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[258]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[259]}",live=True,preprocess=False), - - gr.Interface.load(f"models/{models[260]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[261]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[262]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[263]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[264]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[265]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[266]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[267]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[268]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[269]}",live=True,preprocess=False), - - gr.Interface.load(f"models/{models[270]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[271]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[272]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[273]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[274]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[275]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[276]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[277]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[278]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[279]}",live=True,preprocess=False), - - gr.Interface.load(f"models/{models[280]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[281]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[282]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[283]}",live=True,preprocess=False), - 
gr.Interface.load(f"models/{models[284]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[285]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[286]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[287]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[288]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[289]}",live=True,preprocess=False), - - gr.Interface.load(f"models/{models[290]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[291]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[292]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[293]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[294]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[295]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[296]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[297]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[298]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[299]}",live=True,preprocess=False), - - gr.Interface.load(f"models/{models[300]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[301]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[302]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[303]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[304]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[305]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[306]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[307]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[308]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[309]}",live=True,preprocess=False), - - gr.Interface.load(f"models/{models[310]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[311]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[312]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[313]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[314]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[315]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[316]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[317]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[318]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[319]}",live=True,preprocess=False), - - gr.Interface.load(f"models/{models[320]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[321]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[322]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[323]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[324]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[325]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[326]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[327]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[328]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[329]}",live=True,preprocess=False), - - gr.Interface.load(f"models/{models[330]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[331]}",live=True,preprocess=False), - 
gr.Interface.load(f"models/{models[332]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[333]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[334]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[335]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[336]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[337]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[338]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[339]}",live=True,preprocess=False), - - gr.Interface.load(f"models/{models[340]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[341]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[342]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[343]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[344]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[345]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[346]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[347]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[348]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[349]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[350]}",live=True,preprocess=False), - - gr.Interface.load(f"models/{models[351]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[352]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[353]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[354]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[355]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[356]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[357]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[358]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[359]}",live=True,preprocess=False), - - gr.Interface.load(f"models/{models[360]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[361]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[362]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[363]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[364]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[365]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[366]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[367]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[368]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[369]}",live=True,preprocess=False), - - gr.Interface.load(f"models/{models[370]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[371]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[372]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[373]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[374]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[375]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[376]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[377]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[378]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[379]}",live=True,preprocess=False), - - 
gr.Interface.load(f"models/{models[380]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[381]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[382]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[383]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[384]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[385]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[386]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[387]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[388]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[389]}",live=True,preprocess=False), - - gr.Interface.load(f"models/{models[390]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[391]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[392]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[393]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[394]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[395]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[396]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[397]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[398]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[399]}",live=True,preprocess=False), - - gr.Interface.load(f"models/{models[400]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[401]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[402]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[403]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[404]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[405]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[406]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[407]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[408]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[409]}",live=True,preprocess=False), - - gr.Interface.load(f"models/{models[410]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[411]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[412]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[413]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[414]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[415]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[416]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[417]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[418]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[419]}",live=True,preprocess=False), - - gr.Interface.load(f"models/{models[420]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[421]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[422]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[423]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[424]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[425]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[426]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[427]}",live=True,preprocess=False), - 
gr.Interface.load(f"models/{models[428]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[429]}",live=True,preprocess=False), - - gr.Interface.load(f"models/{models[430]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[431]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[432]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[433]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[434]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[435]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[436]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[437]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[438]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[439]}",live=True,preprocess=False), - - gr.Interface.load(f"models/{models[440]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[441]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[442]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[443]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[444]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[445]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[446]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[447]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[448]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[449]}",live=True,preprocess=False), - - gr.Interface.load(f"models/{models[450]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[451]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[452]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[453]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[454]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[455]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[456]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[457]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[458]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[459]}",live=True,preprocess=False), - - gr.Interface.load(f"models/{models[460]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[461]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[462]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[463]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[464]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[465]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[466]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[467]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[469]}",live=True,preprocess=False), - - gr.Interface.load(f"models/{models[470]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[471]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[472]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[473]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[474]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[475]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[476]}",live=True,preprocess=False), - 
gr.Interface.load(f"models/{models[477]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[478]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[479]}",live=True,preprocess=False), - - gr.Interface.load(f"models/{models[480]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[481]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[482]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[483]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[484]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[485]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[486]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[487]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[488]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[489]}",live=True,preprocess=False), - - gr.Interface.load(f"models/{models[490]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[491]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[492]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[493]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[494]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[495]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[496]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[497]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[498]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[499]}",live=True,preprocess=False), - - gr.Interface.load(f"models/{models[500]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[501]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[502]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[503]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[504]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[505]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[506]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[507]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[508]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[509]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[510]}",live=True,preprocess=False), - - gr.Interface.load(f"models/{models[511]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[512]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[513]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[514]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[515]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[516]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[517]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[518]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[519]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[520]}",live=True,preprocess=False), - - gr.Interface.load(f"models/{models[521]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[522]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[523]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[524]}",live=True,preprocess=False), - 
gr.Interface.load(f"models/{models[525]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[526]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[527]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[528]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[529]}",live=True,preprocess=False), - #Because there's a model 0, to know the number of models you add 1 to {models[n]} - -] - -def text_it1(inputs,text_gen1=text_gen1): - go_t1=text_gen1(inputs) - return(go_t1) - -def set_model(current_model): - current_model = models[current_model] - return gr.update(label=(f"{current_model}")) - - -def send_it1(inputs, model_choice): #negative_prompt, - proc1=models2[model_choice] - output1=proc1(inputs) - #negative_prompt=negative_prompt - return(output1) -css="""""" - - -with gr.Blocks(css=css) as myface: - gr.HTML(""" -
-                Toy World
-
-                Blitz Diffusion - 530 Stable Diffusion models, but why? For your enjoyment!
-
-                2023.11.5 NEW! Toys to play with: The models AgarthaChadstyle, Deliberate2 and Reanimate have been added!
-                2023.11.3 NEW! The models m0nst3rfy3, ChiliConCarne, makeitdoubleplz, fennPhoto, UberRealisticLegacy, 526Mix and 526 have been added!
-
-                If a model is already loaded, each new image takes less than 20 seconds to generate!
-
-                If you get an ERROR, it's because that model ran out of memory; try again, or wait a minute and try again. Have fun!
-
- """) - with gr.Row(): - with gr.Column(scale=100): - #Model selection dropdown - model_name1 = gr.Dropdown(label="Select Model", choices=[m for m in models], type="index", value=current_model, interactive=True) - with gr.Row(): - with gr.Column(scale=100): - magic1=gr.Textbox(label="Your Prompt", lines=4) #Positive - #with gr.Column(scale=100): - #negative_prompt=gr.Textbox(label="Negative Prompt", lines=1) - gr.HTML("""""") - run=gr.Button("Generate Image") - with gr.Row(): - with gr.Column(style="width=800px"): - output1=gr.Image(label=(f"{current_model}")) - - - with gr.Row(): - with gr.Column(scale=50): - input_text=gr.Textbox(label="Use this box to extend an idea automagically, by typing some words and clicking Extend Idea",lines=2) - see_prompts=gr.Button("Extend Idea -> overwrite the contents of the `Your Prompt´ box above") - use_short=gr.Button("Copy the contents of this box to the `Your Prompt´ box above") - def short_prompt(inputs): - return(inputs) - - model_name1.change(set_model,inputs=model_name1,outputs=[output1]) - - run.click(send_it1, inputs=[magic1, model_name1], outputs=[output1]) - - use_short.click(short_prompt,inputs=[input_text],outputs=magic1) - - see_prompts.click(text_it1,inputs=[input_text],outputs=magic1) - -myface.queue(concurrency_count=200) -myface.launch(inline=True, show_api=False, max_threads=400) \ No newline at end of file diff --git a/spaces/RinInori/Vicuna_ChatBot/README.md b/spaces/RinInori/Vicuna_ChatBot/README.md deleted file mode 100644 index 99683e542aa5e9ccf5034932ece7ba7e5142437d..0000000000000000000000000000000000000000 --- a/spaces/RinInori/Vicuna_ChatBot/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Vicuna ChatBot -emoji: 😻 -colorFrom: green -colorTo: green -sdk: gradio -sdk_version: 3.29.0 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/Ritori/play_with_baby_llama2/run.c b/spaces/Ritori/play_with_baby_llama2/run.c deleted file mode 100644 index 15352aebab3ff890f776d1ae9ab6c3ab2253b373..0000000000000000000000000000000000000000 --- a/spaces/Ritori/play_with_baby_llama2/run.c +++ /dev/null @@ -1,490 +0,0 @@ -/* -Inference for Llama-2 Transformer model in pure C. 
- -Example compile: (see README for more details) -$ gcc -O3 -o run run.c -lm - -Then run with: -$ ./run -*/ - -#include -#include -#include -#include -#include -#include - -// ---------------------------------------------------------------------------- -// Transformer and RunState structs, and related memory management - -typedef struct { - int dim; // transformer dimension - int hidden_dim; // for ffn layers - int n_layers; // number of layers - int n_heads; // number of query heads - int n_kv_heads; // number of key/value heads (can be < query heads because of multiquery) - int vocab_size; // vocabulary size, usually 256 (byte-level) - int seq_len; // max sequence length -} Config; - -typedef struct { - // token embedding table - float* token_embedding_table; // (vocab_size, dim) - // weights for rmsnorms - float* rms_att_weight; // (layer, dim) rmsnorm weights - float* rms_ffn_weight; // (layer, dim) - // weights for matmuls - float* wq; // (layer, dim, dim) - float* wk; // (layer, dim, dim) - float* wv; // (layer, dim, dim) - float* wo; // (layer, dim, dim) - // weights for ffn - float* w1; // (layer, hidden_dim, dim) - float* w2; // (layer, dim, hidden_dim) - float* w3; // (layer, hidden_dim, dim) - // final rmsnorm - float* rms_final_weight; // (dim,) - // freq_cis for RoPE relatively positional embeddings - float* freq_cis_real; // (seq_len, dim/2) - float* freq_cis_imag; // (seq_len, dim/2) -} TransformerWeights; - -typedef struct { - // current wave of activations - float *x; // activation at current time stamp (dim,) - float *xb; // same, but inside a residual branch (dim,) - float *xb2; // an additional buffer just for convenience (dim,) - float *hb; // buffer for hidden dimension in the ffn (hidden_dim,) - float *hb2; // buffer for hidden dimension in the ffn (hidden_dim,) - float *q; // query (dim,) - float *k; // key (dim,) - float *v; // value (dim,) - float *att; // buffer for scores/attention values (seq_len,) - float *logits; // output logits - // kv cache - float* key_cache; // (layer, seq_len, dim) - float* value_cache; // (layer, seq_len, dim) -} RunState; - -void malloc_run_state(RunState* s, Config* p) { - // we calloc instead of malloc to keep valgrind happy - s->x = calloc(p->dim, sizeof(float)); - s->xb = calloc(p->dim, sizeof(float)); - s->xb2 = calloc(p->dim, sizeof(float)); - s->hb = calloc(p->hidden_dim, sizeof(float)); - s->hb2 = calloc(p->hidden_dim, sizeof(float)); - s->q = calloc(p->dim, sizeof(float)); - s->k = calloc(p->dim, sizeof(float)); - s->v = calloc(p->dim, sizeof(float)); - s->att = calloc(p->seq_len, sizeof(float)); - s->logits = calloc(p->vocab_size, sizeof(float)); - s->key_cache = calloc(p->n_layers * p->seq_len * p->dim, sizeof(float)); - s->value_cache = calloc(p->n_layers * p->seq_len * p->dim, sizeof(float)); - // ensure all mallocs went fine - if (!s->x || !s->xb || !s->xb2 || !s->hb || !s->hb2 || !s->q - || !s->k || !s->v || !s->att || !s->logits || !s->key_cache - || !s->value_cache) { - printf("malloc failed!\n"); - exit(1); - } -} - -void free_run_state(RunState* s) { - free(s->x); - free(s->xb); - free(s->xb2); - free(s->hb); - free(s->hb2); - free(s->q); - free(s->k); - free(s->v); - free(s->att); - free(s->logits); - free(s->key_cache); - free(s->value_cache); -} - -void malloc_weights(TransformerWeights* w, Config* p) { - // we calloc instead of malloc to keep valgrind happy - w->token_embedding_table = calloc(p->vocab_size * p->dim, sizeof(float)); - w->rms_att_weight = calloc(p->n_layers * p->dim, sizeof(float)); - 
w->rms_ffn_weight = calloc(p->n_layers * p->dim, sizeof(float)); - w->wq = calloc(p->n_layers * p->dim * p->dim, sizeof(float)); - w->wk = calloc(p->n_layers * p->dim * p->dim, sizeof(float)); - w->wv = calloc(p->n_layers * p->dim * p->dim, sizeof(float)); - w->wo = calloc(p->n_layers * p->dim * p->dim, sizeof(float)); - w->w1 = calloc(p->n_layers * p->hidden_dim * p->dim, sizeof(float)); - w->w2 = calloc(p->n_layers * p->dim * p->hidden_dim, sizeof(float)); - w->w3 = calloc(p->n_layers * p->hidden_dim * p->dim, sizeof(float)); - w->rms_final_weight = calloc(p->dim, sizeof(float)); - w->freq_cis_real = calloc(p->seq_len * p->dim / 2, sizeof(float)); - w->freq_cis_imag = calloc(p->seq_len * p->dim / 2, sizeof(float)); - // ensure all mallocs went fine - if (!w->token_embedding_table || !w->rms_att_weight || !w->rms_ffn_weight - || !w->wq || !w->wk || !w->wv || !w->wo || !w->w1 || !w->w2 || !w->w3 || - !w->rms_final_weight || !w->freq_cis_real || !w->freq_cis_imag) { - printf("malloc failed!\n"); - exit(1); - } -} - -void free_weights(TransformerWeights* w) { - free(w->token_embedding_table); - free(w->rms_att_weight); - free(w->rms_ffn_weight); - free(w->wq); - free(w->wk); - free(w->wv); - free(w->wo); - free(w->w1); - free(w->w2); - free(w->w3); - free(w->rms_final_weight); - free(w->freq_cis_real); - free(w->freq_cis_imag); -} - -// ---------------------------------------------------------------------------- -// initialization: read from checkpoint - -int checkpoint_init_weights(TransformerWeights *w, Config* p, FILE* f) { - if (fread(w->token_embedding_table, sizeof(float), p->vocab_size * p->dim, f) != p->vocab_size * p->dim) return 1; - if (fread(w->rms_att_weight, sizeof(float), p->n_layers * p->dim, f) != p->n_layers * p->dim) return 1; - if (fread(w->wq, sizeof(float), p->n_layers * p->dim * p->dim, f) != p->n_layers * p->dim * p->dim) return 1; - if (fread(w->wk, sizeof(float), p->n_layers * p->dim * p->dim, f) != p->n_layers * p->dim * p->dim) return 1; - if (fread(w->wv, sizeof(float), p->n_layers * p->dim * p->dim, f) != p->n_layers * p->dim * p->dim) return 1; - if (fread(w->wo, sizeof(float), p->n_layers * p->dim * p->dim, f) != p->n_layers * p->dim * p->dim) return 1; - if (fread(w->rms_ffn_weight, sizeof(float), p->n_layers * p->dim, f) != p->n_layers * p->dim) return 1; - if (fread(w->w1, sizeof(float), p->n_layers * p->dim * p->hidden_dim, f) != p->n_layers * p->dim * p->hidden_dim) return 1; - if (fread(w->w2, sizeof(float), p->n_layers * p->hidden_dim * p->dim, f) != p->n_layers * p->hidden_dim * p->dim) return 1; - if (fread(w->w3, sizeof(float), p->n_layers * p->dim * p->hidden_dim, f) != p->n_layers * p->dim * p->hidden_dim) return 1; - if (fread(w->rms_final_weight, sizeof(float), p->dim, f) != p->dim) return 1; - int head_size = p->dim / p->n_heads; - if (fread(w->freq_cis_real, sizeof(float), p->seq_len * head_size / 2, f) != p->seq_len * head_size / 2) return 1; - if (fread(w->freq_cis_imag, sizeof(float), p->seq_len * head_size / 2, f) != p->seq_len * head_size / 2) return 1; - return 0; -} - - -// ---------------------------------------------------------------------------- -// neural net blocks - -void accum(float *a, float *b, int size) { - for (int i = 0; i < size; i++) { - a[i] += b[i]; - } -} - -void rmsnorm(float* o, float* x, float* weight, int size) { - // calculate sum of squares - float ss = 0.0f; - for (int j = 0; j < size; j++) { - ss += x[j] * x[j]; - } - ss /= size; - ss += 1e-5f; - ss = 1.0f / sqrt(ss); - // normalize and scale - for (int j = 0; j 
< size; j++) { - o[j] = weight[j] * (ss * x[j]); - } -} - -void softmax(float* x, int size) { - // find max value (for numerical stability) - float max_val = x[0]; - for (int i = 1; i < size; i++) { - if (x[i] > max_val) { - max_val = x[i]; - } - } - // exp and sum - float sum = 0.0f; - for (int i = 0; i < size; i++) { - x[i] = exp(x[i] - max_val); - sum += x[i]; - } - // normalize - for (int i = 0; i < size; i++) { - x[i] /= sum; - } -} - -void matmul(float* xout, float* x, float* w, int n, int d) { - // W (d,n) @ x (n,) -> xout (d,) - #pragma omp parallel for - for (int i = 0; i < d; i++) { - float val = 0.0f; - for (int j = 0; j < n; j++) { - val += w[i * n + j] * x[j]; - } - xout[i] = val; - } -} - -void transformer(int token, int pos, Config* p, RunState* s, TransformerWeights* w) { - - // a few convenience variables - float *x = s->x; - int dim = p->dim; - int hidden_dim = p->hidden_dim; - int head_size = dim / p->n_heads; - - // copy the token embedding into x - float* content_row = &(w->token_embedding_table[token * dim]); - memcpy(x, content_row, dim*sizeof(*x)); - - // pluck out the "pos" row of freq_cis_real and freq_cis_imag - float* freq_cis_real_row = w->freq_cis_real + pos * head_size / 2; - float* freq_cis_imag_row = w->freq_cis_imag + pos * head_size / 2; - - // forward all the layers - for(int l = 0; l < p->n_layers; l++) { - - // attention rmsnorm - rmsnorm(s->xb, x, w->rms_att_weight + l*dim, dim); - - // qkv matmuls for this position - matmul(s->q, s->xb, w->wq + l*dim*dim, dim, dim); - matmul(s->k, s->xb, w->wk + l*dim*dim, dim, dim); - matmul(s->v, s->xb, w->wv + l*dim*dim, dim, dim); - - // apply RoPE rotation to the q and k vectors for each head - for (int h = 0; h < p->n_heads; h++) { - // get the q and k vectors for this head - float* q = s->q + h * head_size; - float* k = s->k + h * head_size; - // rotate q and k by the freq_cis_real and freq_cis_imag - for (int i = 0; i < head_size; i+=2) { - float q0 = q[i]; - float q1 = q[i+1]; - float k0 = k[i]; - float k1 = k[i+1]; - float fcr = freq_cis_real_row[i/2]; - float fci = freq_cis_imag_row[i/2]; - q[i] = q0 * fcr - q1 * fci; - q[i+1] = q0 * fci + q1 * fcr; - k[i] = k0 * fcr - k1 * fci; - k[i+1] = k0 * fci + k1 * fcr; - } - } - - // save key,value at this time step (pos) to our kv cache - int loff = l * p->seq_len * dim; // kv cache layer offset for convenience - float* key_cache_row = s->key_cache + loff + pos * dim; - float* value_cache_row = s->value_cache + loff + pos * dim; - memcpy(key_cache_row, s->k, dim*sizeof(*key_cache_row)); - memcpy(value_cache_row, s->v, dim*sizeof(*value_cache_row)); - - // multihead attention. 
iterate over all heads - for (int h = 0; h < p->n_heads; h++) { - // get the query vector for this head - float* q = s->q + h * head_size; - // iterate over all timesteps, including the current one - for (int t = 0; t <= pos; t++) { - // get the key vector for this head and at this timestep - float* k = s->key_cache + loff + t * dim + h * head_size; - // calculate the attention score as the dot product of q and k - float score = 0.0f; - for (int i = 0; i < head_size; i++) { - score += q[i] * k[i]; - } - score /= sqrtf(head_size); - // save the score to the attention buffer - s->att[t] = score; - } - - // softmax the scores to get attention weights, from 0..pos inclusively - softmax(s->att, pos + 1); - - // weighted sum of the values, store back into xb - for (int i = 0; i < head_size; i++) { - float val = 0.0f; - for (int t = 0; t <= pos; t++) { - val += s->att[t] * s->value_cache[loff + t * dim + h * head_size + i]; // note bad locality - } - s->xb[h * head_size + i] = val; - } - } - - // final matmul to get the output of the attention - matmul(s->xb2, s->xb, w->wo + l*dim*dim, dim, dim); - - // residual connection back into x - accum(x, s->xb2, dim); - - // ffn rmsnorm - rmsnorm(s->xb, x, w->rms_ffn_weight + l*dim, dim); - - // Now for FFN in PyTorch we have: self.w2(F.silu(self.w1(x)) * self.w3(x)) - // first calculate self.w1(x) and self.w3(x) - matmul(s->hb, s->xb, w->w1 + l*dim*hidden_dim, dim, hidden_dim); - matmul(s->hb2, s->xb, w->w3 + l*dim*hidden_dim, dim, hidden_dim); - - // F.silu; silu(x)=x*σ(x),where σ(x) is the logistic sigmoid - for (int i = 0; i < hidden_dim; i++) { - s->hb[i] = s->hb[i] * (1.0f / (1.0f + expf(-s->hb[i]))); - } - - // elementwise multiply with w3(x) - for (int i = 0; i < hidden_dim; i++) { - s->hb[i] = s->hb[i] * s->hb2[i]; - } - - // final matmul to get the output of the ffn - matmul(s->xb, s->hb, w->w2 + l*dim*hidden_dim, hidden_dim, dim); - - // residual connection - accum(x, s->xb, dim); - } - - // final rmsnorm - rmsnorm(x, x, w->rms_final_weight, dim); - - // classifier into logits - matmul(s->logits, x, w->token_embedding_table, p->dim, p->vocab_size); -} - -int sample(float* probabilities, int n) { - // sample index from probabilities, they must sum to 1 - float r = (float)rand() / (float)RAND_MAX; - float cdf = 0.0f; - for (int i = 0; i < n; i++) { - cdf += probabilities[i]; - if (r < cdf) { - return i; - } - } - return n - 1; // in case of rounding errors -} - -int argmax(float* v, int n) { - // return argmax of v in elements 0..n - int max_i = 0; - float max_p = v[0]; - for (int i = 1; i < n; i++) { - if (v[i] > max_p) { - max_i = i; - max_p = v[i]; - } - } - return max_i; -} - -// ---------------------------------------------------------------------------- - -long time_in_ms() { - struct timeval time; - gettimeofday(&time, NULL); - return time.tv_sec * 1000 + time.tv_usec / 1000; -} - -int main(int argc, char *argv[]) { - - // poor man's C argparse - char *checkpoint = NULL; - float temperature = 0.9f; - // 'checkpoint' is necessary arg - if (argc < 2) { - printf("Usage: %s [temperature] [seed]\n", argv[0]); - return 1; - } - checkpoint = argv[1]; - // temperature is optional - if (argc >= 3) { - temperature = atof(argv[2]); - } - // seed is optional - if (argc >= 4) { - unsigned int seed = atoi(argv[3]); - srand(seed); - } else { - time_t current_time; - time(¤t_time); - srand((unsigned int)current_time); - } - - // read in the model.bin file - Config config; - TransformerWeights weights; - { - FILE *file = fopen(checkpoint, "rb"); - if 
(!file) { - printf("Unable to open the checkpoint file %s!\n", checkpoint); - return 1; - } - // read in the config header - if(fread(&config, sizeof(Config), 1, file) != 1) { return 1; } - // read in the Transformer weights - malloc_weights(&weights, &config); - if(checkpoint_init_weights(&weights, &config, file)) { return 1; } - fclose(file); - } - - // read in the tokenizer.bin file - char** vocab = (char**)malloc(config.vocab_size * sizeof(char*)); - { - FILE *file = fopen("tokenizer.bin", "rb"); - if (!file) { - printf("Unable to open the tokenizer file tokenizer.bin! Run " - "python tokenizer.py to convert tokenizer.model -> tokenizer.bin\n"); - return 1; - } - int len; - for (int i = 0; i < config.vocab_size; i++) { - if(fread(&len, sizeof(int), 1, file) != 1) { return 1; } - vocab[i] = (char *)malloc(len + 1); - if(fread(vocab[i], len, 1, file) != 1) { return 1; } - vocab[i][len] = '\0'; // add the string terminating token - } - fclose(file); - } - - // create and init the application RunState - RunState state; - malloc_run_state(&state, &config); - - // the current position we are in - long start = time_in_ms(); - - int next; - int token = 1; // 1 = BOS token in Llama-2 sentencepiece - int pos = 0; - while (pos < config.seq_len) { - - // forward the transformer to get logits for the next token - transformer(token, pos, &config, &state, &weights); - - // sample the next token - if(temperature == 0.0f) { - // greedy argmax sampling - next = argmax(state.logits, config.vocab_size); - } else { - // apply the temperature to the logits - for (int q=0; q` - between the backward pass(es) and :meth:`step`. - If :meth:`unscale_` is not called explicitly, gradients will be unscaled automatically during :meth:`step`. - Simple example, using :meth:`unscale_` to enable clipping of unscaled gradients:: - ... - scaler.scale(loss).backward() - scaler.unscale_(optimizer) - torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm) - scaler.step(optimizer) - scaler.update() - Args: - optimizer (torch.optim.Optimizer): Optimizer that owns the gradients to be unscaled. - .. warning:: - :meth:`unscale_` should only be called once per optimizer per :meth:`step` call, - and only after all gradients for that optimizer's assigned parameters have been accumulated. - Calling :meth:`unscale_` twice for a given optimizer between each :meth:`step` triggers a RuntimeError. - .. warning:: - :meth:`unscale_` may unscale sparse gradients out of place, replacing the ``.grad`` attribute. - """ - if not self._enabled: - return - - self._check_scale_growth_tracker("unscale_") - - optimizer_state = self._per_optimizer_states[id(optimizer)] - - if optimizer_state["stage"] is OptState.UNSCALED: # pylint: disable=no-else-raise - raise RuntimeError( - "unscale_() has already been called on this optimizer since the last update()." - ) - elif optimizer_state["stage"] is OptState.STEPPED: - raise RuntimeError("unscale_() is being called after step().") - - # FP32 division can be imprecise for certain compile options, so we carry out the reciprocal in FP64. - assert self._scale is not None - inv_scale = self._scale.to("cpu").double().reciprocal().float().to(self._scale.device) - found_inf = torch.full( - (1,), 0.0, dtype=torch.float32, device=self._scale.device - ) - - optimizer_state["found_inf_per_device"] = self._unscale_grads_( - optimizer, inv_scale, found_inf, False - ) - optimizer_state["stage"] = OptState.UNSCALED - -def update(self, new_scale=None): - """ - Updates the scale factor. 
- If any optimizer steps were skipped the scale is multiplied by ``backoff_factor`` - to reduce it. If ``growth_interval`` unskipped iterations occurred consecutively, - the scale is multiplied by ``growth_factor`` to increase it. - Passing ``new_scale`` sets the new scale value manually. (``new_scale`` is not - used directly, it's used to fill GradScaler's internal scale tensor. So if - ``new_scale`` was a tensor, later in-place changes to that tensor will not further - affect the scale GradScaler uses internally.) - Args: - new_scale (float or :class:`torch.FloatTensor`, optional, default=None): New scale factor. - .. warning:: - :meth:`update` should only be called at the end of the iteration, after ``scaler.step(optimizer)`` has - been invoked for all optimizers used this iteration. - """ - if not self._enabled: - return - - _scale, _growth_tracker = self._check_scale_growth_tracker("update") - - if new_scale is not None: - # Accept a new user-defined scale. - if isinstance(new_scale, float): - self._scale.fill_(new_scale) # type: ignore[union-attr] - else: - reason = "new_scale should be a float or a 1-element torch.FloatTensor with requires_grad=False." - assert isinstance(new_scale, torch.FloatTensor), reason # type: ignore[attr-defined] - assert new_scale.numel() == 1, reason - assert new_scale.requires_grad is False, reason - self._scale.copy_(new_scale) # type: ignore[union-attr] - else: - # Consume shared inf/nan data collected from optimizers to update the scale. - # If all found_inf tensors are on the same device as self._scale, this operation is asynchronous. - found_infs = [ - found_inf.to(device="cpu", non_blocking=True) - for state in self._per_optimizer_states.values() - for found_inf in state["found_inf_per_device"].values() - ] - - assert len(found_infs) > 0, "No inf checks were recorded prior to update." - - found_inf_combined = found_infs[0] - if len(found_infs) > 1: - for i in range(1, len(found_infs)): - found_inf_combined += found_infs[i] - - to_device = _scale.device - _scale = _scale.to("cpu") - _growth_tracker = _growth_tracker.to("cpu") - - core._amp_update_scale_( - _scale, - _growth_tracker, - found_inf_combined, - self._growth_factor, - self._backoff_factor, - self._growth_interval, - ) - - _scale = _scale.to(to_device) - _growth_tracker = _growth_tracker.to(to_device) - # To prepare for next iteration, clear the data collected from optimizers this iteration. 
- self._per_optimizer_states = defaultdict(_refresh_per_optimizer_state) - -def gradscaler_init(): - torch.xpu.amp.GradScaler = ipex.cpu.autocast._grad_scaler.GradScaler - torch.xpu.amp.GradScaler._unscale_grads_ = _unscale_grads_ - torch.xpu.amp.GradScaler.unscale_ = unscale_ - torch.xpu.amp.GradScaler.update = update - return torch.xpu.amp.GradScaler \ No newline at end of file diff --git a/spaces/SeyedAli/Persian-Visual-Question-Answering-1/README.md b/spaces/SeyedAli/Persian-Visual-Question-Answering-1/README.md deleted file mode 100644 index 9fcae7a089eafd33f643d819d143a169db604218..0000000000000000000000000000000000000000 --- a/spaces/SeyedAli/Persian-Visual-Question-Answering-1/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Persian Visual Question Answering (Version 1) -emoji: 🖼️❓ -colorFrom: pink -colorTo: pink -sdk: gradio -sdk_version: 3.44.4 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Shad0ws/Chat-with-Files/README.md b/spaces/Shad0ws/Chat-with-Files/README.md deleted file mode 100644 index ac34757d7c091ded8b52dd34d6eb0b5c78e58c28..0000000000000000000000000000000000000000 --- a/spaces/Shad0ws/Chat-with-Files/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Chat With Files -emoji: ⚡ -colorFrom: blue -colorTo: yellow -sdk: streamlit -sdk_version: 1.17.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/chromadb/test/db/test_system.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/chromadb/test/db/test_system.py deleted file mode 100644 index 15271bd480fc069e9ae06a8e12409b876f87435d..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/chromadb/test/db/test_system.py +++ /dev/null @@ -1,317 +0,0 @@ -import pytest -from typing import Generator, List, Callable, Dict, Union -from chromadb.types import Collection, Segment, SegmentScope -from chromadb.db.impl.sqlite import SqliteDB -from chromadb.config import System, Settings -from chromadb.db.system import SysDB -from chromadb.db.base import NotFoundError, UniqueConstraintError -from pytest import FixtureRequest -import uuid - - -def sqlite() -> Generator[SysDB, None, None]: - """Fixture generator for sqlite DB""" - db = SqliteDB(System(Settings(sqlite_database=":memory:", allow_reset=True))) - db.start() - yield db - db.stop() - - -def db_fixtures() -> List[Callable[[], Generator[SysDB, None, None]]]: - return [sqlite] - - -@pytest.fixture(scope="module", params=db_fixtures()) -def sysdb(request: FixtureRequest) -> Generator[SysDB, None, None]: - yield next(request.param()) - - -sample_collections = [ - Collection( - id=uuid.uuid4(), - name="test_collection_1", - topic="test_topic_1", - metadata={"test_str": "str1", "test_int": 1, "test_float": 1.3}, - ), - Collection( - id=uuid.uuid4(), - name="test_collection_2", - topic="test_topic_2", - metadata={"test_str": "str2", "test_int": 2, "test_float": 2.3}, - ), - Collection( - id=uuid.uuid4(), - name="test_collection_3", - topic="test_topic_3", - metadata={"test_str": "str3", "test_int": 3, "test_float": 3.3}, - ), -] - - -def test_create_get_delete_collections(sysdb: SysDB) -> None: - sysdb.reset() - - for collection in sample_collections: - sysdb.create_collection(collection) - - results = sysdb.get_collections() - 
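-    # sort by name so the comparison below does not depend on storage order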
results = sorted(results, key=lambda c: c["name"]) - - assert sorted(results, key=lambda c: c["name"]) == sample_collections - - # Duplicate create fails - with pytest.raises(UniqueConstraintError): - sysdb.create_collection(sample_collections[0]) - - # Find by name - for collection in sample_collections: - result = sysdb.get_collections(name=collection["name"]) - assert result == [collection] - - # Find by topic - for collection in sample_collections: - result = sysdb.get_collections(topic=collection["topic"]) - assert result == [collection] - - # Find by id - for collection in sample_collections: - result = sysdb.get_collections(id=collection["id"]) - assert result == [collection] - - # Find by id and topic (positive case) - for collection in sample_collections: - result = sysdb.get_collections(id=collection["id"], topic=collection["topic"]) - assert result == [collection] - - # find by id and topic (negative case) - for collection in sample_collections: - result = sysdb.get_collections(id=collection["id"], topic="other_topic") - assert result == [] - - # Delete - c1 = sample_collections[0] - sysdb.delete_collection(c1["id"]) - - results = sysdb.get_collections() - assert c1 not in results - assert len(results) == len(sample_collections) - 1 - assert sorted(results, key=lambda c: c["name"]) == sample_collections[1:] - - by_id_result = sysdb.get_collections(id=c1["id"]) - assert by_id_result == [] - - # Duplicate delete throws an exception - with pytest.raises(NotFoundError): - sysdb.delete_collection(c1["id"]) - - -def test_update_collections(sysdb: SysDB) -> None: - metadata: Dict[str, Union[str, int, float]] = { - "test_str": "str1", - "test_int": 1, - "test_float": 1.3, - } - coll = Collection( - id=uuid.uuid4(), - name="test_collection_1", - topic="test_topic_1", - metadata=metadata, - ) - - sysdb.reset() - - sysdb.create_collection(coll) - - # Update name - coll["name"] = "new_name" - sysdb.update_collection(coll["id"], name=coll["name"]) - result = sysdb.get_collections(name=coll["name"]) - assert result == [coll] - - # Update topic - coll["topic"] = "new_topic" - sysdb.update_collection(coll["id"], topic=coll["topic"]) - result = sysdb.get_collections(topic=coll["topic"]) - assert result == [coll] - - # Add a new metadata key - metadata["test_str2"] = "str2" - sysdb.update_collection(coll["id"], metadata={"test_str2": "str2"}) - result = sysdb.get_collections(id=coll["id"]) - assert result == [coll] - - # Update a metadata key - metadata["test_str"] = "str3" - sysdb.update_collection(coll["id"], metadata={"test_str": "str3"}) - result = sysdb.get_collections(id=coll["id"]) - assert result == [coll] - - # Delete a metadata key - del metadata["test_str"] - sysdb.update_collection(coll["id"], metadata={"test_str": None}) - result = sysdb.get_collections(id=coll["id"]) - assert result == [coll] - - # Delete all metadata keys - coll["metadata"] = None - sysdb.update_collection(coll["id"], metadata=None) - result = sysdb.get_collections(id=coll["id"]) - assert result == [coll] - - -sample_segments = [ - Segment( - id=uuid.UUID("00000000-d7d7-413b-92e1-731098a6e492"), - type="test_type_a", - scope=SegmentScope.VECTOR, - topic=None, - collection=sample_collections[0]["id"], - metadata={"test_str": "str1", "test_int": 1, "test_float": 1.3}, - ), - Segment( - id=uuid.UUID("11111111-d7d7-413b-92e1-731098a6e492"), - type="test_type_b", - topic="test_topic_2", - scope=SegmentScope.VECTOR, - collection=sample_collections[1]["id"], - metadata={"test_str": "str2", "test_int": 2, "test_float": 
2.3}, - ), - Segment( - id=uuid.UUID("22222222-d7d7-413b-92e1-731098a6e492"), - type="test_type_b", - topic="test_topic_3", - scope=SegmentScope.METADATA, - collection=None, - metadata={"test_str": "str3", "test_int": 3, "test_float": 3.3}, - ), -] - - -def test_create_get_delete_segments(sysdb: SysDB) -> None: - sysdb.reset() - - for collection in sample_collections: - sysdb.create_collection(collection) - - for segment in sample_segments: - sysdb.create_segment(segment) - - results = sysdb.get_segments() - results = sorted(results, key=lambda c: c["id"]) - - assert results == sample_segments - - # Duplicate create fails - with pytest.raises(UniqueConstraintError): - sysdb.create_segment(sample_segments[0]) - - # Find by id - for segment in sample_segments: - result = sysdb.get_segments(id=segment["id"]) - assert result == [segment] - - # Find by type - result = sysdb.get_segments(type="test_type_a") - assert result == sample_segments[:1] - - result = sysdb.get_segments(type="test_type_b") - assert result == sample_segments[1:] - - # Find by collection ID - result = sysdb.get_segments(collection=sample_collections[0]["id"]) - assert result == sample_segments[:1] - - # Find by type and collection ID (positive case) - result = sysdb.get_segments( - type="test_type_a", collection=sample_collections[0]["id"] - ) - assert result == sample_segments[:1] - - # Find by type and collection ID (negative case) - result = sysdb.get_segments( - type="test_type_b", collection=sample_collections[0]["id"] - ) - assert result == [] - - # Delete - s1 = sample_segments[0] - sysdb.delete_segment(s1["id"]) - - results = sysdb.get_segments() - assert s1 not in results - assert len(results) == len(sample_segments) - 1 - assert sorted(results, key=lambda c: c["type"]) == sample_segments[1:] - - # Duplicate delete throws an exception - with pytest.raises(NotFoundError): - sysdb.delete_segment(s1["id"]) - - -def test_update_segment(sysdb: SysDB) -> None: - metadata: Dict[str, Union[str, int, float]] = { - "test_str": "str1", - "test_int": 1, - "test_float": 1.3, - } - segment = Segment( - id=uuid.uuid4(), - type="test_type_a", - scope=SegmentScope.VECTOR, - topic="test_topic_a", - collection=sample_collections[0]["id"], - metadata=metadata, - ) - - sysdb.reset() - for c in sample_collections: - sysdb.create_collection(c) - - sysdb.create_segment(segment) - - # Update topic to new value - segment["topic"] = "new_topic" - sysdb.update_segment(segment["id"], topic=segment["topic"]) - result = sysdb.get_segments(id=segment["id"]) - assert result == [segment] - - # Update topic to None - segment["topic"] = None - sysdb.update_segment(segment["id"], topic=segment["topic"]) - result = sysdb.get_segments(id=segment["id"]) - assert result == [segment] - - # Update collection to new value - segment["collection"] = sample_collections[1]["id"] - sysdb.update_segment(segment["id"], collection=segment["collection"]) - result = sysdb.get_segments(id=segment["id"]) - assert result == [segment] - - # Update collection to None - segment["collection"] = None - sysdb.update_segment(segment["id"], collection=segment["collection"]) - result = sysdb.get_segments(id=segment["id"]) - assert result == [segment] - - # Add a new metadata key - metadata["test_str2"] = "str2" - sysdb.update_segment(segment["id"], metadata={"test_str2": "str2"}) - result = sysdb.get_segments(id=segment["id"]) - assert result == [segment] - - # Update a metadata key - metadata["test_str"] = "str3" - sysdb.update_segment(segment["id"], metadata={"test_str": 
"str3"}) - result = sysdb.get_segments(id=segment["id"]) - assert result == [segment] - - # Delete a metadata key - del metadata["test_str"] - sysdb.update_segment(segment["id"], metadata={"test_str": None}) - result = sysdb.get_segments(id=segment["id"]) - assert result == [segment] - - # Delete all metadata keys - segment["metadata"] = None - sysdb.update_segment(segment["id"], metadata=None) - result = sysdb.get_segments(id=segment["id"]) - assert result == [segment] diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/clickhouse_connect/driver/constants.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/clickhouse_connect/driver/constants.py deleted file mode 100644 index a242e559b9463cbcef3c67e8fa883aed93db04ec..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/clickhouse_connect/driver/constants.py +++ /dev/null @@ -1,2 +0,0 @@ -PROTOCOL_VERSION_WITH_LOW_CARD = 54405 -CH_VERSION_WITH_PROTOCOL = '23.2.1.2537' diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/setup_pydevd_cython.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/setup_pydevd_cython.py deleted file mode 100644 index 5b395ddcf09d9e3538a85ad11a25a317925f5543..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/setup_pydevd_cython.py +++ /dev/null @@ -1,250 +0,0 @@ -''' -A simpler setup version just to compile the speedup module. - -It should be used as: - -python setup_pydevd_cython build_ext --inplace - -Note: the .c file and other generated files are regenerated from -the .pyx file by running "python build_tools/build.py" -''' - -import os -import sys -from setuptools import setup - -os.chdir(os.path.dirname(os.path.abspath(__file__))) - -IS_PY36_OR_GREATER = sys.version_info > (3, 6) -TODO_PY311 = sys.version_info > (3, 11) - - -def process_args(): - extension_folder = None - target_pydevd_name = None - target_frame_eval = None - force_cython = False - - for i, arg in enumerate(sys.argv[:]): - if arg == '--build-lib': - extension_folder = sys.argv[i + 1] - # It shouldn't be removed from sys.argv (among with --build-temp) because they're passed further to setup() - if arg.startswith('--target-pyd-name='): - sys.argv.remove(arg) - target_pydevd_name = arg[len('--target-pyd-name='):] - if arg.startswith('--target-pyd-frame-eval='): - sys.argv.remove(arg) - target_frame_eval = arg[len('--target-pyd-frame-eval='):] - if arg == '--force-cython': - sys.argv.remove(arg) - force_cython = True - - return extension_folder, target_pydevd_name, target_frame_eval, force_cython - - -def process_template_lines(template_lines): - # Create 2 versions of the template, one for Python 3.8 and another for Python 3.9 - for version in ('38', '39'): - yield '### WARNING: GENERATED CODE, DO NOT EDIT!' - yield '### WARNING: GENERATED CODE, DO NOT EDIT!' - yield '### WARNING: GENERATED CODE, DO NOT EDIT!' 
- - for line in template_lines: - if version == '38': - line = line.replace('get_bytecode_while_frame_eval(PyFrameObject * frame_obj, int exc)', 'get_bytecode_while_frame_eval_38(PyFrameObject * frame_obj, int exc)') - line = line.replace('CALL_EvalFrameDefault', 'CALL_EvalFrameDefault_38(frame_obj, exc)') - else: # 3.9 - line = line.replace('get_bytecode_while_frame_eval(PyFrameObject * frame_obj, int exc)', 'get_bytecode_while_frame_eval_39(PyThreadState* tstate, PyFrameObject * frame_obj, int exc)') - line = line.replace('CALL_EvalFrameDefault', 'CALL_EvalFrameDefault_39(tstate, frame_obj, exc)') - - yield line - - yield '### WARNING: GENERATED CODE, DO NOT EDIT!' - yield '### WARNING: GENERATED CODE, DO NOT EDIT!' - yield '### WARNING: GENERATED CODE, DO NOT EDIT!' - yield '' - yield '' - - -def process_template_file(contents): - ret = [] - template_lines = [] - - append_to = ret - for line in contents.splitlines(keepends=False): - if line.strip() == '### TEMPLATE_START': - append_to = template_lines - elif line.strip() == '### TEMPLATE_END': - append_to = ret - for line in process_template_lines(template_lines): - ret.append(line) - else: - append_to.append(line) - - return '\n'.join(ret) - - -def build_extension(dir_name, extension_name, target_pydevd_name, force_cython, extended=False, has_pxd=False, template=False): - pyx_file = os.path.join(os.path.dirname(__file__), dir_name, "%s.pyx" % (extension_name,)) - - if template: - pyx_template_file = os.path.join(os.path.dirname(__file__), dir_name, "%s.template.pyx" % (extension_name,)) - with open(pyx_template_file, 'r') as stream: - contents = stream.read() - - contents = process_template_file(contents) - - with open(pyx_file, 'w') as stream: - stream.write(contents) - - if target_pydevd_name != extension_name: - # It MUST be there in this case! - # (otherwise we'll have unresolved externals because the .c file had another name initially). - import shutil - - # We must force cython in this case (but only in this case -- for the regular setup in the user machine, we - # should always compile the .c file). - force_cython = True - - new_pyx_file = os.path.join(os.path.dirname(__file__), dir_name, "%s.pyx" % (target_pydevd_name,)) - new_c_file = os.path.join(os.path.dirname(__file__), dir_name, "%s.c" % (target_pydevd_name,)) - shutil.copy(pyx_file, new_pyx_file) - pyx_file = new_pyx_file - if has_pxd: - pxd_file = os.path.join(os.path.dirname(__file__), dir_name, "%s.pxd" % (extension_name,)) - new_pxd_file = os.path.join(os.path.dirname(__file__), dir_name, "%s.pxd" % (target_pydevd_name,)) - shutil.copy(pxd_file, new_pxd_file) - assert os.path.exists(pyx_file) - - try: - c_files = [os.path.join(dir_name, "%s.c" % target_pydevd_name), ] - if force_cython: - for c_file in c_files: - try: - os.remove(c_file) - except: - pass - from Cython.Build import cythonize # @UnusedImport - # Generate the .c files in cythonize (will not compile at this point). - - target = "%s/%s.pyx" % (dir_name, target_pydevd_name,) - cythonize([target]) - - # Workarounds needed in CPython 3.8 and 3.9 to access PyInterpreterState.eval_frame. 
- for c_file in c_files: - with open(c_file, 'r') as stream: - c_file_contents = stream.read() - - if '#include "internal/pycore_gc.h"' not in c_file_contents: - c_file_contents = c_file_contents.replace('#include "Python.h"', '''#include "Python.h" -#if PY_VERSION_HEX >= 0x03090000 -#include "internal/pycore_gc.h" -#include "internal/pycore_interp.h" -#endif -''') - - if '#include "internal/pycore_pystate.h"' not in c_file_contents: - c_file_contents = c_file_contents.replace('#include "pystate.h"', '''#include "pystate.h" -#if PY_VERSION_HEX >= 0x03080000 -#include "internal/pycore_pystate.h" -#endif -''') - - # We want the same output on Windows and Linux. - c_file_contents = c_file_contents.replace('\r\n', '\n').replace('\r', '\n') - c_file_contents = c_file_contents.replace(r'_pydevd_frame_eval\\release_mem.h', '_pydevd_frame_eval/release_mem.h') - c_file_contents = c_file_contents.replace(r'_pydevd_frame_eval\\pydevd_frame_evaluator.pyx', '_pydevd_frame_eval/pydevd_frame_evaluator.pyx') - c_file_contents = c_file_contents.replace(r'_pydevd_bundle\\pydevd_cython.pxd', '_pydevd_bundle/pydevd_cython.pxd') - c_file_contents = c_file_contents.replace(r'_pydevd_bundle\\pydevd_cython.pyx', '_pydevd_bundle/pydevd_cython.pyx') - - with open(c_file, 'w') as stream: - stream.write(c_file_contents) - - # Always compile the .c (and not the .pyx) file (which we should keep up-to-date by running build_tools/build.py). - from distutils.extension import Extension - extra_compile_args = [] - extra_link_args = [] - - if 'linux' in sys.platform: - # Enabling -flto brings executable from 4MB to 0.56MB and -Os to 0.41MB - # Profiling shows an execution around 3-5% slower with -Os vs -O3, - # so, kept only -flto. - extra_compile_args = ["-flto", "-O3"] - extra_link_args = extra_compile_args[:] - - # Note: also experimented with profile-guided optimization. The executable - # size became a bit smaller (from 0.56MB to 0.5MB) but this would add an - # extra step to run the debugger to obtain the optimizations - # so, skipped it for now (note: the actual benchmarks time was in the - # margin of a 0-1% improvement, which is probably not worth it for - # speed increments). - # extra_compile_args = ["-flto", "-fprofile-generate"] - # ... Run benchmarks ... - # extra_compile_args = ["-flto", "-fprofile-use", "-fprofile-correction"] - elif 'win32' in sys.platform: - pass - # uncomment to generate pdbs for visual studio. - # extra_compile_args=["-Zi", "/Od"] - # extra_link_args=["-debug"] - - kwargs = {} - if extra_link_args: - kwargs['extra_link_args'] = extra_link_args - if extra_compile_args: - kwargs['extra_compile_args'] = extra_compile_args - - ext_modules = [ - Extension( - "%s%s.%s" % (dir_name, "_ext" if extended else "", target_pydevd_name,), - c_files, - **kwargs - )] - - # This is needed in CPython 3.8 to be able to include internal/pycore_pystate.h - # (needed to set PyInterpreterState.eval_frame). 
- for module in ext_modules: - module.define_macros = [('Py_BUILD_CORE_MODULE', '1')] - setup( - name='Cythonize', - ext_modules=ext_modules - ) - finally: - if target_pydevd_name != extension_name: - try: - os.remove(new_pyx_file) - except: - import traceback - traceback.print_exc() - try: - os.remove(new_c_file) - except: - import traceback - traceback.print_exc() - if has_pxd: - try: - os.remove(new_pxd_file) - except: - import traceback - traceback.print_exc() - - -extension_folder, target_pydevd_name, target_frame_eval, force_cython = process_args() - -extension_name = "pydevd_cython" -if target_pydevd_name is None: - target_pydevd_name = extension_name -build_extension("_pydevd_bundle", extension_name, target_pydevd_name, force_cython, extension_folder, True) - -if IS_PY36_OR_GREATER and not TODO_PY311: - extension_name = "pydevd_frame_evaluator" - if target_frame_eval is None: - target_frame_eval = extension_name - build_extension("_pydevd_frame_eval", extension_name, target_frame_eval, force_cython, extension_folder, True, template=True) - -if extension_folder: - os.chdir(extension_folder) - for folder in [file for file in os.listdir(extension_folder) if - file != 'build' and os.path.isdir(os.path.join(extension_folder, file))]: - file = os.path.join(folder, "__init__.py") - if not os.path.exists(file): - open(file, 'a').close() diff --git a/spaces/TH5314/newbing/src/components/chat-suggestions.tsx b/spaces/TH5314/newbing/src/components/chat-suggestions.tsx deleted file mode 100644 index 00c2fee295c9e010946046eb71705a5e131f7a5a..0000000000000000000000000000000000000000 --- a/spaces/TH5314/newbing/src/components/chat-suggestions.tsx +++ /dev/null @@ -1,45 +0,0 @@ -import React, { useMemo } from 'react' -import Image from 'next/image' -import HelpIcon from '@/assets/images/help.svg' -import { SuggestedResponse } from '@/lib/bots/bing/types' -import { useBing } from '@/lib/hooks/use-bing' -import { atom, useAtom } from 'jotai' - -type Suggestions = SuggestedResponse[] -const helpSuggestions = ['为什么不回应某些主题', '告诉我更多关于必应的资迅', '必应如何使用 AI?'].map((text) => ({ text })) -const suggestionsAtom = atom([]) - -type ChatSuggestionsProps = React.ComponentProps<'div'> & Pick, 'setInput'> & { suggestions?: Suggestions } - -export function ChatSuggestions({ setInput, suggestions = [] }: ChatSuggestionsProps) { - const [currentSuggestions, setSuggestions] = useAtom(suggestionsAtom) - const toggleSuggestions = (() => { - if (currentSuggestions === helpSuggestions) { - setSuggestions(suggestions) - } else { - setSuggestions(helpSuggestions) - } - }) - - useMemo(() => { - setSuggestions(suggestions) - window.scrollBy(0, 2000) - }, [suggestions.length]) - - return currentSuggestions?.length ? ( -
-
- - { - currentSuggestions.map(suggestion => ( - - )) - } -
-
- ) : null -} diff --git a/spaces/TNR-5/semantic-image-search.img/src/app/utils.js b/spaces/TNR-5/semantic-image-search.img/src/app/utils.js deleted file mode 100644 index f0401723a8079fda923d524eabe7ab23fe3a166f..0000000000000000000000000000000000000000 --- a/spaces/TNR-5/semantic-image-search.img/src/app/utils.js +++ /dev/null @@ -1,52 +0,0 @@ - -import { decode } from "blurhash" - -const SIZE = 32; - -export function blurHashToDataURL(hash) { - if (!hash) return undefined - - const pixels = decode(hash, SIZE, SIZE) - - const canvas = document.createElement("canvas"); - canvas.width = SIZE; - canvas.height = SIZE; - - const ctx = canvas.getContext("2d"); - const imageData = ctx.createImageData(SIZE, SIZE); - imageData.data.set(pixels); - ctx.putImageData(imageData, 0, 0); - - return canvas.toDataURL(); -} - -function downloadData(url, filename) { - - // Create an anchor element with the data URL as the href attribute - const downloadLink = document.createElement('a'); - downloadLink.href = url; - - // Set the download attribute to specify the desired filename for the downloaded image - downloadLink.download = filename; - - // Trigger the download - downloadLink.click(); - - // Clean up: remove the anchor element from the DOM - downloadLink.remove(); -} - -export function downloadImage(url, filename) { - fetch(url, { - headers: new Headers({ - Origin: location.origin, - }), - mode: 'cors', - }) - .then((response) => response.blob()) - .then((blob) => { - let blobUrl = window.URL.createObjectURL(blob) - downloadData(blobUrl, filename) - }) - .catch((e) => console.error(e)) -} diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/rich/segment.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/rich/segment.py deleted file mode 100644 index e125798463512ce4322a2cc139b4e5c1515e5c05..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/rich/segment.py +++ /dev/null @@ -1,739 +0,0 @@ -from enum import IntEnum -from functools import lru_cache -from itertools import filterfalse -from logging import getLogger -from operator import attrgetter -from typing import ( - TYPE_CHECKING, - Dict, - Iterable, - List, - NamedTuple, - Optional, - Sequence, - Tuple, - Type, - Union, -) - -from .cells import ( - _is_single_cell_widths, - cached_cell_len, - cell_len, - get_character_cell_size, - set_cell_size, -) -from .repr import Result, rich_repr -from .style import Style - -if TYPE_CHECKING: - from .console import Console, ConsoleOptions, RenderResult - -log = getLogger("rich") - - -class ControlType(IntEnum): - """Non-printable control codes which typically translate to ANSI codes.""" - - BELL = 1 - CARRIAGE_RETURN = 2 - HOME = 3 - CLEAR = 4 - SHOW_CURSOR = 5 - HIDE_CURSOR = 6 - ENABLE_ALT_SCREEN = 7 - DISABLE_ALT_SCREEN = 8 - CURSOR_UP = 9 - CURSOR_DOWN = 10 - CURSOR_FORWARD = 11 - CURSOR_BACKWARD = 12 - CURSOR_MOVE_TO_COLUMN = 13 - CURSOR_MOVE_TO = 14 - ERASE_IN_LINE = 15 - SET_WINDOW_TITLE = 16 - - -ControlCode = Union[ - Tuple[ControlType], - Tuple[ControlType, Union[int, str]], - Tuple[ControlType, int, int], -] - - -@rich_repr() -class Segment(NamedTuple): - """A piece of text with associated style. Segments are produced by the Console render process and - are ultimately converted in to strings to be written to the terminal. - - Args: - text (str): A piece of text. 
- style (:class:`~rich.style.Style`, optional): An optional style to apply to the text. - control (Tuple[ControlCode], optional): Optional sequence of control codes. - - Attributes: - cell_length (int): The cell length of this Segment. - """ - - text: str - style: Optional[Style] = None - control: Optional[Sequence[ControlCode]] = None - - @property - def cell_length(self) -> int: - """The number of terminal cells required to display self.text. - - Returns: - int: A number of cells. - """ - text, _style, control = self - return 0 if control else cell_len(text) - - def __rich_repr__(self) -> Result: - yield self.text - if self.control is None: - if self.style is not None: - yield self.style - else: - yield self.style - yield self.control - - def __bool__(self) -> bool: - """Check if the segment contains text.""" - return bool(self.text) - - @property - def is_control(self) -> bool: - """Check if the segment contains control codes.""" - return self.control is not None - - @classmethod - @lru_cache(1024 * 16) - def _split_cells(cls, segment: "Segment", cut: int) -> Tuple["Segment", "Segment"]: - - text, style, control = segment - _Segment = Segment - - cell_length = segment.cell_length - if cut >= cell_length: - return segment, _Segment("", style, control) - - cell_size = get_character_cell_size - - pos = int((cut / cell_length) * (len(text) - 1)) - - before = text[:pos] - cell_pos = cell_len(before) - if cell_pos == cut: - return ( - _Segment(before, style, control), - _Segment(text[pos:], style, control), - ) - while pos < len(text): - char = text[pos] - pos += 1 - cell_pos += cell_size(char) - before = text[:pos] - if cell_pos == cut: - return ( - _Segment(before, style, control), - _Segment(text[pos:], style, control), - ) - if cell_pos > cut: - return ( - _Segment(before[: pos - 1] + " ", style, control), - _Segment(" " + text[pos:], style, control), - ) - - raise AssertionError("Will never reach here") - - def split_cells(self, cut: int) -> Tuple["Segment", "Segment"]: - """Split segment in to two segments at the specified column. - - If the cut point falls in the middle of a 2-cell wide character then it is replaced - by two spaces, to preserve the display width of the parent segment. - - Returns: - Tuple[Segment, Segment]: Two segments. - """ - text, style, control = self - - if _is_single_cell_widths(text): - # Fast path with all 1 cell characters - if cut >= len(text): - return self, Segment("", style, control) - return ( - Segment(text[:cut], style, control), - Segment(text[cut:], style, control), - ) - - return self._split_cells(self, cut) - - @classmethod - def line(cls) -> "Segment": - """Make a new line segment.""" - return cls("\n") - - @classmethod - def apply_style( - cls, - segments: Iterable["Segment"], - style: Optional[Style] = None, - post_style: Optional[Style] = None, - ) -> Iterable["Segment"]: - """Apply style(s) to an iterable of segments. - - Returns an iterable of segments where the style is replaced by ``style + segment.style + post_style``. - - Args: - segments (Iterable[Segment]): Segments to process. - style (Style, optional): Base style. Defaults to None. - post_style (Style, optional): Style to apply on top of segment style. Defaults to None. - - Returns: - Iterable[Segments]: A new iterable of segments (possibly the same iterable). 
- """ - result_segments = segments - if style: - apply = style.__add__ - result_segments = ( - cls(text, None if control else apply(_style), control) - for text, _style, control in result_segments - ) - if post_style: - result_segments = ( - cls( - text, - ( - None - if control - else (_style + post_style if _style else post_style) - ), - control, - ) - for text, _style, control in result_segments - ) - return result_segments - - @classmethod - def filter_control( - cls, segments: Iterable["Segment"], is_control: bool = False - ) -> Iterable["Segment"]: - """Filter segments by ``is_control`` attribute. - - Args: - segments (Iterable[Segment]): An iterable of Segment instances. - is_control (bool, optional): is_control flag to match in search. - - Returns: - Iterable[Segment]: And iterable of Segment instances. - - """ - if is_control: - return filter(attrgetter("control"), segments) - else: - return filterfalse(attrgetter("control"), segments) - - @classmethod - def split_lines(cls, segments: Iterable["Segment"]) -> Iterable[List["Segment"]]: - """Split a sequence of segments in to a list of lines. - - Args: - segments (Iterable[Segment]): Segments potentially containing line feeds. - - Yields: - Iterable[List[Segment]]: Iterable of segment lists, one per line. - """ - line: List[Segment] = [] - append = line.append - - for segment in segments: - if "\n" in segment.text and not segment.control: - text, style, _ = segment - while text: - _text, new_line, text = text.partition("\n") - if _text: - append(cls(_text, style)) - if new_line: - yield line - line = [] - append = line.append - else: - append(segment) - if line: - yield line - - @classmethod - def split_and_crop_lines( - cls, - segments: Iterable["Segment"], - length: int, - style: Optional[Style] = None, - pad: bool = True, - include_new_lines: bool = True, - ) -> Iterable[List["Segment"]]: - """Split segments in to lines, and crop lines greater than a given length. - - Args: - segments (Iterable[Segment]): An iterable of segments, probably - generated from console.render. - length (int): Desired line length. - style (Style, optional): Style to use for any padding. - pad (bool): Enable padding of lines that are less than `length`. - - Returns: - Iterable[List[Segment]]: An iterable of lines of segments. - """ - line: List[Segment] = [] - append = line.append - - adjust_line_length = cls.adjust_line_length - new_line_segment = cls("\n") - - for segment in segments: - if "\n" in segment.text and not segment.control: - text, segment_style, _ = segment - while text: - _text, new_line, text = text.partition("\n") - if _text: - append(cls(_text, segment_style)) - if new_line: - cropped_line = adjust_line_length( - line, length, style=style, pad=pad - ) - if include_new_lines: - cropped_line.append(new_line_segment) - yield cropped_line - line.clear() - else: - append(segment) - if line: - yield adjust_line_length(line, length, style=style, pad=pad) - - @classmethod - def adjust_line_length( - cls, - line: List["Segment"], - length: int, - style: Optional[Style] = None, - pad: bool = True, - ) -> List["Segment"]: - """Adjust a line to a given width (cropping or padding as required). - - Args: - segments (Iterable[Segment]): A list of segments in a single line. - length (int): The desired width of the line. - style (Style, optional): The style of padding if used (space on the end). Defaults to None. - pad (bool, optional): Pad lines with spaces if they are shorter than `length`. Defaults to True. 
- - Returns: - List[Segment]: A line of segments with the desired length. - """ - line_length = sum(segment.cell_length for segment in line) - new_line: List[Segment] - - if line_length < length: - if pad: - new_line = line + [cls(" " * (length - line_length), style)] - else: - new_line = line[:] - elif line_length > length: - new_line = [] - append = new_line.append - line_length = 0 - for segment in line: - segment_length = segment.cell_length - if line_length + segment_length < length or segment.control: - append(segment) - line_length += segment_length - else: - text, segment_style, _ = segment - text = set_cell_size(text, length - line_length) - append(cls(text, segment_style)) - break - else: - new_line = line[:] - return new_line - - @classmethod - def get_line_length(cls, line: List["Segment"]) -> int: - """Get the length of list of segments. - - Args: - line (List[Segment]): A line encoded as a list of Segments (assumes no '\\\\n' characters), - - Returns: - int: The length of the line. - """ - _cell_len = cell_len - return sum(_cell_len(text) for text, style, control in line if not control) - - @classmethod - def get_shape(cls, lines: List[List["Segment"]]) -> Tuple[int, int]: - """Get the shape (enclosing rectangle) of a list of lines. - - Args: - lines (List[List[Segment]]): A list of lines (no '\\\\n' characters). - - Returns: - Tuple[int, int]: Width and height in characters. - """ - get_line_length = cls.get_line_length - max_width = max(get_line_length(line) for line in lines) if lines else 0 - return (max_width, len(lines)) - - @classmethod - def set_shape( - cls, - lines: List[List["Segment"]], - width: int, - height: Optional[int] = None, - style: Optional[Style] = None, - new_lines: bool = False, - ) -> List[List["Segment"]]: - """Set the shape of a list of lines (enclosing rectangle). - - Args: - lines (List[List[Segment]]): A list of lines. - width (int): Desired width. - height (int, optional): Desired height or None for no change. - style (Style, optional): Style of any padding added. - new_lines (bool, optional): Padded lines should include "\n". Defaults to False. - - Returns: - List[List[Segment]]: New list of lines. - """ - _height = height or len(lines) - - blank = ( - [cls(" " * width + "\n", style)] if new_lines else [cls(" " * width, style)] - ) - - adjust_line_length = cls.adjust_line_length - shaped_lines = lines[:_height] - shaped_lines[:] = [ - adjust_line_length(line, width, style=style) for line in lines - ] - if len(shaped_lines) < _height: - shaped_lines.extend([blank] * (_height - len(shaped_lines))) - return shaped_lines - - @classmethod - def align_top( - cls: Type["Segment"], - lines: List[List["Segment"]], - width: int, - height: int, - style: Style, - new_lines: bool = False, - ) -> List[List["Segment"]]: - """Aligns lines to top (adds extra lines to bottom as required). - - Args: - lines (List[List[Segment]]): A list of lines. - width (int): Desired width. - height (int, optional): Desired height or None for no change. - style (Style): Style of any padding added. - new_lines (bool, optional): Padded lines should include "\n". Defaults to False. - - Returns: - List[List[Segment]]: New list of lines. 
- """ - extra_lines = height - len(lines) - if not extra_lines: - return lines[:] - lines = lines[:height] - blank = cls(" " * width + "\n", style) if new_lines else cls(" " * width, style) - lines = lines + [[blank]] * extra_lines - return lines - - @classmethod - def align_bottom( - cls: Type["Segment"], - lines: List[List["Segment"]], - width: int, - height: int, - style: Style, - new_lines: bool = False, - ) -> List[List["Segment"]]: - """Aligns render to bottom (adds extra lines above as required). - - Args: - lines (List[List[Segment]]): A list of lines. - width (int): Desired width. - height (int, optional): Desired height or None for no change. - style (Style): Style of any padding added. Defaults to None. - new_lines (bool, optional): Padded lines should include "\n". Defaults to False. - - Returns: - List[List[Segment]]: New list of lines. - """ - extra_lines = height - len(lines) - if not extra_lines: - return lines[:] - lines = lines[:height] - blank = cls(" " * width + "\n", style) if new_lines else cls(" " * width, style) - lines = [[blank]] * extra_lines + lines - return lines - - @classmethod - def align_middle( - cls: Type["Segment"], - lines: List[List["Segment"]], - width: int, - height: int, - style: Style, - new_lines: bool = False, - ) -> List[List["Segment"]]: - """Aligns lines to middle (adds extra lines to above and below as required). - - Args: - lines (List[List[Segment]]): A list of lines. - width (int): Desired width. - height (int, optional): Desired height or None for no change. - style (Style): Style of any padding added. - new_lines (bool, optional): Padded lines should include "\n". Defaults to False. - - Returns: - List[List[Segment]]: New list of lines. - """ - extra_lines = height - len(lines) - if not extra_lines: - return lines[:] - lines = lines[:height] - blank = cls(" " * width + "\n", style) if new_lines else cls(" " * width, style) - top_lines = extra_lines // 2 - bottom_lines = extra_lines - top_lines - lines = [[blank]] * top_lines + lines + [[blank]] * bottom_lines - return lines - - @classmethod - def simplify(cls, segments: Iterable["Segment"]) -> Iterable["Segment"]: - """Simplify an iterable of segments by combining contiguous segments with the same style. - - Args: - segments (Iterable[Segment]): An iterable of segments. - - Returns: - Iterable[Segment]: A possibly smaller iterable of segments that will render the same way. - """ - iter_segments = iter(segments) - try: - last_segment = next(iter_segments) - except StopIteration: - return - - _Segment = Segment - for segment in iter_segments: - if last_segment.style == segment.style and not segment.control: - last_segment = _Segment( - last_segment.text + segment.text, last_segment.style - ) - else: - yield last_segment - last_segment = segment - yield last_segment - - @classmethod - def strip_links(cls, segments: Iterable["Segment"]) -> Iterable["Segment"]: - """Remove all links from an iterable of styles. - - Args: - segments (Iterable[Segment]): An iterable segments. - - Yields: - Segment: Segments with link removed. - """ - for segment in segments: - if segment.control or segment.style is None: - yield segment - else: - text, style, _control = segment - yield cls(text, style.update_link(None) if style else None) - - @classmethod - def strip_styles(cls, segments: Iterable["Segment"]) -> Iterable["Segment"]: - """Remove all styles from an iterable of segments. - - Args: - segments (Iterable[Segment]): An iterable segments. 
- - Yields: - Segment: Segments with styles replace with None - """ - for text, _style, control in segments: - yield cls(text, None, control) - - @classmethod - def remove_color(cls, segments: Iterable["Segment"]) -> Iterable["Segment"]: - """Remove all color from an iterable of segments. - - Args: - segments (Iterable[Segment]): An iterable segments. - - Yields: - Segment: Segments with colorless style. - """ - - cache: Dict[Style, Style] = {} - for text, style, control in segments: - if style: - colorless_style = cache.get(style) - if colorless_style is None: - colorless_style = style.without_color - cache[style] = colorless_style - yield cls(text, colorless_style, control) - else: - yield cls(text, None, control) - - @classmethod - def divide( - cls, segments: Iterable["Segment"], cuts: Iterable[int] - ) -> Iterable[List["Segment"]]: - """Divides an iterable of segments in to portions. - - Args: - cuts (Iterable[int]): Cell positions where to divide. - - Yields: - [Iterable[List[Segment]]]: An iterable of Segments in List. - """ - split_segments: List["Segment"] = [] - add_segment = split_segments.append - - iter_cuts = iter(cuts) - - while True: - cut = next(iter_cuts, -1) - if cut == -1: - return [] - if cut != 0: - break - yield [] - pos = 0 - - segments_clear = split_segments.clear - segments_copy = split_segments.copy - - _cell_len = cached_cell_len - for segment in segments: - text, _style, control = segment - while text: - end_pos = pos if control else pos + _cell_len(text) - if end_pos < cut: - add_segment(segment) - pos = end_pos - break - - if end_pos == cut: - add_segment(segment) - yield segments_copy() - segments_clear() - pos = end_pos - - cut = next(iter_cuts, -1) - if cut == -1: - if split_segments: - yield segments_copy() - return - - break - - else: - before, segment = segment.split_cells(cut - pos) - text, _style, control = segment - add_segment(before) - yield segments_copy() - segments_clear() - pos = cut - - cut = next(iter_cuts, -1) - if cut == -1: - if split_segments: - yield segments_copy() - return - - yield segments_copy() - - -class Segments: - """A simple renderable to render an iterable of segments. This class may be useful if - you want to print segments outside of a __rich_console__ method. - - Args: - segments (Iterable[Segment]): An iterable of segments. - new_lines (bool, optional): Add new lines between segments. Defaults to False. - """ - - def __init__(self, segments: Iterable[Segment], new_lines: bool = False) -> None: - self.segments = list(segments) - self.new_lines = new_lines - - def __rich_console__( - self, console: "Console", options: "ConsoleOptions" - ) -> "RenderResult": - if self.new_lines: - line = Segment.line() - for segment in self.segments: - yield segment - yield line - else: - yield from self.segments - - -class SegmentLines: - def __init__(self, lines: Iterable[List[Segment]], new_lines: bool = False) -> None: - """A simple renderable containing a number of lines of segments. May be used as an intermediate - in rendering process. - - Args: - lines (Iterable[List[Segment]]): Lists of segments forming lines. - new_lines (bool, optional): Insert new lines after each line. Defaults to False. 
- """ - self.lines = list(lines) - self.new_lines = new_lines - - def __rich_console__( - self, console: "Console", options: "ConsoleOptions" - ) -> "RenderResult": - if self.new_lines: - new_line = Segment.line() - for line in self.lines: - yield from line - yield new_line - else: - for line in self.lines: - yield from line - - -if __name__ == "__main__": # pragma: no cover - from pip._vendor.rich.console import Console - from pip._vendor.rich.syntax import Syntax - from pip._vendor.rich.text import Text - - code = """from rich.console import Console -console = Console() -text = Text.from_markup("Hello, [bold magenta]World[/]!") -console.print(text)""" - - text = Text.from_markup("Hello, [bold magenta]World[/]!") - - console = Console() - - console.rule("rich.Segment") - console.print( - "A Segment is the last step in the Rich render process before generating text with ANSI codes." - ) - console.print("\nConsider the following code:\n") - console.print(Syntax(code, "python", line_numbers=True)) - console.print() - console.print( - "When you call [b]print()[/b], Rich [i]renders[/i] the object in to the following:\n" - ) - fragments = list(console.render(text)) - console.print(fragments) - console.print() - console.print("The Segments are then processed to produce the following output:\n") - console.print(text) - console.print( - "\nYou will only need to know this if you are implementing your own Rich renderables." - ) diff --git a/spaces/TempoFunk/makeavid-sd-jax/makeavid_sd/flax_impl/flax_unet_pseudo3d_blocks.py b/spaces/TempoFunk/makeavid-sd-jax/makeavid_sd/flax_impl/flax_unet_pseudo3d_blocks.py deleted file mode 100644 index e310e6039c816644b9b25b165ee226fe4e1c8e0e..0000000000000000000000000000000000000000 --- a/spaces/TempoFunk/makeavid-sd-jax/makeavid_sd/flax_impl/flax_unet_pseudo3d_blocks.py +++ /dev/null @@ -1,254 +0,0 @@ - -from typing import Tuple - -import jax -import jax.numpy as jnp -import flax.linen as nn - -from .flax_attention_pseudo3d import TransformerPseudo3DModel -from .flax_resnet_pseudo3d import ResnetBlockPseudo3D, DownsamplePseudo3D, UpsamplePseudo3D - - -class UNetMidBlockPseudo3DCrossAttn(nn.Module): - in_channels: int - num_layers: int = 1 - attn_num_head_channels: int = 1 - use_memory_efficient_attention: bool = False - dtype: jnp.dtype = jnp.float32 - - def setup(self) -> None: - resnets = [ - ResnetBlockPseudo3D( - in_channels = self.in_channels, - out_channels = self.in_channels, - dtype = self.dtype - ) - ] - attentions = [] - for _ in range(self.num_layers): - attn_block = TransformerPseudo3DModel( - in_channels = self.in_channels, - num_attention_heads = self.attn_num_head_channels, - attention_head_dim = self.in_channels // self.attn_num_head_channels, - num_layers = 1, - use_memory_efficient_attention = self.use_memory_efficient_attention, - dtype = self.dtype - ) - attentions.append(attn_block) - res_block = ResnetBlockPseudo3D( - in_channels = self.in_channels, - out_channels = self.in_channels, - dtype = self.dtype - ) - resnets.append(res_block) - self.attentions = attentions - self.resnets = resnets - - def __call__(self, - hidden_states: jax.Array, - temb: jax.Array, - encoder_hidden_states = jax.Array - ) -> jax.Array: - hidden_states = self.resnets[0](hidden_states, temb) - for attn, resnet in zip(self.attentions, self.resnets[1:]): - hidden_states = attn(hidden_states, encoder_hidden_states) - hidden_states = resnet(hidden_states, temb) - return hidden_states - - -class CrossAttnDownBlockPseudo3D(nn.Module): - in_channels: int - out_channels: 
int - num_layers: int = 1 - attn_num_head_channels: int = 1 - add_downsample: bool = True - use_memory_efficient_attention: bool = False - dtype: jnp.dtype = jnp.float32 - - def setup(self) -> None: - attentions = [] - resnets = [] - for i in range(self.num_layers): - in_channels = self.in_channels if i == 0 else self.out_channels - res_block = ResnetBlockPseudo3D( - in_channels = in_channels, - out_channels = self.out_channels, - dtype = self.dtype - ) - resnets.append(res_block) - attn_block = TransformerPseudo3DModel( - in_channels = self.out_channels, - num_attention_heads = self.attn_num_head_channels, - attention_head_dim = self.out_channels // self.attn_num_head_channels, - num_layers = 1, - use_memory_efficient_attention = self.use_memory_efficient_attention, - dtype = self.dtype - ) - attentions.append(attn_block) - self.resnets = resnets - self.attentions = attentions - - if self.add_downsample: - self.downsamplers_0 = DownsamplePseudo3D( - out_channels = self.out_channels, - dtype = self.dtype - ) - else: - self.downsamplers_0 = None - - def __call__(self, - hidden_states: jax.Array, - temb: jax.Array, - encoder_hidden_states: jax.Array - ) -> Tuple[jax.Array, jax.Array]: - output_states = () - for resnet, attn in zip(self.resnets, self.attentions): - hidden_states = resnet(hidden_states, temb) - hidden_states = attn(hidden_states, encoder_hidden_states) - output_states += (hidden_states, ) - if self.add_downsample: - hidden_states = self.downsamplers_0(hidden_states) - output_states += (hidden_states, ) - return hidden_states, output_states - - -class DownBlockPseudo3D(nn.Module): - in_channels: int - out_channels: int - num_layers: int = 1 - add_downsample: bool = True - dtype: jnp.dtype = jnp.float32 - - def setup(self) -> None: - resnets = [] - for i in range(self.num_layers): - in_channels = self.in_channels if i == 0 else self.out_channels - res_block = ResnetBlockPseudo3D( - in_channels = in_channels, - out_channels = self.out_channels, - dtype = self.dtype - ) - resnets.append(res_block) - self.resnets = resnets - if self.add_downsample: - self.downsamplers_0 = DownsamplePseudo3D( - out_channels = self.out_channels, - dtype = self.dtype - ) - else: - self.downsamplers_0 = None - - def __call__(self, - hidden_states: jax.Array, - temb: jax.Array - ) -> Tuple[jax.Array, jax.Array]: - output_states = () - for resnet in self.resnets: - hidden_states = resnet(hidden_states, temb) - output_states += (hidden_states, ) - if self.add_downsample: - hidden_states = self.downsamplers_0(hidden_states) - output_states += (hidden_states, ) - return hidden_states, output_states - - -class CrossAttnUpBlockPseudo3D(nn.Module): - in_channels: int - out_channels: int - prev_output_channels: int - num_layers: int = 1 - attn_num_head_channels: int = 1 - add_upsample: bool = True - use_memory_efficient_attention: bool = False - dtype: jnp.dtype = jnp.float32 - - def setup(self) -> None: - resnets = [] - attentions = [] - for i in range(self.num_layers): - res_skip_channels = self.in_channels if (i == self.num_layers -1) else self.out_channels - resnet_in_channels = self.prev_output_channels if i == 0 else self.out_channels - res_block = ResnetBlockPseudo3D( - in_channels = resnet_in_channels + res_skip_channels, - out_channels = self.out_channels, - dtype = self.dtype - ) - resnets.append(res_block) - attn_block = TransformerPseudo3DModel( - in_channels = self.out_channels, - num_attention_heads = self.attn_num_head_channels, - attention_head_dim = self.out_channels // 
self.attn_num_head_channels, - num_layers = 1, - use_memory_efficient_attention = self.use_memory_efficient_attention, - dtype = self.dtype - ) - attentions.append(attn_block) - self.resnets = resnets - self.attentions = attentions - if self.add_upsample: - self.upsamplers_0 = UpsamplePseudo3D( - out_channels = self.out_channels, - dtype = self.dtype - ) - else: - self.upsamplers_0 = None - - def __call__(self, - hidden_states: jax.Array, - res_hidden_states_tuple: Tuple[jax.Array, ...], - temb: jax.Array, - encoder_hidden_states: jax.Array - ) -> jax.Array: - for resnet, attn in zip(self.resnets, self.attentions): - res_hidden_states = res_hidden_states_tuple[-1] - res_hidden_states_tuple = res_hidden_states_tuple[:-1] - hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis = -1) - hidden_states = resnet(hidden_states, temb) - hidden_states = attn(hidden_states, encoder_hidden_states) - if self.add_upsample: - hidden_states = self.upsamplers_0(hidden_states) - return hidden_states - - -class UpBlockPseudo3D(nn.Module): - in_channels: int - out_channels: int - prev_output_channels: int - num_layers: int = 1 - add_upsample: bool = True - dtype: jnp.dtype = jnp.float32 - - def setup(self) -> None: - resnets = [] - for i in range(self.num_layers): - res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels - resnet_in_channels = self.prev_output_channels if i == 0 else self.out_channels - res_block = ResnetBlockPseudo3D( - in_channels = resnet_in_channels + res_skip_channels, - out_channels = self.out_channels, - dtype = self.dtype - ) - resnets.append(res_block) - self.resnets = resnets - if self.add_upsample: - self.upsamplers_0 = UpsamplePseudo3D( - out_channels = self.out_channels, - dtype = self.dtype - ) - else: - self.upsamplers_0 = None - - def __call__(self, - hidden_states: jax.Array, - res_hidden_states_tuple: Tuple[jax.Array, ...], - temb: jax.Array - ) -> jax.Array: - for resnet in self.resnets: - res_hidden_states = res_hidden_states_tuple[-1] - res_hidden_states_tuple = res_hidden_states_tuple[:-1] - hidden_states = jnp.concatenate([hidden_states, res_hidden_states], axis = -1) - hidden_states = resnet(hidden_states, temb) - if self.add_upsample: - hidden_states = self.upsamplers_0(hidden_states) - return hidden_states - diff --git a/spaces/Tester002/Claudette/README.md b/spaces/Tester002/Claudette/README.md deleted file mode 100644 index 4d66fac0a952a138355eb698001b73c7758333e2..0000000000000000000000000000000000000000 --- a/spaces/Tester002/Claudette/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Claudette -emoji: 📚 -colorFrom: indigo -colorTo: pink -sdk: docker -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/TomLemsky/this_skin_does_not_exist/app.py b/spaces/TomLemsky/this_skin_does_not_exist/app.py deleted file mode 100644 index 920f8cdd746080c252d602cdc0f79b97249de090..0000000000000000000000000000000000000000 --- a/spaces/TomLemsky/this_skin_does_not_exist/app.py +++ /dev/null @@ -1,119 +0,0 @@ -from denoising_diffusion_pytorch import Unet, GaussianDiffusion, Trainer -import gradio as gr -import numpy as np -import torch -from PIL import Image - -HTML_TEMPLATE=""" - - - -
- -
- - - - - -""" - -def generate_image(diffusion_model, hide_layer=True, num_steps=200, num_img=1): - diffusion_model.sampling_timesteps = num_steps - images = diffusion_model.sample(num_img) - images = images.cpu().permute((0,2,3,1)).numpy() - return show_image(images, hide_layer=hide_layer) - -def show_image(images, hide_layer=True): - num_img = len(images) - masked_images = images.copy() - if hide_layer: - layer_mask = np.ones((1,64,64,4)) - layer_mask[:,:16,32:,:] = 0 # second layer head - layer_mask[:,32:, :,:] = 0 # second layer body - layer_mask[:,-16:,16:-16,:] = 1 # left arm and leg - masked_images = masked_images * layer_mask - masked_list = [masked_images[i] for i in range(num_img)] - output_list = [ images[i] for i in range(num_img)] - b64_img = [gr.processing_utils.encode_array_to_base64(i) for i in masked_list] - html = [HTML_TEMPLATE.replace("{BASE64_PLACEHOLDER}",i) for i in b64_img] - iframes= [f"""""" for h in html] - return iframes + output_list - -def show_defaults(): - default_img_paths = ["person.png", "jacket_man.png", "pink_woman.png", "violet_woman.png"] - images = [] - for p in default_img_paths: - img = np.array(Image.open(p))/255 - images.append(img) - stacked_images = np.stack(images) - return show_image(stacked_images, hide_layer=True) - - -if __name__ == '__main__': - - # define model and diffusion process - n_channels = 4 - num_img = 4 - - model = Unet( - dim = 64, - dim_mults = (1, 2, 4, 8), - channels = n_channels - ) - - diffusion = GaussianDiffusion( - model, - image_size = 64, - timesteps = 1000, # number of steps - sampling_timesteps = 400 # 400 - ) - - # dummy trainer instantiated to load model - trainer = Trainer(diffusion, ".", num_samples=num_img, results_folder=".") - trainer.load(160) - - with gr.Blocks(css=".gr-block {image-rendering: pixelated}") as demo: - gr.Markdown("""# This skin does not exist - - A simple diffusion model trained from scratch on 200 000 Minecraft skins for a day on just my GTX 1660Ti 6GB. 
- ([Write-up on how I made this](https://tomlemsky.github.io/2022/11/13/Minecraft-Skin-Generation-using-Diffusion.html)) - """) - with gr.Row(): - step_slider = gr.Slider(minimum=1, maximum=200, value=40, label="Diffusion steps (values above 50 will take more than a minute)") - hide_layer_checkbox = gr.Checkbox(True, label="Hide second skin layer (helmets, hair, outerwear, ...), often noisy due to sparse training data") - generate_btn = gr.Button("Generate new Minecraft skins!") - - with gr.Row(): - #image_box = gr.Image(shape=(64,64), image_mode="RGBA" - image_html = [gr.HTML() for i in range(num_img)] - with gr.Row(): - image_blocks = [gr.Image(shape=(64,64), image_mode="RGBA") for i in range(num_img)] - - gr.Markdown(""" - Acknowledgements: - - denoising_diffusion_pytorch (for the diffusion model): [https://github.com/lucidrains/denoising-diffusion-pytorch](https://github.com/lucidrains/denoising-diffusion-pytorch) - - skinview3d (for the 3D Minecraft skin viewer): [https://github.com/bs-community/skinview3d](https://github.com/bs-community/skinview3d) - - 3dmoljs (for the inspiration on how to use JavaScript 3D viewers with gradio): [https://huggingface.co/blog/spaces_3dmoljs](https://huggingface.co/blog/spaces_3dmoljs) - """) - # assign the skin generating function to the button - wrapper = lambda num_steps, hide_layer:generate_image(diffusion, hide_layer=hide_layer, num_steps=num_steps, num_img=num_img) - generate_btn.click(fn=wrapper, inputs=[step_slider, hide_layer_checkbox], outputs=image_html+image_blocks) - # display default images at page load - demo.load(show_defaults, inputs=None, outputs=image_html+image_blocks) - demo.launch() diff --git a/spaces/Toritto/Genshin-impact-IA-project-v1/rmvpe.py b/spaces/Toritto/Genshin-impact-IA-project-v1/rmvpe.py deleted file mode 100644 index 3ad346141340e03bdbaa20121e1ed435bb3da57a..0000000000000000000000000000000000000000 --- a/spaces/Toritto/Genshin-impact-IA-project-v1/rmvpe.py +++ /dev/null @@ -1,432 +0,0 @@ -import sys, torch, numpy as np, traceback, pdb -import torch.nn as nn -from time import time as ttime -import torch.nn.functional as F - - -class BiGRU(nn.Module): - def __init__(self, input_features, hidden_features, num_layers): - super(BiGRU, self).__init__() - self.gru = nn.GRU( - input_features, - hidden_features, - num_layers=num_layers, - batch_first=True, - bidirectional=True, - ) - - def forward(self, x): - return self.gru(x)[0] - - -class ConvBlockRes(nn.Module): - def __init__(self, in_channels, out_channels, momentum=0.01): - super(ConvBlockRes, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - in_channels=in_channels, - out_channels=out_channels, - kernel_size=(3, 3), - stride=(1, 1), - padding=(1, 1), - bias=False, - ), - nn.BatchNorm2d(out_channels, momentum=momentum), - nn.ReLU(), - nn.Conv2d( - in_channels=out_channels, - out_channels=out_channels, - kernel_size=(3, 3), - stride=(1, 1), - padding=(1, 1), - bias=False, - ), - nn.BatchNorm2d(out_channels, momentum=momentum), - nn.ReLU(), - ) - if in_channels != out_channels: - self.shortcut = nn.Conv2d(in_channels, out_channels, (1, 1)) - self.is_shortcut = True - else: - self.is_shortcut = False - - def forward(self, x): - if self.is_shortcut: - return self.conv(x) + self.shortcut(x) - else: - return self.conv(x) + x - - -class Encoder(nn.Module): - def __init__( - self, - in_channels, - in_size, - n_encoders, - kernel_size, - n_blocks, - out_channels=16, - momentum=0.01, - ): - super(Encoder, self).__init__() - self.n_encoders = n_encoders - self.bn = 
nn.BatchNorm2d(in_channels, momentum=momentum) - self.layers = nn.ModuleList() - self.latent_channels = [] - for i in range(self.n_encoders): - self.layers.append( - ResEncoderBlock( - in_channels, out_channels, kernel_size, n_blocks, momentum=momentum - ) - ) - self.latent_channels.append([out_channels, in_size]) - in_channels = out_channels - out_channels *= 2 - in_size //= 2 - self.out_size = in_size - self.out_channel = out_channels - - def forward(self, x): - concat_tensors = [] - x = self.bn(x) - for i in range(self.n_encoders): - _, x = self.layers[i](x) - concat_tensors.append(_) - return x, concat_tensors - - -class ResEncoderBlock(nn.Module): - def __init__( - self, in_channels, out_channels, kernel_size, n_blocks=1, momentum=0.01 - ): - super(ResEncoderBlock, self).__init__() - self.n_blocks = n_blocks - self.conv = nn.ModuleList() - self.conv.append(ConvBlockRes(in_channels, out_channels, momentum)) - for i in range(n_blocks - 1): - self.conv.append(ConvBlockRes(out_channels, out_channels, momentum)) - self.kernel_size = kernel_size - if self.kernel_size is not None: - self.pool = nn.AvgPool2d(kernel_size=kernel_size) - - def forward(self, x): - for i in range(self.n_blocks): - x = self.conv[i](x) - if self.kernel_size is not None: - return x, self.pool(x) - else: - return x - - -class Intermediate(nn.Module): # - def __init__(self, in_channels, out_channels, n_inters, n_blocks, momentum=0.01): - super(Intermediate, self).__init__() - self.n_inters = n_inters - self.layers = nn.ModuleList() - self.layers.append( - ResEncoderBlock(in_channels, out_channels, None, n_blocks, momentum) - ) - for i in range(self.n_inters - 1): - self.layers.append( - ResEncoderBlock(out_channels, out_channels, None, n_blocks, momentum) - ) - - def forward(self, x): - for i in range(self.n_inters): - x = self.layers[i](x) - return x - - -class ResDecoderBlock(nn.Module): - def __init__(self, in_channels, out_channels, stride, n_blocks=1, momentum=0.01): - super(ResDecoderBlock, self).__init__() - out_padding = (0, 1) if stride == (1, 2) else (1, 1) - self.n_blocks = n_blocks - self.conv1 = nn.Sequential( - nn.ConvTranspose2d( - in_channels=in_channels, - out_channels=out_channels, - kernel_size=(3, 3), - stride=stride, - padding=(1, 1), - output_padding=out_padding, - bias=False, - ), - nn.BatchNorm2d(out_channels, momentum=momentum), - nn.ReLU(), - ) - self.conv2 = nn.ModuleList() - self.conv2.append(ConvBlockRes(out_channels * 2, out_channels, momentum)) - for i in range(n_blocks - 1): - self.conv2.append(ConvBlockRes(out_channels, out_channels, momentum)) - - def forward(self, x, concat_tensor): - x = self.conv1(x) - x = torch.cat((x, concat_tensor), dim=1) - for i in range(self.n_blocks): - x = self.conv2[i](x) - return x - - -class Decoder(nn.Module): - def __init__(self, in_channels, n_decoders, stride, n_blocks, momentum=0.01): - super(Decoder, self).__init__() - self.layers = nn.ModuleList() - self.n_decoders = n_decoders - for i in range(self.n_decoders): - out_channels = in_channels // 2 - self.layers.append( - ResDecoderBlock(in_channels, out_channels, stride, n_blocks, momentum) - ) - in_channels = out_channels - - def forward(self, x, concat_tensors): - for i in range(self.n_decoders): - x = self.layers[i](x, concat_tensors[-1 - i]) - return x - - -class DeepUnet(nn.Module): - def __init__( - self, - kernel_size, - n_blocks, - en_de_layers=5, - inter_layers=4, - in_channels=1, - en_out_channels=16, - ): - super(DeepUnet, self).__init__() - self.encoder = Encoder( - in_channels, 128, 
en_de_layers, kernel_size, n_blocks, en_out_channels - ) - self.intermediate = Intermediate( - self.encoder.out_channel // 2, - self.encoder.out_channel, - inter_layers, - n_blocks, - ) - self.decoder = Decoder( - self.encoder.out_channel, en_de_layers, kernel_size, n_blocks - ) - - def forward(self, x): - x, concat_tensors = self.encoder(x) - x = self.intermediate(x) - x = self.decoder(x, concat_tensors) - return x - - -class E2E(nn.Module): - def __init__( - self, - n_blocks, - n_gru, - kernel_size, - en_de_layers=5, - inter_layers=4, - in_channels=1, - en_out_channels=16, - ): - super(E2E, self).__init__() - self.unet = DeepUnet( - kernel_size, - n_blocks, - en_de_layers, - inter_layers, - in_channels, - en_out_channels, - ) - self.cnn = nn.Conv2d(en_out_channels, 3, (3, 3), padding=(1, 1)) - if n_gru: - self.fc = nn.Sequential( - BiGRU(3 * 128, 256, n_gru), - nn.Linear(512, 360), - nn.Dropout(0.25), - nn.Sigmoid(), - ) - else: - self.fc = nn.Sequential( - nn.Linear(3 * N_MELS, N_CLASS), nn.Dropout(0.25), nn.Sigmoid() - ) - - def forward(self, mel): - mel = mel.transpose(-1, -2).unsqueeze(1) - x = self.cnn(self.unet(mel)).transpose(1, 2).flatten(-2) - x = self.fc(x) - return x - - -from librosa.filters import mel - - -class MelSpectrogram(torch.nn.Module): - def __init__( - self, - is_half, - n_mel_channels, - sampling_rate, - win_length, - hop_length, - n_fft=None, - mel_fmin=0, - mel_fmax=None, - clamp=1e-5, - ): - super().__init__() - n_fft = win_length if n_fft is None else n_fft - self.hann_window = {} - mel_basis = mel( - sr=sampling_rate, - n_fft=n_fft, - n_mels=n_mel_channels, - fmin=mel_fmin, - fmax=mel_fmax, - htk=True, - ) - mel_basis = torch.from_numpy(mel_basis).float() - self.register_buffer("mel_basis", mel_basis) - self.n_fft = win_length if n_fft is None else n_fft - self.hop_length = hop_length - self.win_length = win_length - self.sampling_rate = sampling_rate - self.n_mel_channels = n_mel_channels - self.clamp = clamp - self.is_half = is_half - - def forward(self, audio, keyshift=0, speed=1, center=True): - factor = 2 ** (keyshift / 12) - n_fft_new = int(np.round(self.n_fft * factor)) - win_length_new = int(np.round(self.win_length * factor)) - hop_length_new = int(np.round(self.hop_length * speed)) - keyshift_key = str(keyshift) + "_" + str(audio.device) - if keyshift_key not in self.hann_window: - self.hann_window[keyshift_key] = torch.hann_window(win_length_new).to( - audio.device - ) - fft = torch.stft( - audio, - n_fft=n_fft_new, - hop_length=hop_length_new, - win_length=win_length_new, - window=self.hann_window[keyshift_key], - center=center, - return_complex=True, - ) - magnitude = torch.sqrt(fft.real.pow(2) + fft.imag.pow(2)) - if keyshift != 0: - size = self.n_fft // 2 + 1 - resize = magnitude.size(1) - if resize < size: - magnitude = F.pad(magnitude, (0, 0, 0, size - resize)) - magnitude = magnitude[:, :size, :] * self.win_length / win_length_new - mel_output = torch.matmul(self.mel_basis, magnitude) - if self.is_half == True: - mel_output = mel_output.half() - log_mel_spec = torch.log(torch.clamp(mel_output, min=self.clamp)) - return log_mel_spec - - -class RMVPE: - def __init__(self, model_path, is_half, device=None): - self.resample_kernel = {} - model = E2E(4, 1, (2, 2)) - ckpt = torch.load(model_path, map_location="cpu") - model.load_state_dict(ckpt) - model.eval() - if is_half == True: - model = model.half() - self.model = model - self.resample_kernel = {} - self.is_half = is_half - if device is None: - device = "cuda" if torch.cuda.is_available() 
else "cpu" - self.device = device - self.mel_extractor = MelSpectrogram( - is_half, 128, 16000, 1024, 160, None, 30, 8000 - ).to(device) - self.model = self.model.to(device) - cents_mapping = 20 * np.arange(360) + 1997.3794084376191 - self.cents_mapping = np.pad(cents_mapping, (4, 4)) # 368 - - def mel2hidden(self, mel): - with torch.no_grad(): - n_frames = mel.shape[-1] - mel = F.pad( - mel, (0, 32 * ((n_frames - 1) // 32 + 1) - n_frames), mode="reflect" - ) - hidden = self.model(mel) - return hidden[:, :n_frames] - - def decode(self, hidden, thred=0.03): - cents_pred = self.to_local_average_cents(hidden, thred=thred) - f0 = 10 * (2 ** (cents_pred / 1200)) - f0[f0 == 10] = 0 - # f0 = np.array([10 * (2 ** (cent_pred / 1200)) if cent_pred else 0 for cent_pred in cents_pred]) - return f0 - - def infer_from_audio(self, audio, thred=0.03): - audio = torch.from_numpy(audio).float().to(self.device).unsqueeze(0) - # torch.cuda.synchronize() - # t0=ttime() - mel = self.mel_extractor(audio, center=True) - # torch.cuda.synchronize() - # t1=ttime() - hidden = self.mel2hidden(mel) - # torch.cuda.synchronize() - # t2=ttime() - hidden = hidden.squeeze(0).cpu().numpy() - if self.is_half == True: - hidden = hidden.astype("float32") - f0 = self.decode(hidden, thred=thred) - # torch.cuda.synchronize() - # t3=ttime() - # print("hmvpe:%s\t%s\t%s\t%s"%(t1-t0,t2-t1,t3-t2,t3-t0)) - return f0 - - def to_local_average_cents(self, salience, thred=0.05): - # t0 = ttime() - center = np.argmax(salience, axis=1) # 帧长#index - salience = np.pad(salience, ((0, 0), (4, 4))) # 帧长,368 - # t1 = ttime() - center += 4 - todo_salience = [] - todo_cents_mapping = [] - starts = center - 4 - ends = center + 5 - for idx in range(salience.shape[0]): - todo_salience.append(salience[:, starts[idx] : ends[idx]][idx]) - todo_cents_mapping.append(self.cents_mapping[starts[idx] : ends[idx]]) - # t2 = ttime() - todo_salience = np.array(todo_salience) # 帧长,9 - todo_cents_mapping = np.array(todo_cents_mapping) # 帧长,9 - product_sum = np.sum(todo_salience * todo_cents_mapping, 1) - weight_sum = np.sum(todo_salience, 1) # 帧长 - devided = product_sum / weight_sum # 帧长 - # t3 = ttime() - maxx = np.max(salience, axis=1) # 帧长 - devided[maxx <= thred] = 0 - # t4 = ttime() - # print("decode:%s\t%s\t%s\t%s" % (t1 - t0, t2 - t1, t3 - t2, t4 - t3)) - return devided - - -# if __name__ == '__main__': -# audio, sampling_rate = sf.read("卢本伟语录~1.wav") -# if len(audio.shape) > 1: -# audio = librosa.to_mono(audio.transpose(1, 0)) -# audio_bak = audio.copy() -# if sampling_rate != 16000: -# audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000) -# model_path = "/bili-coeus/jupyter/jupyterhub-liujing04/vits_ch/test-RMVPE/weights/rmvpe_llc_half.pt" -# thred = 0.03 # 0.01 -# device = 'cuda' if torch.cuda.is_available() else 'cpu' -# rmvpe = RMVPE(model_path,is_half=False, device=device) -# t0=ttime() -# f0 = rmvpe.infer_from_audio(audio, thred=thred) -# f0 = rmvpe.infer_from_audio(audio, thred=thred) -# f0 = rmvpe.infer_from_audio(audio, thred=thred) -# f0 = rmvpe.infer_from_audio(audio, thred=thred) -# f0 = rmvpe.infer_from_audio(audio, thred=thred) -# t1=ttime() -# print(f0.shape,t1-t0) diff --git a/spaces/VIPLab/Track-Anything/tracker/inference/inference_core.py b/spaces/VIPLab/Track-Anything/tracker/inference/inference_core.py deleted file mode 100644 index e77f0805e30d3967265ed458dd7357e65a20c24f..0000000000000000000000000000000000000000 --- a/spaces/VIPLab/Track-Anything/tracker/inference/inference_core.py +++ /dev/null @@ -1,115 +0,0 @@ -from 
inference.memory_manager import MemoryManager -from model.network import XMem -from model.aggregate import aggregate - -from tracker.util.tensor_util import pad_divide_by, unpad - - -class InferenceCore: - def __init__(self, network:XMem, config): - self.config = config - self.network = network - self.mem_every = config['mem_every'] - self.deep_update_every = config['deep_update_every'] - self.enable_long_term = config['enable_long_term'] - - # if deep_update_every < 0, synchronize deep update with memory frame - self.deep_update_sync = (self.deep_update_every < 0) - - self.clear_memory() - self.all_labels = None - - def clear_memory(self): - self.curr_ti = -1 - self.last_mem_ti = 0 - if not self.deep_update_sync: - self.last_deep_update_ti = -self.deep_update_every - self.memory = MemoryManager(config=self.config) - - def update_config(self, config): - self.mem_every = config['mem_every'] - self.deep_update_every = config['deep_update_every'] - self.enable_long_term = config['enable_long_term'] - - # if deep_update_every < 0, synchronize deep update with memory frame - self.deep_update_sync = (self.deep_update_every < 0) - self.memory.update_config(config) - - def set_all_labels(self, all_labels): - # self.all_labels = [l.item() for l in all_labels] - self.all_labels = all_labels - - def step(self, image, mask=None, valid_labels=None, end=False): - # image: 3*H*W - # mask: num_objects*H*W or None - self.curr_ti += 1 - image, self.pad = pad_divide_by(image, 16) - image = image.unsqueeze(0) # add the batch dimension - - is_mem_frame = ((self.curr_ti-self.last_mem_ti >= self.mem_every) or (mask is not None)) and (not end) - need_segment = (self.curr_ti > 0) and ((valid_labels is None) or (len(self.all_labels) != len(valid_labels))) - is_deep_update = ( - (self.deep_update_sync and is_mem_frame) or # synchronized - (not self.deep_update_sync and self.curr_ti-self.last_deep_update_ti >= self.deep_update_every) # no-sync - ) and (not end) - is_normal_update = (not self.deep_update_sync or not is_deep_update) and (not end) - - key, shrinkage, selection, f16, f8, f4 = self.network.encode_key(image, - need_ek=(self.enable_long_term or need_segment), - need_sk=is_mem_frame) - multi_scale_features = (f16, f8, f4) - - # segment the current frame is needed - if need_segment: - memory_readout = self.memory.match_memory(key, selection).unsqueeze(0) - - hidden, pred_logits_with_bg, pred_prob_with_bg = self.network.segment(multi_scale_features, memory_readout, - self.memory.get_hidden(), h_out=is_normal_update, strip_bg=False) - # remove batch dim - pred_prob_with_bg = pred_prob_with_bg[0] - pred_prob_no_bg = pred_prob_with_bg[1:] - - pred_logits_with_bg = pred_logits_with_bg[0] - pred_logits_no_bg = pred_logits_with_bg[1:] - - if is_normal_update: - self.memory.set_hidden(hidden) - else: - pred_prob_no_bg = pred_prob_with_bg = pred_logits_with_bg = pred_logits_no_bg = None - - # use the input mask if any - if mask is not None: - mask, _ = pad_divide_by(mask, 16) - - if pred_prob_no_bg is not None: - # if we have a predicted mask, we work on it - # make pred_prob_no_bg consistent with the input mask - mask_regions = (mask.sum(0) > 0.5) - pred_prob_no_bg[:, mask_regions] = 0 - # shift by 1 because mask/pred_prob_no_bg do not contain background - mask = mask.type_as(pred_prob_no_bg) - if valid_labels is not None: - shift_by_one_non_labels = [i for i in range(pred_prob_no_bg.shape[0]) if (i+1) not in valid_labels] - # non-labelled objects are copied from the predicted mask - mask[shift_by_one_non_labels] = 
pred_prob_no_bg[shift_by_one_non_labels] - pred_prob_with_bg = aggregate(mask, dim=0) - - # also create new hidden states - self.memory.create_hidden_state(len(self.all_labels), key) - - # save as memory if needed - if is_mem_frame: - value, hidden = self.network.encode_value(image, f16, self.memory.get_hidden(), - pred_prob_with_bg[1:].unsqueeze(0), is_deep_update=is_deep_update) - self.memory.add_memory(key, shrinkage, value, self.all_labels, - selection=selection if self.enable_long_term else None) - self.last_mem_ti = self.curr_ti - - if is_deep_update: - self.memory.set_hidden(hidden) - self.last_deep_update_ti = self.curr_ti - - if pred_logits_with_bg is None: - return unpad(pred_prob_with_bg, self.pad), None - else: - return unpad(pred_prob_with_bg, self.pad), unpad(pred_logits_with_bg, self.pad) diff --git a/spaces/Visgift/nyami/app.py b/spaces/Visgift/nyami/app.py deleted file mode 100644 index eec8e91f6d20fa0211e98f8053f295a26e1f0622..0000000000000000000000000000000000000000 --- a/spaces/Visgift/nyami/app.py +++ /dev/null @@ -1,17 +0,0 @@ -from engine import SentimentAnalyzer -import streamlit as st - - -# Load the sentiment analysis model from Hugging Face -sentiment_analysis = SentimentAnalyzer() - -# Define the Streamlit app interface -st.title("User Sentiment Analysis") - -sentence = st.text_input("Enter a sentence:") - -# Perform sentiment analysis on the input sentence -if sentence: - label = sentiment_analysis.get_sentiment(sentence) - # Display the sentiment analysis result to the user - st.write(f"Sentiment analysis result: {label}") \ No newline at end of file diff --git a/spaces/Vision-CAIR/MiniGPT-v2/minigpt4/tasks/base_task.py b/spaces/Vision-CAIR/MiniGPT-v2/minigpt4/tasks/base_task.py deleted file mode 100644 index 7ceee96bdf520f8d730651e815defd83b7ecfebb..0000000000000000000000000000000000000000 --- a/spaces/Vision-CAIR/MiniGPT-v2/minigpt4/tasks/base_task.py +++ /dev/null @@ -1,286 +0,0 @@ -""" - Copyright (c) 2022, salesforce.com, inc. - All rights reserved. - SPDX-License-Identifier: BSD-3-Clause - For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause -""" - -import logging -import os - -import torch -import torch.distributed as dist -from minigpt4.common.dist_utils import get_rank, get_world_size, is_main_process, is_dist_avail_and_initialized -from minigpt4.common.logger import MetricLogger, SmoothedValue -from minigpt4.common.registry import registry -from minigpt4.datasets.data_utils import prepare_sample - - -class BaseTask: - def __init__(self, **kwargs): - super().__init__() - - self.inst_id_key = "instance_id" - - @classmethod - def setup_task(cls, **kwargs): - return cls() - - def build_model(self, cfg): - model_config = cfg.model_cfg - - model_cls = registry.get_model_class(model_config.arch) - return model_cls.from_config(model_config) - - def build_datasets(self, cfg): - """ - Build a dictionary of datasets, keyed by split 'train', 'valid', 'test'. - Download dataset and annotations automatically if not exist. - - Args: - cfg (common.config.Config): _description_ - - Returns: - dict: Dictionary of torch.utils.data.Dataset objects by split. - """ - - datasets = dict() - - datasets_config = cfg.datasets_cfg - - assert len(datasets_config) > 0, "At least one dataset has to be specified." 
- - for name in datasets_config: - dataset_config = datasets_config[name] - - builder = registry.get_builder_class(name)(dataset_config) - dataset = builder.build_datasets() - - dataset['train'].name = name - if 'sample_ratio' in dataset_config: - dataset['train'].sample_ratio = dataset_config.sample_ratio - - datasets[name] = dataset - - return datasets - - def train_step(self, model, samples): - loss = model(samples)["loss"] - return loss - - def valid_step(self, model, samples): - raise NotImplementedError - - def before_evaluation(self, model, dataset, **kwargs): - model.before_evaluation(dataset=dataset, task_type=type(self)) - - def after_evaluation(self, **kwargs): - pass - - def inference_step(self): - raise NotImplementedError - - def evaluation(self, model, data_loader, cuda_enabled=True): - metric_logger = MetricLogger(delimiter=" ") - header = "Evaluation" - # TODO make it configurable - print_freq = 10 - - results = [] - - for samples in metric_logger.log_every(data_loader, print_freq, header): - samples = prepare_sample(samples, cuda_enabled=cuda_enabled) - - eval_output = self.valid_step(model=model, samples=samples) - results.extend(eval_output) - - if is_dist_avail_and_initialized(): - dist.barrier() - - return results - - def train_epoch( - self, - epoch, - model, - data_loader, - optimizer, - lr_scheduler, - scaler=None, - cuda_enabled=False, - log_freq=50, - accum_grad_iters=1, - ): - return self._train_inner_loop( - epoch=epoch, - iters_per_epoch=lr_scheduler.iters_per_epoch, - model=model, - data_loader=data_loader, - optimizer=optimizer, - scaler=scaler, - lr_scheduler=lr_scheduler, - log_freq=log_freq, - cuda_enabled=cuda_enabled, - accum_grad_iters=accum_grad_iters, - ) - - def train_iters( - self, - epoch, - start_iters, - iters_per_inner_epoch, - model, - data_loader, - optimizer, - lr_scheduler, - scaler=None, - cuda_enabled=False, - log_freq=50, - accum_grad_iters=1, - ): - return self._train_inner_loop( - epoch=epoch, - start_iters=start_iters, - iters_per_epoch=iters_per_inner_epoch, - model=model, - data_loader=data_loader, - optimizer=optimizer, - scaler=scaler, - lr_scheduler=lr_scheduler, - log_freq=log_freq, - cuda_enabled=cuda_enabled, - accum_grad_iters=accum_grad_iters, - ) - - def _train_inner_loop( - self, - epoch, - iters_per_epoch, - model, - data_loader, - optimizer, - lr_scheduler, - scaler=None, - start_iters=None, - log_freq=50, - cuda_enabled=False, - accum_grad_iters=1, - ): - """ - An inner training loop compatible with both epoch-based and iter-based training. - - When using epoch-based, training stops after one epoch; when using iter-based, - training stops after #iters_per_epoch iterations. - """ - use_amp = scaler is not None - - if not hasattr(data_loader, "__next__"): - # convert to iterator if not already - data_loader = iter(data_loader) - - metric_logger = MetricLogger(delimiter=" ") - metric_logger.add_meter("lr", SmoothedValue(window_size=1, fmt="{value:.6f}")) - metric_logger.add_meter("loss", SmoothedValue(window_size=1, fmt="{value:.4f}")) - - # if iter-based runner, schedule lr based on inner epoch. - logging.info( - "Start training epoch {}, {} iters per inner epoch.".format( - epoch, iters_per_epoch - ) - ) - header = "Train: data epoch: [{}]".format(epoch) - if start_iters is None: - # epoch-based runner - inner_epoch = epoch - else: - # In iter-based runner, we schedule the learning rate based on iterations. 
- inner_epoch = start_iters // iters_per_epoch - header = header + "; inner epoch [{}]".format(inner_epoch) - - for i in metric_logger.log_every(range(iters_per_epoch), log_freq, header): - # if using iter-based runner, we stop after iters_per_epoch iterations. - if i >= iters_per_epoch: - break - - samples = next(data_loader) - - samples = prepare_sample(samples, cuda_enabled=cuda_enabled) - samples.update( - { - "epoch": inner_epoch, - "num_iters_per_epoch": iters_per_epoch, - "iters": i, - } - ) - - lr_scheduler.step(cur_epoch=inner_epoch, cur_step=i) - - with torch.cuda.amp.autocast(enabled=use_amp): - loss = self.train_step(model=model, samples=samples) - - # after_train_step() - if use_amp: - scaler.scale(loss).backward() - else: - loss.backward() - - # update gradients every accum_grad_iters iterations - if (i + 1) % accum_grad_iters == 0: - if use_amp: - scaler.step(optimizer) - scaler.update() - else: - optimizer.step() - optimizer.zero_grad() - - metric_logger.update(loss=loss.item()) - metric_logger.update(lr=optimizer.param_groups[0]["lr"]) - - # after train_epoch() - # gather the stats from all processes - metric_logger.synchronize_between_processes() - logging.info("Averaged stats: " + str(metric_logger.global_avg())) - return { - k: "{:.3f}".format(meter.global_avg) - for k, meter in metric_logger.meters.items() - } - - @staticmethod - def save_result(result, result_dir, filename, remove_duplicate=""): - import json - - result_file = os.path.join( - result_dir, "%s_rank%d.json" % (filename, get_rank()) - ) - final_result_file = os.path.join(result_dir, "%s.json" % filename) - - json.dump(result, open(result_file, "w")) - - if is_dist_avail_and_initialized(): - dist.barrier() - - if is_main_process(): - logging.warning("rank %d starts merging results." % get_rank()) - # combine results from all processes - result = [] - - for rank in range(get_world_size()): - result_file = os.path.join( - result_dir, "%s_rank%d.json" % (filename, rank) - ) - res = json.load(open(result_file, "r")) - result += res - - if remove_duplicate: - result_new = [] - id_list = [] - for res in result: - if res[remove_duplicate] not in id_list: - id_list.append(res[remove_duplicate]) - result_new.append(res) - result = result_new - - json.dump(result, open(final_result_file, "w")) - print("result file saved to %s" % final_result_file) - - return final_result_file diff --git a/spaces/Vision-CAIR/MiniGPT-v2/minigpt4/tasks/image_text_pretrain.py b/spaces/Vision-CAIR/MiniGPT-v2/minigpt4/tasks/image_text_pretrain.py deleted file mode 100644 index bbe8ec83a5dc95ee26a36e457feb394d18b7cd17..0000000000000000000000000000000000000000 --- a/spaces/Vision-CAIR/MiniGPT-v2/minigpt4/tasks/image_text_pretrain.py +++ /dev/null @@ -1,18 +0,0 @@ -""" - Copyright (c) 2022, salesforce.com, inc. - All rights reserved. 
- SPDX-License-Identifier: BSD-3-Clause - For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause -""" - -from minigpt4.common.registry import registry -from minigpt4.tasks.base_task import BaseTask - - -@registry.register_task("image_text_pretrain") -class ImageTextPretrainTask(BaseTask): - def __init__(self): - super().__init__() - - def evaluation(self, model, data_loader, cuda_enabled=True): - pass diff --git a/spaces/Vrk/SeeFood/FoodNoFood.py b/spaces/Vrk/SeeFood/FoodNoFood.py deleted file mode 100644 index 8b41b3a834ec74715ce6ba346ec98408ffde289e..0000000000000000000000000000000000000000 --- a/spaces/Vrk/SeeFood/FoodNoFood.py +++ /dev/null @@ -1,16 +0,0 @@ -from PIL import Image -import requests - -from transformers import CLIPProcessor, CLIPModel - -def food_not_food(input_image): - model = CLIPModel.from_pretrained("flax-community/clip-rsicd-v2") - processor = CLIPProcessor.from_pretrained("flax-community/clip-rsicd-v2") - - labels = ["food", "not food"] - inputs = processor(text=[f"a photo of a {l}" for l in labels], images=input_image, return_tensors="pt", padding=True) - - outputs = model(**inputs) - logits_per_image = outputs.logits_per_image - prob = logits_per_image.softmax(dim=1).detach().cpu().numpy().argmax(axis=1) - return labels[prob[0]] \ No newline at end of file diff --git a/spaces/Vrk/SkimLit/setup.sh b/spaces/Vrk/SkimLit/setup.sh deleted file mode 100644 index d8f97044be8894928d03fa6fe7e79af09be7edba..0000000000000000000000000000000000000000 --- a/spaces/Vrk/SkimLit/setup.sh +++ /dev/null @@ -1,11 +0,0 @@ -mkdir -p ~/.streamlit/ -echo "\ -[general]\n\ -email = \"your-email@domain.com\"\n\ -" > ~/.streamlit/credentials.toml -echo "\ -[server]\n\ -headless = true\n\ -enableCORS=false\n\ -port = $PORT\n\ -" > ~/.streamlit/config.toml diff --git a/spaces/WZUN666/vits-uma-genshin-honkai/utils.py b/spaces/WZUN666/vits-uma-genshin-honkai/utils.py deleted file mode 100644 index ee4b01ddfbe8173965371b29f770f3e87615fe71..0000000000000000000000000000000000000000 --- a/spaces/WZUN666/vits-uma-genshin-honkai/utils.py +++ /dev/null @@ -1,225 +0,0 @@ -import os -import sys -import argparse -import logging -import json -import subprocess -import numpy as np -import librosa -import torch - -MATPLOTLIB_FLAG = False - -logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) -logger = logging - - -def load_checkpoint(checkpoint_path, model, optimizer=None): - assert os.path.isfile(checkpoint_path) - checkpoint_dict = torch.load(checkpoint_path, map_location='cpu') - iteration = checkpoint_dict['iteration'] - learning_rate = checkpoint_dict['learning_rate'] - if optimizer is not None: - optimizer.load_state_dict(checkpoint_dict['optimizer']) - saved_state_dict = checkpoint_dict['model'] - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - new_state_dict= {} - for k, v in state_dict.items(): - try: - new_state_dict[k] = saved_state_dict[k] - except: - logger.info("%s is not in the checkpoint" % k) - new_state_dict[k] = v - if hasattr(model, 'module'): - model.module.load_state_dict(new_state_dict) - else: - model.load_state_dict(new_state_dict) - logger.info("Loaded checkpoint '{}' (iteration {})" .format( - checkpoint_path, iteration)) - return model, optimizer, learning_rate, iteration - - -def plot_spectrogram_to_numpy(spectrogram): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - 
mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(10,2)) - im = ax.imshow(spectrogram, aspect="auto", origin="lower", - interpolation='none') - plt.colorbar(im, ax=ax) - plt.xlabel("Frames") - plt.ylabel("Channels") - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def plot_alignment_to_numpy(alignment, info=None): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(6, 4)) - im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower', - interpolation='none') - fig.colorbar(im, ax=ax) - xlabel = 'Decoder timestep' - if info is not None: - xlabel += '\n\n' + info - plt.xlabel(xlabel) - plt.ylabel('Encoder timestep') - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def load_audio_to_torch(full_path, target_sampling_rate): - audio, sampling_rate = librosa.load(full_path, sr=target_sampling_rate, mono=True) - return torch.FloatTensor(audio.astype(np.float32)) - - -def load_filepaths_and_text(filename, split="|"): - with open(filename, encoding='utf-8') as f: - filepaths_and_text = [line.strip().split(split) for line in f] - return filepaths_and_text - - -def get_hparams(init=True): - parser = argparse.ArgumentParser() - parser.add_argument('-c', '--config', type=str, default="./configs/base.json", - help='JSON file for configuration') - parser.add_argument('-m', '--model', type=str, required=True, - help='Model name') - - args = parser.parse_args() - model_dir = os.path.join("./logs", args.model) - - if not os.path.exists(model_dir): - os.makedirs(model_dir) - - config_path = args.config - config_save_path = os.path.join(model_dir, "config.json") - if init: - with open(config_path, "r") as f: - data = f.read() - with open(config_save_path, "w") as f: - f.write(data) - else: - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_dir(model_dir): - config_save_path = os.path.join(model_dir, "config.json") - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams =HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_file(config_path): - with open(config_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams =HParams(**config) - return hparams - - -def check_git_hash(model_dir): - source_dir = os.path.dirname(os.path.realpath(__file__)) - if not os.path.exists(os.path.join(source_dir, ".git")): - logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format( - source_dir - )) - return - - cur_hash = subprocess.getoutput("git rev-parse HEAD") - - path = os.path.join(model_dir, "githash") - if os.path.exists(path): - saved_hash = open(path).read() - if saved_hash != cur_hash: - logger.warn("git hash values are different. 
{}(saved) != {}(current)".format( - saved_hash[:8], cur_hash[:8])) - else: - open(path, "w").write(cur_hash) - - -def get_logger(model_dir, filename="train.log"): - global logger - logger = logging.getLogger(os.path.basename(model_dir)) - logger.setLevel(logging.DEBUG) - - formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s") - if not os.path.exists(model_dir): - os.makedirs(model_dir) - h = logging.FileHandler(os.path.join(model_dir, filename)) - h.setLevel(logging.DEBUG) - h.setFormatter(formatter) - logger.addHandler(h) - return logger - - -class HParams(): - def __init__(self, **kwargs): - for k, v in kwargs.items(): - if type(v) == dict: - v = HParams(**v) - self[k] = v - - def keys(self): - return self.__dict__.keys() - - def items(self): - return self.__dict__.items() - - def values(self): - return self.__dict__.values() - - def __len__(self): - return len(self.__dict__) - - def __getitem__(self, key): - return getattr(self, key) - - def __setitem__(self, key, value): - return setattr(self, key, value) - - def __contains__(self, key): - return key in self.__dict__ - - def __repr__(self): - return self.__dict__.__repr__() diff --git a/spaces/XlalalaX/VITS-Umamusume-voice-synthesizer/monotonic_align/setup.py b/spaces/XlalalaX/VITS-Umamusume-voice-synthesizer/monotonic_align/setup.py deleted file mode 100644 index 30c224807a70faa9df9c9eb75f8e80c8c867b16b..0000000000000000000000000000000000000000 --- a/spaces/XlalalaX/VITS-Umamusume-voice-synthesizer/monotonic_align/setup.py +++ /dev/null @@ -1,9 +0,0 @@ -from distutils.core import setup -from Cython.Build import cythonize -import numpy - -setup( - name = 'monotonic_align', - ext_modules = cythonize("core.pyx"), - include_dirs=[numpy.get_include()] -) diff --git a/spaces/XuebaoDingZhen/YOLOv50.0.1/hubconf.py b/spaces/XuebaoDingZhen/YOLOv50.0.1/hubconf.py deleted file mode 100644 index f0192698fbe39f463e21a3092230258565cc7e0f..0000000000000000000000000000000000000000 --- a/spaces/XuebaoDingZhen/YOLOv50.0.1/hubconf.py +++ /dev/null @@ -1,169 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license -""" -PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5 - -Usage: - import torch - model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # official model - model = torch.hub.load('ultralytics/yolov5:master', 'yolov5s') # from branch - model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s.pt') # custom/local model - model = torch.hub.load('.', 'custom', 'yolov5s.pt', source='local') # local repo -""" - -import torch - - -def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None): - """Creates or loads a YOLOv5 model - - Arguments: - name (str): model name 'yolov5s' or path 'path/to/best.pt' - pretrained (bool): load pretrained weights into the model - channels (int): number of input channels - classes (int): number of model classes - autoshape (bool): apply YOLOv5 .autoshape() wrapper to model - verbose (bool): print all information to screen - device (str, torch.device, None): device to use for model parameters - - Returns: - YOLOv5 model - """ - from pathlib import Path - - from models.common import AutoShape, DetectMultiBackend - from models.experimental import attempt_load - from models.yolo import ClassificationModel, DetectionModel, SegmentationModel - from utils.downloads import attempt_download - from utils.general import LOGGER, ROOT, check_requirements, intersect_dicts, logging - from utils.torch_utils import select_device - - if not verbose: - 
LOGGER.setLevel(logging.WARNING) - check_requirements(ROOT / 'requirements.txt', exclude=('opencv-python', 'tensorboard', 'thop')) - name = Path(name) - path = name.with_suffix('.pt') if name.suffix == '' and not name.is_dir() else name # checkpoint path - try: - device = select_device(device) - if pretrained and channels == 3 and classes == 80: - try: - model = DetectMultiBackend(path, device=device, fuse=autoshape) # detection model - if autoshape: - if model.pt and isinstance(model.model, ClassificationModel): - LOGGER.warning('WARNING ⚠️ YOLOv5 ClassificationModel is not yet AutoShape compatible. ' - 'You must pass torch tensors in BCHW to this model, i.e. shape(1,3,224,224).') - elif model.pt and isinstance(model.model, SegmentationModel): - LOGGER.warning('WARNING ⚠️ YOLOv5 SegmentationModel is not yet AutoShape compatible. ' - 'You will not be able to run inference with this model.') - else: - model = AutoShape(model) # for file/URI/PIL/cv2/np inputs and NMS - except Exception: - model = attempt_load(path, device=device, fuse=False) # arbitrary model - else: - cfg = list((Path(__file__).parent / 'models').rglob(f'{path.stem}.yaml'))[0] # model.yaml path - model = DetectionModel(cfg, channels, classes) # create model - if pretrained: - ckpt = torch.load(attempt_download(path), map_location=device) # load - csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 - csd = intersect_dicts(csd, model.state_dict(), exclude=['anchors']) # intersect - model.load_state_dict(csd, strict=False) # load - if len(ckpt['model'].names) == classes: - model.names = ckpt['model'].names # set class names attribute - if not verbose: - LOGGER.setLevel(logging.INFO) # reset to default - return model.to(device) - - except Exception as e: - help_url = 'https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading' - s = f'{e}. Cache may be out of date, try `force_reload=True` or see {help_url} for help.' 
- raise Exception(s) from e - - -def custom(path='path/to/model.pt', autoshape=True, _verbose=True, device=None): - # YOLOv5 custom or local model - return _create(path, autoshape=autoshape, verbose=_verbose, device=device) - - -def yolov5n(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - # YOLOv5-nano model https://github.com/ultralytics/yolov5 - return _create('yolov5n', pretrained, channels, classes, autoshape, _verbose, device) - - -def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - # YOLOv5-small model https://github.com/ultralytics/yolov5 - return _create('yolov5s', pretrained, channels, classes, autoshape, _verbose, device) - - -def yolov5m(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - # YOLOv5-medium model https://github.com/ultralytics/yolov5 - return _create('yolov5m', pretrained, channels, classes, autoshape, _verbose, device) - - -def yolov5l(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - # YOLOv5-large model https://github.com/ultralytics/yolov5 - return _create('yolov5l', pretrained, channels, classes, autoshape, _verbose, device) - - -def yolov5x(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - # YOLOv5-xlarge model https://github.com/ultralytics/yolov5 - return _create('yolov5x', pretrained, channels, classes, autoshape, _verbose, device) - - -def yolov5n6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - # YOLOv5-nano-P6 model https://github.com/ultralytics/yolov5 - return _create('yolov5n6', pretrained, channels, classes, autoshape, _verbose, device) - - -def yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - # YOLOv5-small-P6 model https://github.com/ultralytics/yolov5 - return _create('yolov5s6', pretrained, channels, classes, autoshape, _verbose, device) - - -def yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - # YOLOv5-medium-P6 model https://github.com/ultralytics/yolov5 - return _create('yolov5m6', pretrained, channels, classes, autoshape, _verbose, device) - - -def yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - # YOLOv5-large-P6 model https://github.com/ultralytics/yolov5 - return _create('yolov5l6', pretrained, channels, classes, autoshape, _verbose, device) - - -def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - # YOLOv5-xlarge-P6 model https://github.com/ultralytics/yolov5 - return _create('yolov5x6', pretrained, channels, classes, autoshape, _verbose, device) - - -if __name__ == '__main__': - import argparse - from pathlib import Path - - import numpy as np - from PIL import Image - - from utils.general import cv2, print_args - - # Argparser - parser = argparse.ArgumentParser() - parser.add_argument('--model', type=str, default='yolov5s', help='model name') - opt = parser.parse_args() - print_args(vars(opt)) - - # Model - model = _create(name=opt.model, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True) - # model = custom(path='path/to/model.pt') # custom - - # Images - imgs = [ - 'data/images/zidane.jpg', # filename - Path('data/images/zidane.jpg'), # Path - 'https://ultralytics.com/images/zidane.jpg', # URI - cv2.imread('data/images/bus.jpg')[:, :, ::-1], # OpenCV - Image.open('data/images/bus.jpg'), # PIL 
- np.zeros((320, 640, 3))] # numpy - - # Inference - results = model(imgs, size=320) # batched inference - - # Results - results.print() - results.save() diff --git a/spaces/XzJosh/Ava-Bert-VITS2/resample.py b/spaces/XzJosh/Ava-Bert-VITS2/resample.py deleted file mode 100644 index 2ed1685654a371c5722168e9987809b05b1cb224..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/Ava-Bert-VITS2/resample.py +++ /dev/null @@ -1,42 +0,0 @@ -import os -import argparse -import librosa -import numpy as np -from multiprocessing import Pool, cpu_count - -import soundfile -from scipy.io import wavfile -from tqdm import tqdm - - -def process(item): - spkdir, wav_name, args = item - speaker = spkdir.replace("\\", "/").split("/")[-1] - wav_path = os.path.join(args.in_dir, speaker, wav_name) - if os.path.exists(wav_path) and '.wav' in wav_path: - os.makedirs(os.path.join(args.out_dir, speaker), exist_ok=True) - wav, sr = librosa.load(wav_path, sr=args.sr) - soundfile.write( - os.path.join(args.out_dir, speaker, wav_name), - wav, - sr - ) - - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--sr", type=int, default=44100, help="sampling rate") - parser.add_argument("--in_dir", type=str, default="./raw", help="path to source dir") - parser.add_argument("--out_dir", type=str, default="./dataset", help="path to target dir") - args = parser.parse_args() - # processs = 8 - processs = cpu_count()-2 if cpu_count() >4 else 1 - pool = Pool(processes=processs) - - for speaker in os.listdir(args.in_dir): - spk_dir = os.path.join(args.in_dir, speaker) - if os.path.isdir(spk_dir): - print(spk_dir) - for _ in tqdm(pool.imap_unordered(process, [(spk_dir, i, args) for i in os.listdir(spk_dir) if i.endswith("wav")])): - pass diff --git a/spaces/XzJosh/otto-Bert-VITS2/utils.py b/spaces/XzJosh/otto-Bert-VITS2/utils.py deleted file mode 100644 index c6aa6cfc64c33e2eed33e9845239e831fc1c4a1a..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/otto-Bert-VITS2/utils.py +++ /dev/null @@ -1,293 +0,0 @@ -import os -import glob -import sys -import argparse -import logging -import json -import subprocess -import numpy as np -from scipy.io.wavfile import read -import torch - -MATPLOTLIB_FLAG = False - -logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) -logger = logging - - -def load_checkpoint(checkpoint_path, model, optimizer=None, skip_optimizer=False): - assert os.path.isfile(checkpoint_path) - checkpoint_dict = torch.load(checkpoint_path, map_location='cpu') - iteration = checkpoint_dict['iteration'] - learning_rate = checkpoint_dict['learning_rate'] - if optimizer is not None and not skip_optimizer and checkpoint_dict['optimizer'] is not None: - optimizer.load_state_dict(checkpoint_dict['optimizer']) - elif optimizer is None and not skip_optimizer: - #else: #Disable this line if Infer ,and enable the line upper - new_opt_dict = optimizer.state_dict() - new_opt_dict_params = new_opt_dict['param_groups'][0]['params'] - new_opt_dict['param_groups'] = checkpoint_dict['optimizer']['param_groups'] - new_opt_dict['param_groups'][0]['params'] = new_opt_dict_params - optimizer.load_state_dict(new_opt_dict) - saved_state_dict = checkpoint_dict['model'] - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - new_state_dict = {} - for k, v in state_dict.items(): - try: - #assert "emb_g" not in k - # print("load", k) - new_state_dict[k] = saved_state_dict[k] - assert saved_state_dict[k].shape == v.shape, 
(saved_state_dict[k].shape, v.shape) - except: - print("error, %s is not in the checkpoint" % k) - new_state_dict[k] = v - if hasattr(model, 'module'): - model.module.load_state_dict(new_state_dict, strict=False) - else: - model.load_state_dict(new_state_dict, strict=False) - print("load ") - logger.info("Loaded checkpoint '{}' (iteration {})".format( - checkpoint_path, iteration)) - return model, optimizer, learning_rate, iteration - - -def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path): - logger.info("Saving model and optimizer state at iteration {} to {}".format( - iteration, checkpoint_path)) - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - torch.save({'model': state_dict, - 'iteration': iteration, - 'optimizer': optimizer.state_dict(), - 'learning_rate': learning_rate}, checkpoint_path) - - -def summarize(writer, global_step, scalars={}, histograms={}, images={}, audios={}, audio_sampling_rate=22050): - for k, v in scalars.items(): - writer.add_scalar(k, v, global_step) - for k, v in histograms.items(): - writer.add_histogram(k, v, global_step) - for k, v in images.items(): - writer.add_image(k, v, global_step, dataformats='HWC') - for k, v in audios.items(): - writer.add_audio(k, v, global_step, audio_sampling_rate) - - -def latest_checkpoint_path(dir_path, regex="G_*.pth"): - f_list = glob.glob(os.path.join(dir_path, regex)) - f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f)))) - x = f_list[-1] - print(x) - return x - - -def plot_spectrogram_to_numpy(spectrogram): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(10, 2)) - im = ax.imshow(spectrogram, aspect="auto", origin="lower", - interpolation='none') - plt.colorbar(im, ax=ax) - plt.xlabel("Frames") - plt.ylabel("Channels") - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def plot_alignment_to_numpy(alignment, info=None): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(6, 4)) - im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower', - interpolation='none') - fig.colorbar(im, ax=ax) - xlabel = 'Decoder timestep' - if info is not None: - xlabel += '\n\n' + info - plt.xlabel(xlabel) - plt.ylabel('Encoder timestep') - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def load_wav_to_torch(full_path): - sampling_rate, data = read(full_path) - return torch.FloatTensor(data.astype(np.float32)), sampling_rate - - -def load_filepaths_and_text(filename, split="|"): - with open(filename, encoding='utf-8') as f: - filepaths_and_text = [line.strip().split(split) for line in f] - return filepaths_and_text - - -def get_hparams(init=True): - parser = argparse.ArgumentParser() - parser.add_argument('-c', '--config', 
type=str, default="./configs/base.json", - help='JSON file for configuration') - parser.add_argument('-m', '--model', type=str, default="./OUTPUT_MODEL", - help='Model name') - parser.add_argument('--cont', dest='cont', action="store_true", default=False, help="whether to continue training on the latest checkpoint") - - args = parser.parse_args() - model_dir = os.path.join("./logs", args.model) - - if not os.path.exists(model_dir): - os.makedirs(model_dir) - - config_path = args.config - config_save_path = os.path.join(model_dir, "config.json") - if init: - with open(config_path, "r") as f: - data = f.read() - with open(config_save_path, "w") as f: - f.write(data) - else: - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - hparams.cont = args.cont - return hparams - - -def clean_checkpoints(path_to_models='logs/44k/', n_ckpts_to_keep=2, sort_by_time=True): - """Freeing up space by deleting saved ckpts - - Arguments: - path_to_models -- Path to the model directory - n_ckpts_to_keep -- Number of ckpts to keep, excluding G_0.pth and D_0.pth - sort_by_time -- True -> chronologically delete ckpts - False -> lexicographically delete ckpts - """ - import re - ckpts_files = [f for f in os.listdir(path_to_models) if os.path.isfile(os.path.join(path_to_models, f))] - name_key = (lambda _f: int(re.compile('._(\d+)\.pth').match(_f).group(1))) - time_key = (lambda _f: os.path.getmtime(os.path.join(path_to_models, _f))) - sort_key = time_key if sort_by_time else name_key - x_sorted = lambda _x: sorted([f for f in ckpts_files if f.startswith(_x) and not f.endswith('_0.pth')], - key=sort_key) - to_del = [os.path.join(path_to_models, fn) for fn in - (x_sorted('G')[:-n_ckpts_to_keep] + x_sorted('D')[:-n_ckpts_to_keep])] - del_info = lambda fn: logger.info(f".. Free up space by deleting ckpt {fn}") - del_routine = lambda x: [os.remove(x), del_info(x)] - rs = [del_routine(fn) for fn in to_del] - -def get_hparams_from_dir(model_dir): - config_save_path = os.path.join(model_dir, "config.json") - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_file(config_path): - with open(config_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - return hparams - - -def check_git_hash(model_dir): - source_dir = os.path.dirname(os.path.realpath(__file__)) - if not os.path.exists(os.path.join(source_dir, ".git")): - logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format( - source_dir - )) - return - - cur_hash = subprocess.getoutput("git rev-parse HEAD") - - path = os.path.join(model_dir, "githash") - if os.path.exists(path): - saved_hash = open(path).read() - if saved_hash != cur_hash: - logger.warn("git hash values are different. 
{}(saved) != {}(current)".format( - saved_hash[:8], cur_hash[:8])) - else: - open(path, "w").write(cur_hash) - - -def get_logger(model_dir, filename="train.log"): - global logger - logger = logging.getLogger(os.path.basename(model_dir)) - logger.setLevel(logging.DEBUG) - - formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s") - if not os.path.exists(model_dir): - os.makedirs(model_dir) - h = logging.FileHandler(os.path.join(model_dir, filename)) - h.setLevel(logging.DEBUG) - h.setFormatter(formatter) - logger.addHandler(h) - return logger - - -class HParams(): - def __init__(self, **kwargs): - for k, v in kwargs.items(): - if type(v) == dict: - v = HParams(**v) - self[k] = v - - def keys(self): - return self.__dict__.keys() - - def items(self): - return self.__dict__.items() - - def values(self): - return self.__dict__.values() - - def __len__(self): - return len(self.__dict__) - - def __getitem__(self, key): - return getattr(self, key) - - def __setitem__(self, key, value): - return setattr(self, key, value) - - def __contains__(self, key): - return key in self.__dict__ - - def __repr__(self): - return self.__dict__.__repr__() diff --git a/spaces/Yuliang/ECON/lib/dataset/NormalModule.py b/spaces/Yuliang/ECON/lib/dataset/NormalModule.py deleted file mode 100644 index 16dd02ec26789d40715b24b67f371da45aff2f8f..0000000000000000000000000000000000000000 --- a/spaces/Yuliang/ECON/lib/dataset/NormalModule.py +++ /dev/null @@ -1,82 +0,0 @@ -# -*- coding: utf-8 -*- - -# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is -# holder of all proprietary rights on this computer program. -# You can only use this computer program if you have closed -# a license agreement with MPG or you get the right to use the computer -# program from someone who is authorized to grant you that right. -# Any use of the computer program without a valid license is prohibited and -# liable to prosecution. -# -# Copyright©2019 Max-Planck-Gesellschaft zur Förderung -# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute -# for Intelligent Systems. All rights reserved. 
-# -# Contact: ps-license@tuebingen.mpg.de - -# pytorch lightning related libs -import pytorch_lightning as pl -from torch.utils.data import DataLoader - -from lib.dataset.NormalDataset import NormalDataset - - -class NormalModule(pl.LightningDataModule): - def __init__(self, cfg): - super(NormalModule, self).__init__() - self.cfg = cfg - - self.batch_size = self.cfg.batch_size - - self.data_size = {} - - def prepare_data(self): - - pass - - def setup(self, stage): - - self.train_dataset = NormalDataset(cfg=self.cfg, split="train") - self.val_dataset = NormalDataset(cfg=self.cfg, split="val") - self.test_dataset = NormalDataset(cfg=self.cfg, split="test") - - self.data_size = { - "train": len(self.train_dataset), - "val": len(self.val_dataset), - } - - def train_dataloader(self): - - train_data_loader = DataLoader( - self.train_dataset, - batch_size=self.batch_size, - shuffle=True, - num_workers=self.cfg.num_threads, - pin_memory=True, - ) - - return train_data_loader - - def val_dataloader(self): - - val_data_loader = DataLoader( - self.val_dataset, - batch_size=self.batch_size, - shuffle=False, - num_workers=self.cfg.num_threads, - pin_memory=True, - ) - - return val_data_loader - - def test_dataloader(self): - - test_data_loader = DataLoader( - self.test_dataset, - batch_size=1, - shuffle=False, - num_workers=self.cfg.num_threads, - pin_memory=True, - ) - - return test_data_loader diff --git a/spaces/Yusin/docker_test/main.py b/spaces/Yusin/docker_test/main.py deleted file mode 100644 index 11d1e04c3c9836d2d17adcfc79e0f8d560b89b14..0000000000000000000000000000000000000000 --- a/spaces/Yusin/docker_test/main.py +++ /dev/null @@ -1,54 +0,0 @@ -import requests -''' -import os -session_token = os.environ.get('SessionToken') -conversation_id = os.environ.get('conversation_id') -from revChatGPT.ChatGPT import Chatbot -chatbot = Chatbot({"session_token": session_token}) # You can start a custom conversation - - -import undetected_chromedriver.v2 as uc -from selenium.webdriver.support import expected_conditions as EC -from selenium.webdriver.support.ui import WebDriverWait -from selenium.webdriver.common.by import By - - -def get_element_or_none(driver, xpath, wait=None): - try: - if wait is None: - return driver.find_element(By.XPATH, xpath) - else: - return WebDriverWait(driver, wait).until( - EC.presence_of_element_located((By.XPATH, xpath))) - except: - return None - - -def run(): - print("Welcome to the game of Tom and Jerry. Here Cloudflare is the cat and Jerry is the programmer. Our goal, as a good Jerry, is to trick Cloudflare.") - - options = uc.ChromeOptions() - options.arguments.extend( - ["--no-sandbox", "--disable-setuid-sandbox"]) - print("Creating Driver...") - driver = uc.Chrome( - options=options - ) - print("Created Driver...") - - driver.get('https://nowsecure.nl') - - element = get_element_or_none(driver, "/html/body/div[2]/div/main/h1", 20) - if element is not None: - print("We defeated Cloudflare, 🎉🥳 :)") - else: - print("Cloudflare defeated us :(, No worry, we will try again. 
") - driver.quit() -''' - -if __name__ == "__main__": - #run() - headers = {'Authorization': 'yusin'} - data = {"content": 'am I stupid'} - response = requests.post('http://93.56.204.222:7788/api/ask', headers=headers, json=data) - print('this is my answear', response.text) \ No newline at end of file diff --git a/spaces/abdvl/datahub_qa_bot/docs/townhalls.md b/spaces/abdvl/datahub_qa_bot/docs/townhalls.md deleted file mode 100644 index f9c3bb16150cd8b9cd52510d5e2180540fc857d8..0000000000000000000000000000000000000000 --- a/spaces/abdvl/datahub_qa_bot/docs/townhalls.md +++ /dev/null @@ -1,14 +0,0 @@ -# DataHub Town Halls - -We hold regular virtual town hall meetings to meet with DataHub community. -Currently it's held on the fourth Thursday of every month (with some exceptions such as holiday weekends). -It's the perfect venue to meet the team behind DataHub and other users, as well as to ask higher-level questions, such as roadmap and product direction. -From time to time we also use the opportunity to showcase upcoming features. - -## Meeting Invite & Agenda - -You can join with this link https://zoom.datahubproject.io, or [RSVP](https://rsvp.datahubproject.io/) to get a calendar invite - this will always have the most up-to-date agenda for upcoming sessions. - -## Past Meetings - -See [Town Hall History](townhall-history.md) for recordings of past town halls. diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/utils/registry.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/utils/registry.py deleted file mode 100644 index fa9df39bc9f3d8d568361e7250ab35468f2b74e0..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/utils/registry.py +++ /dev/null @@ -1,315 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import inspect -import warnings -from functools import partial - -from .misc import is_seq_of - - -def build_from_cfg(cfg, registry, default_args=None): - """Build a module from config dict. - - Args: - cfg (dict): Config dict. It should at least contain the key "type". - registry (:obj:`Registry`): The registry to search the type from. - default_args (dict, optional): Default initialization arguments. - - Returns: - object: The constructed object. - """ - if not isinstance(cfg, dict): - raise TypeError(f'cfg must be a dict, but got {type(cfg)}') - if 'type' not in cfg: - if default_args is None or 'type' not in default_args: - raise KeyError( - '`cfg` or `default_args` must contain the key "type", ' - f'but got {cfg}\n{default_args}') - if not isinstance(registry, Registry): - raise TypeError('registry must be an mmcv.Registry object, ' - f'but got {type(registry)}') - if not (isinstance(default_args, dict) or default_args is None): - raise TypeError('default_args must be a dict or None, ' - f'but got {type(default_args)}') - - args = cfg.copy() - - if default_args is not None: - for name, value in default_args.items(): - args.setdefault(name, value) - - obj_type = args.pop('type') - if isinstance(obj_type, str): - obj_cls = registry.get(obj_type) - if obj_cls is None: - raise KeyError( - f'{obj_type} is not in the {registry.name} registry') - elif inspect.isclass(obj_type): - obj_cls = obj_type - else: - raise TypeError( - f'type must be a str or valid type, but got {type(obj_type)}') - try: - return obj_cls(**args) - except Exception as e: - # Normal TypeError does not print class name. - raise type(e)(f'{obj_cls.__name__}: {e}') - - -class Registry: - """A registry to map strings to classes. 
- - Registered object could be built from registry. - Example: - >>> MODELS = Registry('models') - >>> @MODELS.register_module() - >>> class ResNet: - >>> pass - >>> resnet = MODELS.build(dict(type='ResNet')) - - Please refer to - https://mmcv.readthedocs.io/en/latest/understand_mmcv/registry.html for - advanced usage. - - Args: - name (str): Registry name. - build_func(func, optional): Build function to construct instance from - Registry, func:`build_from_cfg` is used if neither ``parent`` or - ``build_func`` is specified. If ``parent`` is specified and - ``build_func`` is not given, ``build_func`` will be inherited - from ``parent``. Default: None. - parent (Registry, optional): Parent registry. The class registered in - children registry could be built from parent. Default: None. - scope (str, optional): The scope of registry. It is the key to search - for children registry. If not specified, scope will be the name of - the package where class is defined, e.g. mmdet, mmcls, mmseg. - Default: None. - """ - - def __init__(self, name, build_func=None, parent=None, scope=None): - self._name = name - self._module_dict = dict() - self._children = dict() - self._scope = self.infer_scope() if scope is None else scope - - # self.build_func will be set with the following priority: - # 1. build_func - # 2. parent.build_func - # 3. build_from_cfg - if build_func is None: - if parent is not None: - self.build_func = parent.build_func - else: - self.build_func = build_from_cfg - else: - self.build_func = build_func - if parent is not None: - assert isinstance(parent, Registry) - parent._add_children(self) - self.parent = parent - else: - self.parent = None - - def __len__(self): - return len(self._module_dict) - - def __contains__(self, key): - return self.get(key) is not None - - def __repr__(self): - format_str = self.__class__.__name__ + \ - f'(name={self._name}, ' \ - f'items={self._module_dict})' - return format_str - - @staticmethod - def infer_scope(): - """Infer the scope of registry. - - The name of the package where registry is defined will be returned. - - Example: - # in mmdet/models/backbone/resnet.py - >>> MODELS = Registry('models') - >>> @MODELS.register_module() - >>> class ResNet: - >>> pass - The scope of ``ResNet`` will be ``mmdet``. - - - Returns: - scope (str): The inferred scope name. - """ - # inspect.stack() trace where this function is called, the index-2 - # indicates the frame where `infer_scope()` is called - filename = inspect.getmodule(inspect.stack()[2][0]).__name__ - split_filename = filename.split('.') - return split_filename[0] - - @staticmethod - def split_scope_key(key): - """Split scope and key. - - The first scope will be split from key. - - Examples: - >>> Registry.split_scope_key('mmdet.ResNet') - 'mmdet', 'ResNet' - >>> Registry.split_scope_key('ResNet') - None, 'ResNet' - - Return: - scope (str, None): The first scope. - key (str): The remaining key. - """ - split_index = key.find('.') - if split_index != -1: - return key[:split_index], key[split_index + 1:] - else: - return None, key - - @property - def name(self): - return self._name - - @property - def scope(self): - return self._scope - - @property - def module_dict(self): - return self._module_dict - - @property - def children(self): - return self._children - - def get(self, key): - """Get the registry record. - - Args: - key (str): The class name in string format. - - Returns: - class: The corresponding class. 
- """ - scope, real_key = self.split_scope_key(key) - if scope is None or scope == self._scope: - # get from self - if real_key in self._module_dict: - return self._module_dict[real_key] - else: - # get from self._children - if scope in self._children: - return self._children[scope].get(real_key) - else: - # goto root - parent = self.parent - while parent.parent is not None: - parent = parent.parent - return parent.get(key) - - def build(self, *args, **kwargs): - return self.build_func(*args, **kwargs, registry=self) - - def _add_children(self, registry): - """Add children for a registry. - - The ``registry`` will be added as children based on its scope. - The parent registry could build objects from children registry. - - Example: - >>> models = Registry('models') - >>> mmdet_models = Registry('models', parent=models) - >>> @mmdet_models.register_module() - >>> class ResNet: - >>> pass - >>> resnet = models.build(dict(type='mmdet.ResNet')) - """ - - assert isinstance(registry, Registry) - assert registry.scope is not None - assert registry.scope not in self.children, \ - f'scope {registry.scope} exists in {self.name} registry' - self.children[registry.scope] = registry - - def _register_module(self, module_class, module_name=None, force=False): - if not inspect.isclass(module_class): - raise TypeError('module must be a class, ' - f'but got {type(module_class)}') - - if module_name is None: - module_name = module_class.__name__ - if isinstance(module_name, str): - module_name = [module_name] - for name in module_name: - if not force and name in self._module_dict: - raise KeyError(f'{name} is already registered ' - f'in {self.name}') - self._module_dict[name] = module_class - - def deprecated_register_module(self, cls=None, force=False): - warnings.warn( - 'The old API of register_module(module, force=False) ' - 'is deprecated and will be removed, please use the new API ' - 'register_module(name=None, force=False, module=None) instead.') - if cls is None: - return partial(self.deprecated_register_module, force=force) - self._register_module(cls, force=force) - return cls - - def register_module(self, name=None, force=False, module=None): - """Register a module. - - A record will be added to `self._module_dict`, whose key is the class - name or the specified name, and value is the class itself. - It can be used as a decorator or a normal function. - - Example: - >>> backbones = Registry('backbone') - >>> @backbones.register_module() - >>> class ResNet: - >>> pass - - >>> backbones = Registry('backbone') - >>> @backbones.register_module(name='mnet') - >>> class MobileNet: - >>> pass - - >>> backbones = Registry('backbone') - >>> class ResNet: - >>> pass - >>> backbones.register_module(ResNet) - - Args: - name (str | None): The module name to be registered. If not - specified, the class name will be used. - force (bool, optional): Whether to override an existing class with - the same name. Default: False. - module (type): Module class to be registered. - """ - if not isinstance(force, bool): - raise TypeError(f'force must be a boolean, but got {type(force)}') - # NOTE: This is a walkaround to be compatible with the old api, - # while it may introduce unexpected bugs. 
- if isinstance(name, type): - return self.deprecated_register_module(name, force=force) - - # raise the error ahead of time - if not (name is None or isinstance(name, str) or is_seq_of(name, str)): - raise TypeError( - 'name must be either of None, an instance of str or a sequence' - f' of str, but got {type(name)}') - - # use it as a normal method: x.register_module(module=SomeClass) - if module is not None: - self._register_module( - module_class=module, module_name=name, force=force) - return module - - # use it as a decorator: @x.register_module() - def _register(cls): - self._register_module( - module_class=cls, module_name=name, force=force) - return cls - - return _register diff --git a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/media/codecs/ffmpeg_lib/libavutil.py b/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/media/codecs/ffmpeg_lib/libavutil.py deleted file mode 100644 index 540bf23c5c34ce8bed08eefb944f7c022ee5d3c6..0000000000000000000000000000000000000000 --- a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/media/codecs/ffmpeg_lib/libavutil.py +++ /dev/null @@ -1,240 +0,0 @@ -"""Wrapper for include/libavutil/avutil.h -""" -from ctypes import c_char_p, c_void_p, POINTER, Structure -from ctypes import c_int, c_int64, c_uint64 -from ctypes import c_uint8, c_int8, c_uint, c_size_t - -import pyglet.lib -from pyglet.util import debug_print -from . import compat - -_debug = debug_print('debug_media') - -avutil = pyglet.lib.load_library( - 'avutil', - win32=('avutil-57', 'avutil-56'), - darwin=('avutil.57', 'avutil-56') -) - -avutil.avutil_version.restype = c_int -compat.set_version('avutil', avutil.avutil_version() >> 16) - -AVMEDIA_TYPE_UNKNOWN = -1 -AVMEDIA_TYPE_VIDEO = 0 -AVMEDIA_TYPE_AUDIO = 1 -AVMEDIA_TYPE_DATA = 2 -AVMEDIA_TYPE_SUBTITLE = 3 -AVMEDIA_TYPE_ATTACHMENT = 4 -AVMEDIA_TYPE_NB = 5 - -AV_SAMPLE_FMT_U8 = 0 -AV_SAMPLE_FMT_S16 = 1 -AV_SAMPLE_FMT_S32 = 2 -AV_SAMPLE_FMT_FLT = 3 -AV_SAMPLE_FORMAT_DOUBLE = 4 -AV_SAMPLE_FMT_U8P = 5 -AV_SAMPLE_FMT_S16P = 6 -AV_SAMPLE_FMT_S32P = 7 -AV_SAMPLE_FMT_FLTP = 8 -AV_SAMPLE_FMT_DBLP = 9 -AV_SAMPLE_FMT_S64 = 10 -AV_SAMPLE_FMT_S64P = 11 - -AV_NUM_DATA_POINTERS = 8 - -AV_PIX_FMT_RGB24 = 2 -AV_PIX_FMT_ARGB = 25 -AV_PIX_FMT_RGBA = 26 - -AVChannelOrder = c_int -class AVChannelLayout(Structure): - _fields_ = [ - ('order', c_int), - ('nb_channels', c_int), - # .. more - ] -class AVBuffer(Structure): - _fields_ = [ - ('data', POINTER(c_uint8)), - ('size', c_int), - # .. 
more - ] - - -class AVBufferRef(Structure): - _fields_ = [ - ('buffer', POINTER(AVBuffer)), - ('data', POINTER(c_uint8)), - ('size', c_int) - ] - - -class AVDictionaryEntry(Structure): - _fields_ = [ - ('key', c_char_p), - ('value', c_char_p) - ] - - -class AVDictionary(Structure): - _fields_ = [ - ('count', c_int), - ('elems', POINTER(AVDictionaryEntry)) - ] - - -class AVClass(Structure): - pass - - -class AVRational(Structure): - _fields_ = [ - ('num', c_int), - ('den', c_int) - ] - - def __repr__(self): - return f"AVRational({self.num}/{self.den})" - - -class AVFrameSideData(Structure): - pass - - -class AVFrame(Structure): - pass - -AVFrame_Fields = [ - ('data', POINTER(c_uint8) * AV_NUM_DATA_POINTERS), - ('linesize', c_int * AV_NUM_DATA_POINTERS), - ('extended_data', POINTER(POINTER(c_uint8))), - ('width', c_int), - ('height', c_int), - ('nb_samples', c_int), - ('format', c_int), - ('key_frame', c_int), - ('pict_type', c_int), - ('sample_aspect_ratio', AVRational), - ('pts', c_int64), - ('pkt_pts', c_int64), # Deprecated. Removed in 57. - ('pkt_dts', c_int64), - ('time_base', AVRational), # (5.x) - ('coded_picture_number', c_int), - ('display_picture_number', c_int), - ('quality', c_int), - ('opaque', c_void_p), - ('error', c_uint64 * AV_NUM_DATA_POINTERS), # Deprecated. Removed in 57. - ('repeat_pict', c_int), - ('interlaced_frame', c_int), - ('top_field_first', c_int), - ('palette_has_changed', c_int), - ('reordered_opaque', c_int64), - ('sample_rate', c_int), - ('channel_layout', c_uint64), - ('buf', POINTER(AVBufferRef) * AV_NUM_DATA_POINTERS), - ('extended_buf', POINTER(POINTER(AVBufferRef))), - ('nb_extended_buf', c_int), - ('side_data', POINTER(POINTER(AVFrameSideData))), - ('nb_side_data', c_int), - ('flags', c_int), - ('color_range', c_int), - ('color_primaries', c_int), - ('color_trc', c_int), - ('colorspace', c_int), - ('chroma_location', c_int), - ('best_effort_timestamp', c_int64), - ('pkt_pos', c_int64), - ('pkt_duration', c_int64), - # ! - ('metadata', POINTER(AVDictionary)), - ('decode_error_flags', c_int), - ('channels', c_int), - ('pkt_size', c_int), - ('qscale_table', POINTER(c_int8)), # Deprecated. Removed in 57. - ('qstride', c_int), # Deprecated. Removed in 57. - ('qscale_type', c_int), # Deprecated. Removed in 57. - ('qp_table_buf', POINTER(AVBufferRef)), # Deprecated. Removed in 57. 
- ('hw_frames_ctx', POINTER(AVBufferRef)), - ('opaque_ref', POINTER(AVBufferRef)), - ('crop_top', c_size_t), # video frames only - ('crop_bottom', c_size_t), # video frames only - ('crop_left', c_size_t), # video frames only - ('crop_right', c_size_t), # video frames only - ('private_ref', POINTER(AVBufferRef)), -] - -compat.add_version_changes('avutil', 56, AVFrame, AVFrame_Fields, - removals=('time_base',)) - -compat.add_version_changes('avutil', 57, AVFrame, AVFrame_Fields, - removals=('pkt_pts', 'error', 'qscale_table', 'qstride', 'qscale_type', 'qp_table_buf')) - -AV_NOPTS_VALUE = -0x8000000000000000 -AV_TIME_BASE = 1000000 -AV_TIME_BASE_Q = AVRational(1, AV_TIME_BASE) - -avutil.av_version_info.restype = c_char_p -avutil.av_dict_get.restype = POINTER(AVDictionaryEntry) -avutil.av_dict_get.argtypes = [POINTER(AVDictionary), - c_char_p, POINTER(AVDictionaryEntry), - c_int] -avutil.av_rescale_q.restype = c_int64 -avutil.av_rescale_q.argtypes = [c_int64, AVRational, AVRational] -avutil.av_samples_get_buffer_size.restype = c_int -avutil.av_samples_get_buffer_size.argtypes = [POINTER(c_int), - c_int, c_int, c_int] -avutil.av_frame_alloc.restype = POINTER(AVFrame) -avutil.av_frame_free.argtypes = [POINTER(POINTER(AVFrame))] -avutil.av_get_default_channel_layout.restype = c_int64 -avutil.av_get_default_channel_layout.argtypes = [c_int] -avutil.av_get_bytes_per_sample.restype = c_int -avutil.av_get_bytes_per_sample.argtypes = [c_int] -avutil.av_strerror.restype = c_int -avutil.av_strerror.argtypes = [c_int, c_char_p, c_size_t] - -avutil.av_image_fill_arrays.restype = c_int -avutil.av_image_fill_arrays.argtypes = [POINTER(c_uint8) * 4, c_int * 4, - POINTER(c_uint8), c_int, c_int, c_int, c_int] -avutil.av_dict_set.restype = c_int -avutil.av_dict_set.argtypes = [POINTER(POINTER(AVDictionary)), - c_char_p, c_char_p, c_int] -avutil.av_dict_free.argtypes = [POINTER(POINTER(AVDictionary))] -avutil.av_log_set_level.restype = c_int -avutil.av_log_set_level.argtypes = [c_uint] -avutil.av_malloc.restype = c_void_p -avutil.av_malloc.argtypes = [c_int] -avutil.av_freep.restype = c_void_p -avutil.av_freep.argtypes = [c_void_p] - -__all__ = [ - 'avutil', - 'AVMEDIA_TYPE_UNKNOWN', - 'AVMEDIA_TYPE_VIDEO', - 'AVMEDIA_TYPE_AUDIO', - 'AVMEDIA_TYPE_DATA', - 'AVMEDIA_TYPE_SUBTITLE', - 'AVMEDIA_TYPE_ATTACHMENT', - 'AVMEDIA_TYPE_NB', - 'AV_SAMPLE_FMT_U8', - 'AV_SAMPLE_FMT_S16', - 'AV_SAMPLE_FMT_S32', - 'AV_SAMPLE_FMT_FLT', - 'AV_SAMPLE_FORMAT_DOUBLE', - 'AV_SAMPLE_FMT_U8P', - 'AV_SAMPLE_FMT_S16P', - 'AV_SAMPLE_FMT_S32P', - 'AV_SAMPLE_FMT_FLTP', - 'AV_SAMPLE_FMT_DBLP', - 'AV_SAMPLE_FMT_S64', - 'AV_SAMPLE_FMT_S64P', - 'AV_NUM_DATA_POINTERS', - 'AV_PIX_FMT_RGB24', - 'AV_PIX_FMT_ARGB', - 'AV_PIX_FMT_RGBA', - 'AV_NOPTS_VALUE', - 'AV_TIME_BASE', - 'AV_TIME_BASE_Q', - 'AVFrame', - 'AVRational', - 'AVDictionary', -] diff --git a/spaces/active-learning/webhook/main.py b/spaces/active-learning/webhook/main.py deleted file mode 100644 index 70c965ce2c01d7231cba861ff662ad69dae8ac04..0000000000000000000000000000000000000000 --- a/spaces/active-learning/webhook/main.py +++ /dev/null @@ -1,118 +0,0 @@ -import os - -from fastapi import FastAPI, Request, Response -import numpy as np -from tensorflow import keras -from tensorflow.keras import layers -import tensorflow as tf - -from datasets import load_dataset -from huggingface_hub import push_to_hub_keras, from_pretrained_keras - -KEY = os.environ.get("WEBHOOK_SECRET") - -app = FastAPI() - -def to_numpy(examples): - examples["pixel_values"] = [np.array(image.convert('1')) for image 
in examples["image"]] - return examples - -def preprocess(): - train_dataset = load_dataset("active-learning/labeled_samples")["train"] - train_dataset = train_dataset.map(to_numpy, batched=True) - - test_dataset = load_dataset("active-learning/test_mnist")["test"] - test_dataset = test_dataset.map(to_numpy, batched=True) - - x_train = train_dataset["pixel_values"] - y_train = train_dataset["label"] - - x_test = test_dataset["pixel_values"] - y_test = test_dataset["label"] - - x_train = np.expand_dims(x_train, -1) - x_test = np.expand_dims(x_test, -1) - - num_classes = 10 - - y_train = keras.utils.to_categorical(y_train, num_classes) - y_test = keras.utils.to_categorical(y_test, num_classes) - - return x_train, y_train, x_test, y_test - -def train(): - input_shape = (28, 28, 1) - x_train, y_train, x_test, y_test = preprocess() - num_classes = 10 - - model = keras.Sequential( - [ - keras.Input(shape=input_shape), - layers.Conv2D(32, kernel_size=(3, 3), activation="relu"), - layers.MaxPooling2D(pool_size=(2, 2)), - layers.Conv2D(64, kernel_size=(3, 3), activation="relu"), - layers.MaxPooling2D(pool_size=(2, 2)), - layers.Flatten(), - layers.Dropout(0.5), - layers.Dense(num_classes, activation="softmax"), - ] - ) - - model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"]) - model.fit(x_train, y_train, batch_size=128, epochs=15, validation_split=0.1) - - score = model.evaluate(x_test, y_test, verbose=0) - print("Test loss:", score[0]) - print("Test accuracy:", score[1]) - - push_to_hub_keras(model, "active-learning/mnist_classifier") - -def find_samples_to_label(): - loaded_model = from_pretrained_keras("active-learning/mnist_classifier") - loaded_model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"]) - - unlabeled_data = load_dataset("active-learning/unlabeled_samples")["train"] - processed_data = unlabeled_data.map(to_numpy, batched=True) - processed_data = processed_data["pixel_values"] - processed_data = tf.expand_dims(processed_data, -1) - - # Get all predictions - # And then get the 5 samples with the lowest prediction score - preds = loaded_model.predict(processed_data) - top_pred_confs = 1 - np.max(preds, axis=1) - idx_to_label = np.argpartition(top_pred_confs, -5)[-5:] - - # Upload samples to the dataset to label - to_label_data = unlabeled_data.select(idx_to_label) - to_label_data.push_to_hub("active-learning/to_label_samples") - - # Remove from the pool of samples - unlabeled_data = unlabeled_data.select( - ( - i for i in range(len(unlabeled_data)) - if i not in set(idx_to_label) - ) - ) - unlabeled_data.push_to_hub("active-learning/unlabeled_samples") - -@app.get("/") -def read_root(): - data = """ -

-    Active Learning Trainer
-    This is a demo app showing how to use webhooks to do Active Learning.
- """ - return Response(content=data, media_type="text/html") - -@app.post("/webhook") -async def webhook(request: Request): - print("Received request") - if request.headers.get("X-Webhook-Secret") is None: - return Response("No secret", status_code=401) - if request.headers.get("X-Webhook-Secret") != KEY: - return Response("Invalid secret", status_code=401) - data = await request.json() - print("Webhook received!") - train() - find_samples_to_label() - return "Model trained!" - diff --git a/spaces/adirik/ChangeIt/header.html b/spaces/adirik/ChangeIt/header.html deleted file mode 100644 index 90d8fa50f1438180dcdfdc6a341028a2486ec53e..0000000000000000000000000000000000000000 --- a/spaces/adirik/ChangeIt/header.html +++ /dev/null @@ -1,22 +0,0 @@ -
-    Change It!
-    Change it! Upload a source image, input which clothing item/s you would like to change with text (e.g. "t-shirt") and upload an example image of what you'd like to replace it with.
-    This demo is built using CLIPSeg and Paint by Example.
-    You can skip the queue by duplicating this space and upgrading to GPU in settings: Duplicate Space
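The header above only describes the ChangeIt pipeline in prose: CLIPSeg turns the text prompt (e.g. "t-shirt") into a segmentation mask, which is then handed to Paint by Example for inpainting. The Space's own application code is not part of this diff, so the sketch below only illustrates the CLIPSeg step using the public `CIDAS/clipseg-rd64-refined` checkpoint from `transformers`; the file names, prompt, and mask handling are illustrative assumptions, not the Space's actual implementation.

```python
# Sketch only: text-prompted mask extraction with CLIPSeg, as described in the header.
# The model id is the public CLIPSeg checkpoint; input/output file names are placeholders.
import torch
from PIL import Image
from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")

image = Image.open("source.jpg").convert("RGB")   # hypothetical source photo
prompts = ["t-shirt"]                             # clothing item(s) to replace

inputs = processor(text=prompts, images=[image] * len(prompts),
                   padding=True, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits               # low-resolution per-prompt heatmaps

if logits.dim() == 2:                             # a single prompt may come back unbatched
    logits = logits.unsqueeze(0)

# Turn the heatmap into a grayscale mask at the source resolution; an inpainting model
# such as Paint by Example would take this as its mask image together with the example image.
probs = torch.sigmoid(logits[0])
mask = Image.fromarray((probs * 255).byte().cpu().numpy()).resize(image.size)
mask.save("mask.png")
```

The mask, the source image, and an example garment image would then go to the Paint by Example inpainting model; that step is omitted here since the diff does not show how the Space wires it up.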
\ No newline at end of file diff --git a/spaces/ajayhk/colorize/README.md b/spaces/ajayhk/colorize/README.md deleted file mode 100644 index 2d6800aff5aa36082337b42b6d4b78b8ebd9e987..0000000000000000000000000000000000000000 --- a/spaces/ajayhk/colorize/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Colorize -emoji: 🌟 -colorFrom: blue -colorTo: purple -sdk: gradio -sdk_version: 3.4 -app_file: start.py -pinned: false -license: mit ---- \ No newline at end of file diff --git a/spaces/akhaliq/GPEN/face_detect/utils/nms/__init__.py b/spaces/akhaliq/GPEN/face_detect/utils/nms/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/akhaliq/SwinIR/utils/util_calculate_psnr_ssim.py b/spaces/akhaliq/SwinIR/utils/util_calculate_psnr_ssim.py deleted file mode 100644 index 1a8fb27161f9c1fd3e37b14654dfe05eaadf619c..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/SwinIR/utils/util_calculate_psnr_ssim.py +++ /dev/null @@ -1,346 +0,0 @@ -import cv2 -import numpy as np -import torch - - -def calculate_psnr(img1, img2, crop_border, input_order='HWC', test_y_channel=False): - """Calculate PSNR (Peak Signal-to-Noise Ratio). - - Ref: https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio - - Args: - img1 (ndarray): Images with range [0, 255]. - img2 (ndarray): Images with range [0, 255]. - crop_border (int): Cropped pixels in each edge of an image. These - pixels are not involved in the PSNR calculation. - input_order (str): Whether the input order is 'HWC' or 'CHW'. - Default: 'HWC'. - test_y_channel (bool): Test on Y channel of YCbCr. Default: False. - - Returns: - float: psnr result. - """ - - assert img1.shape == img2.shape, (f'Image shapes are differnet: {img1.shape}, {img2.shape}.') - if input_order not in ['HWC', 'CHW']: - raise ValueError(f'Wrong input_order {input_order}. Supported input_orders are ' '"HWC" and "CHW"') - img1 = reorder_image(img1, input_order=input_order) - img2 = reorder_image(img2, input_order=input_order) - img1 = img1.astype(np.float64) - img2 = img2.astype(np.float64) - - if crop_border != 0: - img1 = img1[crop_border:-crop_border, crop_border:-crop_border, ...] - img2 = img2[crop_border:-crop_border, crop_border:-crop_border, ...] - - if test_y_channel: - img1 = to_y_channel(img1) - img2 = to_y_channel(img2) - - mse = np.mean((img1 - img2) ** 2) - if mse == 0: - return float('inf') - return 20. * np.log10(255. / np.sqrt(mse)) - - -def _ssim(img1, img2): - """Calculate SSIM (structural similarity) for one channel images. - - It is called by func:`calculate_ssim`. - - Args: - img1 (ndarray): Images with range [0, 255] with order 'HWC'. - img2 (ndarray): Images with range [0, 255] with order 'HWC'. - - Returns: - float: ssim result. 
- """ - - C1 = (0.01 * 255) ** 2 - C2 = (0.03 * 255) ** 2 - - img1 = img1.astype(np.float64) - img2 = img2.astype(np.float64) - kernel = cv2.getGaussianKernel(11, 1.5) - window = np.outer(kernel, kernel.transpose()) - - mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5] - mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5] - mu1_sq = mu1 ** 2 - mu2_sq = mu2 ** 2 - mu1_mu2 = mu1 * mu2 - sigma1_sq = cv2.filter2D(img1 ** 2, -1, window)[5:-5, 5:-5] - mu1_sq - sigma2_sq = cv2.filter2D(img2 ** 2, -1, window)[5:-5, 5:-5] - mu2_sq - sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2 - - ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2)) - return ssim_map.mean() - - -def calculate_ssim(img1, img2, crop_border, input_order='HWC', test_y_channel=False): - """Calculate SSIM (structural similarity). - - Ref: - Image quality assessment: From error visibility to structural similarity - - The results are the same as that of the official released MATLAB code in - https://ece.uwaterloo.ca/~z70wang/research/ssim/. - - For three-channel images, SSIM is calculated for each channel and then - averaged. - - Args: - img1 (ndarray): Images with range [0, 255]. - img2 (ndarray): Images with range [0, 255]. - crop_border (int): Cropped pixels in each edge of an image. These - pixels are not involved in the SSIM calculation. - input_order (str): Whether the input order is 'HWC' or 'CHW'. - Default: 'HWC'. - test_y_channel (bool): Test on Y channel of YCbCr. Default: False. - - Returns: - float: ssim result. - """ - - assert img1.shape == img2.shape, (f'Image shapes are differnet: {img1.shape}, {img2.shape}.') - if input_order not in ['HWC', 'CHW']: - raise ValueError(f'Wrong input_order {input_order}. Supported input_orders are ' '"HWC" and "CHW"') - img1 = reorder_image(img1, input_order=input_order) - img2 = reorder_image(img2, input_order=input_order) - img1 = img1.astype(np.float64) - img2 = img2.astype(np.float64) - - if crop_border != 0: - img1 = img1[crop_border:-crop_border, crop_border:-crop_border, ...] - img2 = img2[crop_border:-crop_border, crop_border:-crop_border, ...] 
- - if test_y_channel: - img1 = to_y_channel(img1) - img2 = to_y_channel(img2) - - ssims = [] - for i in range(img1.shape[2]): - ssims.append(_ssim(img1[..., i], img2[..., i])) - return np.array(ssims).mean() - - -def _blocking_effect_factor(im): - block_size = 8 - - block_horizontal_positions = torch.arange(7, im.shape[3] - 1, 8) - block_vertical_positions = torch.arange(7, im.shape[2] - 1, 8) - - horizontal_block_difference = ( - (im[:, :, :, block_horizontal_positions] - im[:, :, :, block_horizontal_positions + 1]) ** 2).sum( - 3).sum(2).sum(1) - vertical_block_difference = ( - (im[:, :, block_vertical_positions, :] - im[:, :, block_vertical_positions + 1, :]) ** 2).sum(3).sum( - 2).sum(1) - - nonblock_horizontal_positions = np.setdiff1d(torch.arange(0, im.shape[3] - 1), block_horizontal_positions) - nonblock_vertical_positions = np.setdiff1d(torch.arange(0, im.shape[2] - 1), block_vertical_positions) - - horizontal_nonblock_difference = ( - (im[:, :, :, nonblock_horizontal_positions] - im[:, :, :, nonblock_horizontal_positions + 1]) ** 2).sum( - 3).sum(2).sum(1) - vertical_nonblock_difference = ( - (im[:, :, nonblock_vertical_positions, :] - im[:, :, nonblock_vertical_positions + 1, :]) ** 2).sum( - 3).sum(2).sum(1) - - n_boundary_horiz = im.shape[2] * (im.shape[3] // block_size - 1) - n_boundary_vert = im.shape[3] * (im.shape[2] // block_size - 1) - boundary_difference = (horizontal_block_difference + vertical_block_difference) / ( - n_boundary_horiz + n_boundary_vert) - - n_nonboundary_horiz = im.shape[2] * (im.shape[3] - 1) - n_boundary_horiz - n_nonboundary_vert = im.shape[3] * (im.shape[2] - 1) - n_boundary_vert - nonboundary_difference = (horizontal_nonblock_difference + vertical_nonblock_difference) / ( - n_nonboundary_horiz + n_nonboundary_vert) - - scaler = np.log2(block_size) / np.log2(min([im.shape[2], im.shape[3]])) - bef = scaler * (boundary_difference - nonboundary_difference) - - bef[boundary_difference <= nonboundary_difference] = 0 - return bef - - -def calculate_psnrb(img1, img2, crop_border, input_order='HWC', test_y_channel=False): - """Calculate PSNR-B (Peak Signal-to-Noise Ratio). - - Ref: Quality assessment of deblocked images, for JPEG image deblocking evaluation - # https://gitlab.com/Queuecumber/quantization-guided-ac/-/blob/master/metrics/psnrb.py - - Args: - img1 (ndarray): Images with range [0, 255]. - img2 (ndarray): Images with range [0, 255]. - crop_border (int): Cropped pixels in each edge of an image. These - pixels are not involved in the PSNR calculation. - input_order (str): Whether the input order is 'HWC' or 'CHW'. - Default: 'HWC'. - test_y_channel (bool): Test on Y channel of YCbCr. Default: False. - - Returns: - float: psnr result. - """ - - assert img1.shape == img2.shape, (f'Image shapes are differnet: {img1.shape}, {img2.shape}.') - if input_order not in ['HWC', 'CHW']: - raise ValueError(f'Wrong input_order {input_order}. Supported input_orders are ' '"HWC" and "CHW"') - img1 = reorder_image(img1, input_order=input_order) - img2 = reorder_image(img2, input_order=input_order) - img1 = img1.astype(np.float64) - img2 = img2.astype(np.float64) - - if crop_border != 0: - img1 = img1[crop_border:-crop_border, crop_border:-crop_border, ...] - img2 = img2[crop_border:-crop_border, crop_border:-crop_border, ...] 
- - if test_y_channel: - img1 = to_y_channel(img1) - img2 = to_y_channel(img2) - - # follow https://gitlab.com/Queuecumber/quantization-guided-ac/-/blob/master/metrics/psnrb.py - img1 = torch.from_numpy(img1).permute(2, 0, 1).unsqueeze(0) / 255. - img2 = torch.from_numpy(img2).permute(2, 0, 1).unsqueeze(0) / 255. - - total = 0 - for c in range(img1.shape[1]): - mse = torch.nn.functional.mse_loss(img1[:, c:c + 1, :, :], img2[:, c:c + 1, :, :], reduction='none') - bef = _blocking_effect_factor(img1[:, c:c + 1, :, :]) - - mse = mse.view(mse.shape[0], -1).mean(1) - total += 10 * torch.log10(1 / (mse + bef)) - - return float(total) / img1.shape[1] - - -def reorder_image(img, input_order='HWC'): - """Reorder images to 'HWC' order. - - If the input_order is (h, w), return (h, w, 1); - If the input_order is (c, h, w), return (h, w, c); - If the input_order is (h, w, c), return as it is. - - Args: - img (ndarray): Input image. - input_order (str): Whether the input order is 'HWC' or 'CHW'. - If the input image shape is (h, w), input_order will not have - effects. Default: 'HWC'. - - Returns: - ndarray: reordered image. - """ - - if input_order not in ['HWC', 'CHW']: - raise ValueError(f'Wrong input_order {input_order}. Supported input_orders are ' "'HWC' and 'CHW'") - if len(img.shape) == 2: - img = img[..., None] - if input_order == 'CHW': - img = img.transpose(1, 2, 0) - return img - - -def to_y_channel(img): - """Change to Y channel of YCbCr. - - Args: - img (ndarray): Images with range [0, 255]. - - Returns: - (ndarray): Images with range [0, 255] (float type) without round. - """ - img = img.astype(np.float32) / 255. - if img.ndim == 3 and img.shape[2] == 3: - img = bgr2ycbcr(img, y_only=True) - img = img[..., None] - return img * 255. - - -def _convert_input_type_range(img): - """Convert the type and range of the input image. - - It converts the input image to np.float32 type and range of [0, 1]. - It is mainly used for pre-processing the input image in colorspace - convertion functions such as rgb2ycbcr and ycbcr2rgb. - - Args: - img (ndarray): The input image. It accepts: - 1. np.uint8 type with range [0, 255]; - 2. np.float32 type with range [0, 1]. - - Returns: - (ndarray): The converted image with type of np.float32 and range of - [0, 1]. - """ - img_type = img.dtype - img = img.astype(np.float32) - if img_type == np.float32: - pass - elif img_type == np.uint8: - img /= 255. - else: - raise TypeError('The img type should be np.float32 or np.uint8, ' f'but got {img_type}') - return img - - -def _convert_output_type_range(img, dst_type): - """Convert the type and range of the image according to dst_type. - - It converts the image to desired type and range. If `dst_type` is np.uint8, - images will be converted to np.uint8 type with range [0, 255]. If - `dst_type` is np.float32, it converts the image to np.float32 type with - range [0, 1]. - It is mainly used for post-processing images in colorspace convertion - functions such as rgb2ycbcr and ycbcr2rgb. - - Args: - img (ndarray): The image to be converted with np.float32 type and - range [0, 255]. - dst_type (np.uint8 | np.float32): If dst_type is np.uint8, it - converts the image to np.uint8 type with range [0, 255]. If - dst_type is np.float32, it converts the image to np.float32 type - with range [0, 1]. - - Returns: - (ndarray): The converted image with desired type and range. 
- """ - if dst_type not in (np.uint8, np.float32): - raise TypeError('The dst_type should be np.float32 or np.uint8, ' f'but got {dst_type}') - if dst_type == np.uint8: - img = img.round() - else: - img /= 255. - return img.astype(dst_type) - - -def bgr2ycbcr(img, y_only=False): - """Convert a BGR image to YCbCr image. - - The bgr version of rgb2ycbcr. - It implements the ITU-R BT.601 conversion for standard-definition - television. See more details in - https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion. - - It differs from a similar function in cv2.cvtColor: `BGR <-> YCrCb`. - In OpenCV, it implements a JPEG conversion. See more details in - https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion. - - Args: - img (ndarray): The input image. It accepts: - 1. np.uint8 type with range [0, 255]; - 2. np.float32 type with range [0, 1]. - y_only (bool): Whether to only return Y channel. Default: False. - - Returns: - ndarray: The converted YCbCr image. The output image has the same type - and range as input image. - """ - img_type = img.dtype - img = _convert_input_type_range(img) - if y_only: - out_img = np.dot(img, [24.966, 128.553, 65.481]) + 16.0 - else: - out_img = np.matmul( - img, [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786], [65.481, -37.797, 112.0]]) + [16, 128, 128] - out_img = _convert_output_type_range(out_img, img_type) - return out_img diff --git a/spaces/algomuffin/jojo_fork/e4e/criteria/lpips/__init__.py b/spaces/algomuffin/jojo_fork/e4e/criteria/lpips/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/aliabid94/AutoGPT/autogpt/commands/image_gen.py b/spaces/aliabid94/AutoGPT/autogpt/commands/image_gen.py deleted file mode 100644 index 0809fcdd3e38b52a2ce09ca1444f2574813d40f9..0000000000000000000000000000000000000000 --- a/spaces/aliabid94/AutoGPT/autogpt/commands/image_gen.py +++ /dev/null @@ -1,163 +0,0 @@ -""" Image Generation Module for AutoGPT.""" -import io -import os.path -import uuid -from base64 import b64decode - -import openai -import requests -from PIL import Image - -from autogpt.config import Config -from autogpt.workspace import path_in_workspace - -CFG = Config() - - -def generate_image(prompt: str, size: int = 256) -> str: - """Generate an image from a prompt. - - Args: - prompt (str): The prompt to use - size (int, optional): The size of the image. Defaults to 256. (Not supported by HuggingFace) - - Returns: - str: The filename of the image - """ - filename = f"{str(uuid.uuid4())}.jpg" - - # DALL-E - if CFG.image_provider == "dalle": - return generate_image_with_dalle(prompt, filename, size) - # HuggingFace - elif CFG.image_provider == "huggingface": - return generate_image_with_hf(prompt, filename) - # SD WebUI - elif CFG.image_provider == "sdwebui": - return generate_image_with_sd_webui(prompt, filename, size) - return "No Image Provider Set" - - -def generate_image_with_hf(prompt: str, filename: str) -> str: - """Generate an image with HuggingFace's API. - - Args: - prompt (str): The prompt to use - filename (str): The filename to save the image to - - Returns: - str: The filename of the image - """ - API_URL = ( - f"https://api-inference.huggingface.co/models/{CFG.huggingface_image_model}" - ) - if CFG.huggingface_api_token is None: - raise ValueError( - "You need to set your Hugging Face API token in the config file." 
- ) - headers = { - "Authorization": f"Bearer {CFG.huggingface_api_token}", - "X-Use-Cache": "false", - } - - response = requests.post( - API_URL, - headers=headers, - json={ - "inputs": prompt, - }, - ) - - image = Image.open(io.BytesIO(response.content)) - print(f"Image Generated for prompt:{prompt}") - - image.save(path_in_workspace(filename)) - - return f"Saved to disk:{filename}" - - -def generate_image_with_dalle(prompt: str, filename: str) -> str: - """Generate an image with DALL-E. - - Args: - prompt (str): The prompt to use - filename (str): The filename to save the image to - - Returns: - str: The filename of the image - """ - openai.api_key = CFG.openai_api_key - - # Check for supported image sizes - if size not in [256, 512, 1024]: - closest = min([256, 512, 1024], key=lambda x: abs(x - size)) - print( - f"DALL-E only supports image sizes of 256x256, 512x512, or 1024x1024. Setting to {closest}, was {size}." - ) - size = closest - - response = openai.Image.create( - prompt=prompt, - n=1, - size=f"{size}x{size}", - response_format="b64_json", - ) - - print(f"Image Generated for prompt:{prompt}") - - image_data = b64decode(response["data"][0]["b64_json"]) - - with open(path_in_workspace(filename), mode="wb") as png: - png.write(image_data) - - return f"Saved to disk:{filename}" - - -def generate_image_with_sd_webui( - prompt: str, - filename: str, - size: int = 512, - negative_prompt: str = "", - extra: dict = {}, -) -> str: - """Generate an image with Stable Diffusion webui. - Args: - prompt (str): The prompt to use - filename (str): The filename to save the image to - size (int, optional): The size of the image. Defaults to 256. - negative_prompt (str, optional): The negative prompt to use. Defaults to "". - extra (dict, optional): Extra parameters to pass to the API. Defaults to {}. - Returns: - str: The filename of the image - """ - # Create a session and set the basic auth if needed - s = requests.Session() - if CFG.sd_webui_auth: - username, password = CFG.sd_webui_auth.split(":") - s.auth = (username, password or "") - - # Generate the images - response = requests.post( - f"{CFG.sd_webui_url}/sdapi/v1/txt2img", - json={ - "prompt": prompt, - "negative_prompt": negative_prompt, - "sampler_index": "DDIM", - "steps": 20, - "cfg_scale": 7.0, - "width": size, - "height": size, - "n_iter": 1, - **extra, - }, - ) - - print(f"Image Generated for prompt:{prompt}") - - # Save the image to disk - response = response.json() - b64 = b64decode(response["images"][0].split(",", 1)[0]) - image = Image.open(io.BytesIO(b64)) - image.save(path_in_workspace(filename)) - - return f"Saved to disk:{filename}" diff --git a/spaces/amagastya/SPARK/chainlit.md b/spaces/amagastya/SPARK/chainlit.md deleted file mode 100644 index 12bdaa897ed271bb3c827c118ed6bf33a1428d9a..0000000000000000000000000000000000000000 --- a/spaces/amagastya/SPARK/chainlit.md +++ /dev/null @@ -1,13 +0,0 @@ -# Welcome to SPARK! ⚡ - -Hi there! 👋 SPARK is your Smart Prompt Assistant and Resource Knowledgebase. I'm here to help you navigate the exciting world of prompt engineering 💻😊 -Whether you need help setting the context, refining your desired outcome, or encouraging detailed responses, I've got you covered. 
-### Data Sources 📚 -SPARK has access to the following sources: -- **Brex's Prompt Engineering Guide:** [Brex's introduction to language models and prompt engineering](https://github.com/brexhq/prompt-engineering) -- **promptingguide.ai:** [A prompt engineering guide that demonstrates many techniques](https://www.promptingguide.ai) -- **OpenAI Cookbook:** [Techniques to improve reliability: A slightly dated (Sep 2022) review of techniques for prompting language models.](https://github.com/openai/openai-cookbook/blob/main/techniques_to_improve_reliability.md) -- **learnprompting.org:** [An introductory course to prompt engineering](https://learnprompting.org/) -- **Lil'Log Prompt Engineering:** [An OpenAI researcher's review of the prompt engineering literature (as of March 2023)](https://lilianweng.github.io/posts/2023-03-15-prompt-engineering/) - -Feel free to ask any questions, seek guidance, or request prompt examples to accelerate your learning and prompt writing process. Let's dive into the fascinating world of prompt engineering and unlock the full potential of AI models together! \ No newline at end of file diff --git a/spaces/amarchheda/ChordDuplicate/portaudio/src/os/win/pa_win_waveformat.c b/spaces/amarchheda/ChordDuplicate/portaudio/src/os/win/pa_win_waveformat.c deleted file mode 100644 index 0436a399ba564993204afd9bef29c11864cc8488..0000000000000000000000000000000000000000 --- a/spaces/amarchheda/ChordDuplicate/portaudio/src/os/win/pa_win_waveformat.c +++ /dev/null @@ -1,163 +0,0 @@ -/* - * PortAudio Portable Real-Time Audio Library - * Windows WAVEFORMAT* data structure utilities - * portaudio.h should be included before this file. - * - * Copyright (c) 2007 Ross Bencina - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files - * (the "Software"), to deal in the Software without restriction, - * including without limitation the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR - * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF - * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -/* - * The text above constitutes the entire PortAudio license; however, - * the PortAudio community also makes the following non-binding requests: - * - * Any person wishing to distribute modifications to the Software is - * requested to send the modifications to the original developer so that - * they can be incorporated into the canonical version. It is also - * requested that these non-binding requests be included along with the - * license above. 
- */ - -#include -#include -#if defined(WINAPI_FAMILY) && (WINAPI_FAMILY == WINAPI_FAMILY_APP) - #include /* for WAVEFORMATEX */ -#endif - -#include "portaudio.h" -#include "pa_win_waveformat.h" - - -#if !defined(WAVE_FORMAT_EXTENSIBLE) -#define WAVE_FORMAT_EXTENSIBLE 0xFFFE -#endif - - -static GUID pawin_ksDataFormatSubtypeGuidBase = - { (USHORT)(WAVE_FORMAT_PCM), 0x0000, 0x0010, 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71 }; - - -int PaWin_SampleFormatToLinearWaveFormatTag( PaSampleFormat sampleFormat ) -{ - if( sampleFormat == paFloat32 ) - return PAWIN_WAVE_FORMAT_IEEE_FLOAT; - - return PAWIN_WAVE_FORMAT_PCM; -} - - -void PaWin_InitializeWaveFormatEx( PaWinWaveFormat *waveFormat, - int numChannels, PaSampleFormat sampleFormat, int waveFormatTag, double sampleRate ) -{ - WAVEFORMATEX *waveFormatEx = (WAVEFORMATEX*)waveFormat; - int bytesPerSample = Pa_GetSampleSize(sampleFormat); - unsigned long bytesPerFrame = numChannels * bytesPerSample; - - waveFormatEx->wFormatTag = waveFormatTag; - waveFormatEx->nChannels = (WORD)numChannels; - waveFormatEx->nSamplesPerSec = (DWORD)sampleRate; - waveFormatEx->nAvgBytesPerSec = waveFormatEx->nSamplesPerSec * bytesPerFrame; - waveFormatEx->nBlockAlign = (WORD)bytesPerFrame; - waveFormatEx->wBitsPerSample = bytesPerSample * 8; - waveFormatEx->cbSize = 0; -} - - -void PaWin_InitializeWaveFormatExtensible( PaWinWaveFormat *waveFormat, - int numChannels, PaSampleFormat sampleFormat, int waveFormatTag, double sampleRate, - PaWinWaveFormatChannelMask channelMask ) -{ - WAVEFORMATEX *waveFormatEx = (WAVEFORMATEX*)waveFormat; - int bytesPerSample = Pa_GetSampleSize(sampleFormat); - unsigned long bytesPerFrame = numChannels * bytesPerSample; - GUID guid; - - waveFormatEx->wFormatTag = WAVE_FORMAT_EXTENSIBLE; - waveFormatEx->nChannels = (WORD)numChannels; - waveFormatEx->nSamplesPerSec = (DWORD)sampleRate; - waveFormatEx->nAvgBytesPerSec = waveFormatEx->nSamplesPerSec * bytesPerFrame; - waveFormatEx->nBlockAlign = (WORD)bytesPerFrame; - waveFormatEx->wBitsPerSample = bytesPerSample * 8; - waveFormatEx->cbSize = 22; - - memcpy(&waveFormat->fields[PAWIN_INDEXOF_WVALIDBITSPERSAMPLE], - &waveFormatEx->wBitsPerSample, sizeof(WORD)); - - memcpy(&waveFormat->fields[PAWIN_INDEXOF_DWCHANNELMASK], - &channelMask, sizeof(DWORD)); - - guid = pawin_ksDataFormatSubtypeGuidBase; - guid.Data1 = (USHORT)waveFormatTag; - memcpy(&waveFormat->fields[PAWIN_INDEXOF_SUBFORMAT], &guid, sizeof(GUID)); -} - -PaWinWaveFormatChannelMask PaWin_DefaultChannelMask( int numChannels ) -{ - switch( numChannels ){ - case 1: - return PAWIN_SPEAKER_MONO; - case 2: - return PAWIN_SPEAKER_STEREO; - case 3: - return PAWIN_SPEAKER_FRONT_LEFT | PAWIN_SPEAKER_FRONT_CENTER | PAWIN_SPEAKER_FRONT_RIGHT; - case 4: - return PAWIN_SPEAKER_QUAD; - case 5: - return PAWIN_SPEAKER_QUAD | PAWIN_SPEAKER_FRONT_CENTER; - case 6: - /* The meaning of the PAWIN_SPEAKER_5POINT1 flag has changed over time: - http://msdn2.microsoft.com/en-us/library/aa474707.aspx - We use PAWIN_SPEAKER_5POINT1 (not PAWIN_SPEAKER_5POINT1_SURROUND) - because on some cards (eg Audigy) PAWIN_SPEAKER_5POINT1_SURROUND - results in a virtual mixdown placing the rear output in the - front _and_ rear speakers. - */ - return PAWIN_SPEAKER_5POINT1; - /* case 7: */ - case 8: - /* RoBi: PAWIN_SPEAKER_7POINT1_SURROUND fits normal surround sound setups better than PAWIN_SPEAKER_7POINT1, f.i. NVidia HDMI Audio - output is silent on channels 5&6 with NVidia drivers, and channel 7&8 with Microsoft HD Audio driver using PAWIN_SPEAKER_7POINT1. 
- With PAWIN_SPEAKER_7POINT1_SURROUND both setups work OK. */ - return PAWIN_SPEAKER_7POINT1_SURROUND; - } - - /* Apparently some Audigy drivers will output silence - if the direct-out constant (0) is used. So this is not ideal. - - RoBi 2012-12-19: Also, NVidia driver seem to output garbage instead. Again not very ideal. - */ - return PAWIN_SPEAKER_DIRECTOUT; - - /* Note that Alec Rogers proposed the following as an alternate method to - generate the default channel mask, however it doesn't seem to be an improvement - over the above, since some drivers will matrix outputs mapping to non-present - speakers across multiple physical speakers. - - if(nChannels==1) { - pwfFormat->dwChannelMask = SPEAKER_FRONT_CENTER; - } - else { - pwfFormat->dwChannelMask = 0; - for(i=0; idwChannelMask = (pwfFormat->dwChannelMask << 1) | 0x1; - } - */ -} diff --git a/spaces/anakin87/who-killed-laura-palmer/presentations/README.md b/spaces/anakin87/who-killed-laura-palmer/presentations/README.md deleted file mode 100644 index 422f41d55711723701b7d61f7a8a5744f2ac24b4..0000000000000000000000000000000000000000 --- a/spaces/anakin87/who-killed-laura-palmer/presentations/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# 🧑‍🏫 Presentations -*PyCon Italy 2022* - -- [Video presentation](https://www.youtube.com/watch?v=V-c-qmDEJVg) -- [Slides](./presentations/wklp_pycon.pdf) diff --git a/spaces/aodianyun/ChatGLM-6B/README.md b/spaces/aodianyun/ChatGLM-6B/README.md deleted file mode 100644 index 27b28672dd3482aa29f40bf55926adaa071c186f..0000000000000000000000000000000000000000 --- a/spaces/aodianyun/ChatGLM-6B/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: ChatGLM 6B -emoji: 🏃 -colorFrom: indigo -colorTo: pink -sdk: gradio -sdk_version: 3.21.0 -app_file: app.py -pinned: false -license: mit -duplicated_from: multimodalart/ChatGLM-6B ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/aphenx/bingo/src/components/chat-image.tsx b/spaces/aphenx/bingo/src/components/chat-image.tsx deleted file mode 100644 index 05ecc9771eada27a0f2d160bb01cba170d37bb09..0000000000000000000000000000000000000000 --- a/spaces/aphenx/bingo/src/components/chat-image.tsx +++ /dev/null @@ -1,170 +0,0 @@ -import { - useEffect, - useState, - useCallback, - ChangeEvent, - ClipboardEvent, - MouseEventHandler, - FormEvent, - useRef -} from "react" -import Image from 'next/image' -import PasteIcon from '@/assets/images/paste.svg' -import UploadIcon from '@/assets/images/upload.svg' -import CameraIcon from '@/assets/images/camera.svg' -import { useBing } from '@/lib/hooks/use-bing' -import { cn } from '@/lib/utils' - -interface ChatImageProps extends Pick, 'uploadImage'> {} - -const preventDefault: MouseEventHandler = (event) => { - event.nativeEvent.stopImmediatePropagation() -} - -const toBase64 = (file: File): Promise => new Promise((resolve, reject) => { - const reader = new FileReader() - reader.readAsDataURL(file) - reader.onload = () => resolve(reader.result as string) - reader.onerror = reject -}) - -export function ChatImage({ children, uploadImage }: React.PropsWithChildren) { - const videoRef = useRef(null) - const canvasRef = useRef(null) - const mediaStream = useRef() - const [panel, setPanel] = useState('none') - - const upload = useCallback((url: string) => { - if (url) { - uploadImage(url) - } - setPanel('none') - }, [panel]) - - const onUpload = useCallback(async (event: ChangeEvent) => { - const file = event.target.files?.[0] - if (file) { - const fileDataUrl = await 
toBase64(file) - if (fileDataUrl) { - upload(fileDataUrl) - } - } - }, []) - - const onPaste = useCallback((event: ClipboardEvent) => { - const pasteUrl = event.clipboardData.getData('text') ?? '' - upload(pasteUrl) - }, []) - - const onEnter = useCallback((event: FormEvent) => { - event.preventDefault() - event.stopPropagation() - // @ts-ignore - const inputUrl = event.target.elements.image.value - if (inputUrl) { - upload(inputUrl) - } - }, []) - - const openVideo: MouseEventHandler = async (event) => { - event.stopPropagation() - setPanel('camera-mode') - } - - const onCapture = () => { - if (canvasRef.current && videoRef.current) { - const canvas = canvasRef.current - canvas.width = videoRef.current!.videoWidth - canvas.height = videoRef.current!.videoHeight - canvas.getContext('2d')?.drawImage(videoRef.current, 0, 0, canvas.width, canvas.height) - const cameraUrl = canvas.toDataURL('image/jpeg') - upload(cameraUrl) - } - } - - useEffect(() => { - const handleBlur = () => { - if (panel !== 'none') { - setPanel('none') - } - } - document.addEventListener('click', handleBlur) - return () => { - document.removeEventListener('click', handleBlur) - } - }, [panel]) - - useEffect(() => { - if (panel === 'camera-mode') { - navigator.mediaDevices.getUserMedia({ video: true, audio: false }) - .then(videoStream => { - mediaStream.current = videoStream - if (videoRef.current) { - videoRef.current.srcObject = videoStream - } - }) - } else { - if (mediaStream.current) { - mediaStream.current.getTracks().forEach(function(track) { - track.stop() - }) - mediaStream.current = undefined - } - } - }, [panel]) - - return ( -
-      {/* markup lost in extraction — recoverable structure: a trigger that toggles the panel
-          (panel === 'none' ? setPanel('normal') : setPanel('none')) around {children}; an
-          "添加图像" (add image) panel offering paste, URL input, file upload and camera options;
-          and a camera-mode view with a video preview, a capture button and a canvas for the frame. */}
- ) -} diff --git a/spaces/aphenx/bingo/tailwind.config.js b/spaces/aphenx/bingo/tailwind.config.js deleted file mode 100644 index 03da3c3c45be6983b9f5ffa6df5f1fd0870e9636..0000000000000000000000000000000000000000 --- a/spaces/aphenx/bingo/tailwind.config.js +++ /dev/null @@ -1,48 +0,0 @@ -/** @type {import('tailwindcss').Config} */ -module.exports = { - content: [ - './src/pages/**/*.{js,ts,jsx,tsx,mdx}', - './src/components/**/*.{js,ts,jsx,tsx,mdx}', - './src/app/**/*.{js,ts,jsx,tsx,mdx}', - './src/ui/**/*.{js,ts,jsx,tsx,mdx}', - ], - "darkMode": "class", - theme: { - extend: { - colors: { - 'primary-blue': 'rgb(var(--color-primary-blue) / )', - secondary: 'rgb(var(--color-secondary) / )', - 'primary-background': 'rgb(var(--primary-background) / )', - 'primary-text': 'rgb(var(--primary-text) / )', - 'secondary-text': 'rgb(var(--secondary-text) / )', - 'light-text': 'rgb(var(--light-text) / )', - 'primary-border': 'rgb(var(--primary-border) / )', - }, - keyframes: { - slideDownAndFade: { - from: { opacity: 0, transform: 'translateY(-2px)' }, - to: { opacity: 1, transform: 'translateY(0)' }, - }, - slideLeftAndFade: { - from: { opacity: 0, transform: 'translateX(2px)' }, - to: { opacity: 1, transform: 'translateX(0)' }, - }, - slideUpAndFade: { - from: { opacity: 0, transform: 'translateY(2px)' }, - to: { opacity: 1, transform: 'translateY(0)' }, - }, - slideRightAndFade: { - from: { opacity: 0, transform: 'translateX(2px)' }, - to: { opacity: 1, transform: 'translateX(0)' }, - }, - }, - animation: { - slideDownAndFade: 'slideDownAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - slideLeftAndFade: 'slideLeftAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - slideUpAndFade: 'slideUpAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - slideRightAndFade: 'slideRightAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - }, - }, - }, - plugins: [require('@headlessui/tailwindcss'), require('tailwind-scrollbar')], -} diff --git a/spaces/aquaaaaaaaaaaaa/AI-minato_aqua/models.py b/spaces/aquaaaaaaaaaaaa/AI-minato_aqua/models.py deleted file mode 100644 index 5d8f154887a43a5c5f67cf6340f74268398e32d5..0000000000000000000000000000000000000000 --- a/spaces/aquaaaaaaaaaaaa/AI-minato_aqua/models.py +++ /dev/null @@ -1,351 +0,0 @@ -import copy -import math -import torch -from torch import nn -from torch.nn import functional as F - -import attentions -import commons -import modules - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from commons import init_weights, get_padding -from vdecoder.hifigan.models import Generator -from utils import f0_to_coarse - -class ResidualCouplingBlock(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True)) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, 
reverse=reverse) - return x - - -class Encoder(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - # print(x.shape,x_lengths.shape) - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - -class TextEncoder(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - filter_channels=None, - n_heads=None, - p_dropout=None): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - self.f0_emb = nn.Embedding(256, hidden_channels) - - self.enc_ = attentions.Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - - def forward(self, x, x_lengths, f0=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - x = self.pre(x) * x_mask - x = x + self.f0_emb(f0).transpose(1,2) - x = self.enc_(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - - return z, m, logs, x_mask - - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = 
self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2,3,5,7,11] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class SpeakerEncoder(torch.nn.Module): - def __init__(self, mel_n_channels=80, model_num_layers=3, model_hidden_size=256, model_embedding_size=256): - super(SpeakerEncoder, self).__init__() - self.lstm = nn.LSTM(mel_n_channels, model_hidden_size, model_num_layers, batch_first=True) - self.linear = nn.Linear(model_hidden_size, model_embedding_size) - self.relu = nn.ReLU() - - def forward(self, mels): - self.lstm.flatten_parameters() - _, (hidden, _) = self.lstm(mels) - embeds_raw = self.relu(self.linear(hidden[-1])) - return embeds_raw / torch.norm(embeds_raw, dim=1, keepdim=True) - - def compute_partial_slices(self, total_frames, partial_frames, partial_hop): - mel_slices = [] - for i in range(0, total_frames-partial_frames, partial_hop): - mel_range = torch.arange(i, i+partial_frames) - mel_slices.append(mel_range) - - return mel_slices - - def embed_utterance(self, mel, partial_frames=128, partial_hop=64): - mel_len = mel.size(1) - last_mel = mel[:,-partial_frames:] - - if mel_len > partial_frames: - mel_slices = self.compute_partial_slices(mel_len, partial_frames, partial_hop) - mels = list(mel[:,s] for s in mel_slices) - mels.append(last_mel) - mels = torch.stack(tuple(mels), 0).squeeze(1) - - with torch.no_grad(): - partial_embeds = self(mels) - embed = torch.mean(partial_embeds, axis=0).unsqueeze(0) - #embed = embed / torch.linalg.norm(embed, 2) - else: - with torch.no_grad(): - embed = self(last_mel) - - return embed - - -class SynthesizerTrn(nn.Module): - """ - Synthesizer for Training - """ - - def __init__(self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - ssl_dim, - n_speakers, - **kwargs): - - super().__init__() - 
self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - self.ssl_dim = ssl_dim - self.emb_g = nn.Embedding(n_speakers, gin_channels) - - self.enc_p_ = TextEncoder(ssl_dim, inter_channels, hidden_channels, 5, 1, 16,0, filter_channels, n_heads, p_dropout) - hps = { - "sampling_rate": 48000, - "inter_channels": 192, - "resblock": "1", - "resblock_kernel_sizes": [3, 7, 11], - "resblock_dilation_sizes": [[1, 3, 5], [1, 3, 5], [1, 3, 5]], - "upsample_rates": [10, 8, 2, 2], - "upsample_initial_channel": 512, - "upsample_kernel_sizes": [16, 16, 4, 4], - "gin_channels": 256, - } - self.dec = Generator(h=hps) - self.enc_q = Encoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels) - self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels) - - def forward(self, c, f0, spec, g=None, mel=None, c_lengths=None, spec_lengths=None): - if c_lengths == None: - c_lengths = (torch.ones(c.size(0)) * c.size(-1)).to(c.device) - if spec_lengths == None: - spec_lengths = (torch.ones(spec.size(0)) * spec.size(-1)).to(spec.device) - - g = self.emb_g(g).transpose(1,2) - - z_ptemp, m_p, logs_p, _ = self.enc_p_(c, c_lengths, f0=f0_to_coarse(f0)) - z, m_q, logs_q, spec_mask = self.enc_q(spec, spec_lengths, g=g) - - z_p = self.flow(z, spec_mask, g=g) - z_slice, pitch_slice, ids_slice = commons.rand_slice_segments_with_pitch(z, f0, spec_lengths, self.segment_size) - - # o = self.dec(z_slice, g=g) - o = self.dec(z_slice, g=g, f0=pitch_slice) - - return o, ids_slice, spec_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, c, f0, g=None, mel=None, c_lengths=None): - if c_lengths == None: - c_lengths = (torch.ones(c.size(0)) * c.size(-1)).to(c.device) - g = self.emb_g(g).transpose(1,2) - - z_p, m_p, logs_p, c_mask = self.enc_p_(c, c_lengths, f0=f0_to_coarse(f0)) - z = self.flow(z_p, c_mask, g=g, reverse=True) - - o = self.dec(z * c_mask, g=g, f0=f0) - - return o diff --git a/spaces/artificialguybr/video-dubbing/whisper/whisper/transcribe.py b/spaces/artificialguybr/video-dubbing/whisper/whisper/transcribe.py deleted file mode 100644 index 6e43a22faefb8b1ce927cf4e019a2c03e19be0db..0000000000000000000000000000000000000000 --- a/spaces/artificialguybr/video-dubbing/whisper/whisper/transcribe.py +++ /dev/null @@ -1,461 +0,0 @@ -import argparse -import os -import warnings -from typing import TYPE_CHECKING, Optional, Tuple, Union - -import numpy as np -import torch -import tqdm - -from .audio import ( - FRAMES_PER_SECOND, - HOP_LENGTH, - N_FRAMES, - N_SAMPLES, - SAMPLE_RATE, - log_mel_spectrogram, - pad_or_trim, -) -from .decoding import DecodingOptions, DecodingResult -from .timing import add_word_timestamps -from .tokenizer import LANGUAGES, TO_LANGUAGE_CODE, get_tokenizer -from .utils import ( - exact_div, - format_timestamp, - get_writer, - make_safe, - optional_float, - optional_int, - str2bool, -) - -if TYPE_CHECKING: - from .model import Whisper - - -def transcribe( - 
model: "Whisper", - audio: Union[str, np.ndarray, torch.Tensor], - *, - verbose: Optional[bool] = None, - temperature: Union[float, Tuple[float, ...]] = (0.0, 0.2, 0.4, 0.6, 0.8, 1.0), - compression_ratio_threshold: Optional[float] = 2.4, - logprob_threshold: Optional[float] = -1.0, - no_speech_threshold: Optional[float] = 0.6, - condition_on_previous_text: bool = True, - initial_prompt: Optional[str] = None, - word_timestamps: bool = False, - prepend_punctuations: str = "\"'“¿([{-", - append_punctuations: str = "\"'.。,,!!??::”)]}、", - **decode_options, -): - """ - Transcribe an audio file using Whisper - - Parameters - ---------- - model: Whisper - The Whisper model instance - - audio: Union[str, np.ndarray, torch.Tensor] - The path to the audio file to open, or the audio waveform - - verbose: bool - Whether to display the text being decoded to the console. If True, displays all the details, - If False, displays minimal details. If None, does not display anything - - temperature: Union[float, Tuple[float, ...]] - Temperature for sampling. It can be a tuple of temperatures, which will be successively used - upon failures according to either `compression_ratio_threshold` or `logprob_threshold`. - - compression_ratio_threshold: float - If the gzip compression ratio is above this value, treat as failed - - logprob_threshold: float - If the average log probability over sampled tokens is below this value, treat as failed - - no_speech_threshold: float - If the no_speech probability is higher than this value AND the average log probability - over sampled tokens is below `logprob_threshold`, consider the segment as silent - - condition_on_previous_text: bool - if True, the previous output of the model is provided as a prompt for the next window; - disabling may make the text inconsistent across windows, but the model becomes less prone to - getting stuck in a failure loop, such as repetition looping or timestamps going out of sync. - - word_timestamps: bool - Extract word-level timestamps using the cross-attention pattern and dynamic time warping, - and include the timestamps for each word in each segment. - - prepend_punctuations: str - If word_timestamps is True, merge these punctuation symbols with the next word - - append_punctuations: str - If word_timestamps is True, merge these punctuation symbols with the previous word - - initial_prompt: Optional[str] - Optional text to provide as a prompt for the first window. This can be used to provide, or - "prompt-engineer" a context for transcription, e.g. custom vocabularies or proper nouns - to make it more likely to predict those word correctly. - - decode_options: dict - Keyword arguments to construct `DecodingOptions` instances - - Returns - ------- - A dictionary containing the resulting text ("text") and segment-level details ("segments"), and - the spoken language ("language"), which is detected when `decode_options["language"]` is None. 
- """ - dtype = torch.float16 if decode_options.get("fp16", True) else torch.float32 - if model.device == torch.device("cpu"): - if torch.cuda.is_available(): - warnings.warn("Performing inference on CPU when CUDA is available") - if dtype == torch.float16: - warnings.warn("FP16 is not supported on CPU; using FP32 instead") - dtype = torch.float32 - - if dtype == torch.float32: - decode_options["fp16"] = False - - # Pad 30-seconds of silence to the input audio, for slicing - mel = log_mel_spectrogram(audio, padding=N_SAMPLES) - content_frames = mel.shape[-1] - N_FRAMES - - if decode_options.get("language", None) is None: - if not model.is_multilingual: - decode_options["language"] = "en" - else: - if verbose: - print( - "Detecting language using up to the first 30 seconds. Use `--language` to specify the language" - ) - mel_segment = pad_or_trim(mel, N_FRAMES).to(model.device).to(dtype) - _, probs = model.detect_language(mel_segment) - decode_options["language"] = max(probs, key=probs.get) - if verbose is not None: - print( - f"Detected language: {LANGUAGES[decode_options['language']].title()}" - ) - - language: str = decode_options["language"] - task: str = decode_options.get("task", "transcribe") - tokenizer = get_tokenizer(model.is_multilingual, language=language, task=task) - - if word_timestamps and task == "translate": - warnings.warn("Word-level timestamps on translations may not be reliable.") - - def decode_with_fallback(segment: torch.Tensor) -> DecodingResult: - temperatures = ( - [temperature] if isinstance(temperature, (int, float)) else temperature - ) - decode_result = None - - for t in temperatures: - kwargs = {**decode_options} - if t > 0: - # disable beam_size and patience when t > 0 - kwargs.pop("beam_size", None) - kwargs.pop("patience", None) - else: - # disable best_of when t == 0 - kwargs.pop("best_of", None) - - options = DecodingOptions(**kwargs, temperature=t) - decode_result = model.decode(segment, options) - - needs_fallback = False - if ( - compression_ratio_threshold is not None - and decode_result.compression_ratio > compression_ratio_threshold - ): - needs_fallback = True # too repetitive - if ( - logprob_threshold is not None - and decode_result.avg_logprob < logprob_threshold - ): - needs_fallback = True # average log probability is too low - if ( - no_speech_threshold is not None - and decode_result.no_speech_prob > no_speech_threshold - ): - needs_fallback = False # silence - if not needs_fallback: - break - - return decode_result - - seek = 0 - input_stride = exact_div( - N_FRAMES, model.dims.n_audio_ctx - ) # mel frames per output token: 2 - time_precision = ( - input_stride * HOP_LENGTH / SAMPLE_RATE - ) # time per output token: 0.02 (seconds) - all_tokens = [] - all_segments = [] - prompt_reset_since = 0 - - if initial_prompt is not None: - initial_prompt_tokens = tokenizer.encode(" " + initial_prompt.strip()) - all_tokens.extend(initial_prompt_tokens) - else: - initial_prompt_tokens = [] - - def new_segment( - *, start: float, end: float, tokens: torch.Tensor, result: DecodingResult - ): - tokens = tokens.tolist() - text_tokens = [token for token in tokens if token < tokenizer.eot] - return { - "seek": seek, - "start": start, - "end": end, - "text": tokenizer.decode(text_tokens), - "tokens": tokens, - "temperature": result.temperature, - "avg_logprob": result.avg_logprob, - "compression_ratio": result.compression_ratio, - "no_speech_prob": result.no_speech_prob, - } - - # show the progress bar when verbose is False (if True, transcribed text will be 
printed) - with tqdm.tqdm( - total=content_frames, unit="frames", disable=verbose is not False - ) as pbar: - last_speech_timestamp = 0.0 - while seek < content_frames: - time_offset = float(seek * HOP_LENGTH / SAMPLE_RATE) - mel_segment = mel[:, seek : seek + N_FRAMES] - segment_size = min(N_FRAMES, content_frames - seek) - segment_duration = segment_size * HOP_LENGTH / SAMPLE_RATE - mel_segment = pad_or_trim(mel_segment, N_FRAMES).to(model.device).to(dtype) - - decode_options["prompt"] = all_tokens[prompt_reset_since:] - result: DecodingResult = decode_with_fallback(mel_segment) - tokens = torch.tensor(result.tokens) - - if no_speech_threshold is not None: - # no voice activity check - should_skip = result.no_speech_prob > no_speech_threshold - if ( - logprob_threshold is not None - and result.avg_logprob > logprob_threshold - ): - # don't skip if the logprob is high enough, despite the no_speech_prob - should_skip = False - - if should_skip: - seek += segment_size # fast-forward to the next segment boundary - continue - - previous_seek = seek - current_segments = [] - - timestamp_tokens: torch.Tensor = tokens.ge(tokenizer.timestamp_begin) - single_timestamp_ending = timestamp_tokens[-2:].tolist() == [False, True] - - consecutive = torch.where(timestamp_tokens[:-1] & timestamp_tokens[1:])[0] - consecutive.add_(1) - if len(consecutive) > 0: - # if the output contains two consecutive timestamp tokens - slices = consecutive.tolist() - if single_timestamp_ending: - slices.append(len(tokens)) - - last_slice = 0 - for current_slice in slices: - sliced_tokens = tokens[last_slice:current_slice] - start_timestamp_pos = ( - sliced_tokens[0].item() - tokenizer.timestamp_begin - ) - end_timestamp_pos = ( - sliced_tokens[-1].item() - tokenizer.timestamp_begin - ) - current_segments.append( - new_segment( - start=time_offset + start_timestamp_pos * time_precision, - end=time_offset + end_timestamp_pos * time_precision, - tokens=sliced_tokens, - result=result, - ) - ) - last_slice = current_slice - - if single_timestamp_ending: - # single timestamp at the end means no speech after the last timestamp. - seek += segment_size - else: - # otherwise, ignore the unfinished segment and seek to the last timestamp - last_timestamp_pos = ( - tokens[last_slice - 1].item() - tokenizer.timestamp_begin - ) - seek += last_timestamp_pos * input_stride - else: - duration = segment_duration - timestamps = tokens[timestamp_tokens.nonzero().flatten()] - if ( - len(timestamps) > 0 - and timestamps[-1].item() != tokenizer.timestamp_begin - ): - # no consecutive timestamps but it has a timestamp; use the last one. 
- last_timestamp_pos = ( - timestamps[-1].item() - tokenizer.timestamp_begin - ) - duration = last_timestamp_pos * time_precision - - current_segments.append( - new_segment( - start=time_offset, - end=time_offset + duration, - tokens=tokens, - result=result, - ) - ) - seek += segment_size - - if word_timestamps: - add_word_timestamps( - segments=current_segments, - model=model, - tokenizer=tokenizer, - mel=mel_segment, - num_frames=segment_size, - prepend_punctuations=prepend_punctuations, - append_punctuations=append_punctuations, - last_speech_timestamp=last_speech_timestamp, - ) - word_end_timestamps = [ - w["end"] for s in current_segments for w in s["words"] - ] - if len(word_end_timestamps) > 0: - last_speech_timestamp = word_end_timestamps[-1] - if not single_timestamp_ending and len(word_end_timestamps) > 0: - seek_shift = round( - (word_end_timestamps[-1] - time_offset) * FRAMES_PER_SECOND - ) - if seek_shift > 0: - seek = previous_seek + seek_shift - - if verbose: - for segment in current_segments: - start, end, text = segment["start"], segment["end"], segment["text"] - line = f"[{format_timestamp(start)} --> {format_timestamp(end)}] {text}" - print(make_safe(line)) - - # if a segment is instantaneous or does not contain text, clear it - for i, segment in enumerate(current_segments): - if segment["start"] == segment["end"] or segment["text"].strip() == "": - segment["text"] = "" - segment["tokens"] = [] - segment["words"] = [] - - all_segments.extend( - [ - {"id": i, **segment} - for i, segment in enumerate( - current_segments, start=len(all_segments) - ) - ] - ) - all_tokens.extend( - [token for segment in current_segments for token in segment["tokens"]] - ) - - if not condition_on_previous_text or result.temperature > 0.5: - # do not feed the prompt tokens if a high temperature was used - prompt_reset_since = len(all_tokens) - - # update progress bar - pbar.update(min(content_frames, seek) - previous_seek) - - return dict( - text=tokenizer.decode(all_tokens[len(initial_prompt_tokens) :]), - segments=all_segments, - language=language, - ) - - -def cli(): - from . 
import available_models - - # fmt: off - parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parser.add_argument("audio", nargs="+", type=str, help="audio file(s) to transcribe") - parser.add_argument("--model", default="small", choices=available_models(), help="name of the Whisper model to use") - parser.add_argument("--model_dir", type=str, default=None, help="the path to save model files; uses ~/.cache/whisper by default") - parser.add_argument("--device", default="cuda" if torch.cuda.is_available() else "cpu", help="device to use for PyTorch inference") - parser.add_argument("--output_dir", "-o", type=str, default=".", help="directory to save the outputs") - parser.add_argument("--output_format", "-f", type=str, default="all", choices=["txt", "vtt", "srt", "tsv", "json", "all"], help="format of the output file; if not specified, all available formats will be produced") - parser.add_argument("--verbose", type=str2bool, default=True, help="whether to print out the progress and debug messages") - - parser.add_argument("--task", type=str, default="transcribe", choices=["transcribe", "translate"], help="whether to perform X->X speech recognition ('transcribe') or X->English translation ('translate')") - parser.add_argument("--language", type=str, default=None, choices=sorted(LANGUAGES.keys()) + sorted([k.title() for k in TO_LANGUAGE_CODE.keys()]), help="language spoken in the audio, specify None to perform language detection") - - parser.add_argument("--temperature", type=float, default=0, help="temperature to use for sampling") - parser.add_argument("--best_of", type=optional_int, default=5, help="number of candidates when sampling with non-zero temperature") - parser.add_argument("--beam_size", type=optional_int, default=5, help="number of beams in beam search, only applicable when temperature is zero") - parser.add_argument("--patience", type=float, default=None, help="optional patience value to use in beam decoding, as in https://arxiv.org/abs/2204.05424, the default (1.0) is equivalent to conventional beam search") - parser.add_argument("--length_penalty", type=float, default=None, help="optional token length penalty coefficient (alpha) as in https://arxiv.org/abs/1609.08144, uses simple length normalization by default") - - parser.add_argument("--suppress_tokens", type=str, default="-1", help="comma-separated list of token ids to suppress during sampling; '-1' will suppress most special characters except common punctuations") - parser.add_argument("--initial_prompt", type=str, default=None, help="optional text to provide as a prompt for the first window.") - parser.add_argument("--condition_on_previous_text", type=str2bool, default=True, help="if True, provide the previous output of the model as a prompt for the next window; disabling may make the text inconsistent across windows, but the model becomes less prone to getting stuck in a failure loop") - parser.add_argument("--fp16", type=str2bool, default=True, help="whether to perform inference in fp16; True by default") - - parser.add_argument("--temperature_increment_on_fallback", type=optional_float, default=0.2, help="temperature to increase when falling back when the decoding fails to meet either of the thresholds below") - parser.add_argument("--compression_ratio_threshold", type=optional_float, default=2.4, help="if the gzip compression ratio is higher than this value, treat the decoding as failed") - parser.add_argument("--logprob_threshold", type=optional_float, default=-1.0, help="if the 
average log probability is lower than this value, treat the decoding as failed") - parser.add_argument("--no_speech_threshold", type=optional_float, default=0.6, help="if the probability of the <|nospeech|> token is higher than this value AND the decoding has failed due to `logprob_threshold`, consider the segment as silence") - parser.add_argument("--word_timestamps", type=str2bool, default=False, help="(experimental) extract word-level timestamps and refine the results based on them") - parser.add_argument("--prepend_punctuations", type=str, default="\"\'“¿([{-", help="if word_timestamps is True, merge these punctuation symbols with the next word") - parser.add_argument("--append_punctuations", type=str, default="\"\'.。,,!!??::”)]}、", help="if word_timestamps is True, merge these punctuation symbols with the previous word") - parser.add_argument("--highlight_words", type=str2bool, default=False, help="(requires --word_timestamps True) underline each word as it is spoken in srt and vtt") - parser.add_argument("--max_line_width", type=optional_int, default=None, help="(requires --word_timestamps True) the maximum number of characters in a line before breaking the line") - parser.add_argument("--max_line_count", type=optional_int, default=None, help="(requires --word_timestamps True) the maximum number of lines in a segment") - parser.add_argument("--threads", type=optional_int, default=0, help="number of threads used by torch for CPU inference; supersedes MKL_NUM_THREADS/OMP_NUM_THREADS") - # fmt: on - - args = parser.parse_args().__dict__ - model_name: str = args.pop("model") - model_dir: str = args.pop("model_dir") - output_dir: str = args.pop("output_dir") - output_format: str = args.pop("output_format") - device: str = args.pop("device") - os.makedirs(output_dir, exist_ok=True) - - if model_name.endswith(".en") and args["language"] not in {"en", "English"}: - if args["language"] is not None: - warnings.warn( - f"{model_name} is an English-only model but received '{args['language']}'; using English instead." - ) - args["language"] = "en" - - temperature = args.pop("temperature") - if (increment := args.pop("temperature_increment_on_fallback")) is not None: - temperature = tuple(np.arange(temperature, 1.0 + 1e-6, increment)) - else: - temperature = [temperature] - - if (threads := args.pop("threads")) > 0: - torch.set_num_threads(threads) - - from . 
import load_model - - model = load_model(model_name, device=device, download_root=model_dir) - - writer = get_writer(output_format, output_dir) - word_options = ["highlight_words", "max_line_count", "max_line_width"] - if not args["word_timestamps"]: - for option in word_options: - if args[option]: - parser.error(f"--{option} requires --word_timestamps True") - if args["max_line_count"] and not args["max_line_width"]: - warnings.warn("--max_line_count has no effect without --max_line_width") - writer_args = {arg: args.pop(arg) for arg in word_options} - for audio_path in args.pop("audio"): - result = transcribe(model, audio_path, temperature=temperature, **args) - writer(result, audio_path, writer_args) - - -if __name__ == "__main__": - cli() diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Utility/AsyncGen.c b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Utility/AsyncGen.c deleted file mode 100644 index 9a11d6a129ccbc7a7590b058f3dc21fdc7049fa1..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Utility/AsyncGen.c +++ /dev/null @@ -1,1133 +0,0 @@ -// This is copied from genobject.c in CPython 3.6. -// Try to keep it in sync by doing this from time to time: -// sed -e 's|__pyx_||ig' Cython/Utility/AsyncGen.c | diff -udw - cpython/Objects/genobject.c | less - -//////////////////// AsyncGenerator.proto //////////////////// -//@requires: Coroutine.c::Coroutine - -#define __Pyx_AsyncGen_USED -typedef struct { - __pyx_CoroutineObject coro; - PyObject *ag_finalizer; - int ag_hooks_inited; - int ag_closed; -} __pyx_PyAsyncGenObject; - -static PyTypeObject *__pyx__PyAsyncGenWrappedValueType = 0; -static PyTypeObject *__pyx__PyAsyncGenASendType = 0; -static PyTypeObject *__pyx__PyAsyncGenAThrowType = 0; -static PyTypeObject *__pyx_AsyncGenType = 0; - -#define __Pyx_AsyncGen_CheckExact(obj) (Py_TYPE(obj) == __pyx_AsyncGenType) -#define __pyx_PyAsyncGenASend_CheckExact(o) \ - (Py_TYPE(o) == __pyx__PyAsyncGenASendType) -#define __pyx_PyAsyncGenAThrow_CheckExact(o) \ - (Py_TYPE(o) == __pyx__PyAsyncGenAThrowType) - -static PyObject *__Pyx_async_gen_anext(PyObject *o); -static CYTHON_INLINE PyObject *__Pyx_async_gen_asend_iternext(PyObject *o); -static PyObject *__Pyx_async_gen_asend_send(PyObject *o, PyObject *arg); -static PyObject *__Pyx_async_gen_asend_close(PyObject *o, PyObject *args); -static PyObject *__Pyx_async_gen_athrow_close(PyObject *o, PyObject *args); - -static PyObject *__Pyx__PyAsyncGenValueWrapperNew(PyObject *val); - - -static __pyx_CoroutineObject *__Pyx_AsyncGen_New( - __pyx_coroutine_body_t body, PyObject *code, PyObject *closure, - PyObject *name, PyObject *qualname, PyObject *module_name) { - __pyx_PyAsyncGenObject *gen = PyObject_GC_New(__pyx_PyAsyncGenObject, __pyx_AsyncGenType); - if (unlikely(!gen)) - return NULL; - gen->ag_finalizer = NULL; - gen->ag_closed = 0; - gen->ag_hooks_inited = 0; - return __Pyx__Coroutine_NewInit((__pyx_CoroutineObject*)gen, body, code, closure, name, qualname, module_name); -} - -static int __pyx_AsyncGen_init(void); -static void __Pyx_PyAsyncGen_Fini(void); - -//////////////////// AsyncGenerator.cleanup //////////////////// - -__Pyx_PyAsyncGen_Fini(); - -//////////////////// AsyncGeneratorInitFinalizer //////////////////// - -// this is separated out because it needs more adaptation - -#if PY_VERSION_HEX < 0x030600B0 -static int __Pyx_async_gen_init_hooks(__pyx_PyAsyncGenObject *o) { -#if 0 - // TODO: implement finalizer support in older 
Python versions - PyThreadState *tstate; - PyObject *finalizer; - PyObject *firstiter; -#endif - - if (likely(o->ag_hooks_inited)) { - return 0; - } - - o->ag_hooks_inited = 1; - -#if 0 - tstate = __Pyx_PyThreadState_Current; - - finalizer = tstate->async_gen_finalizer; - if (finalizer) { - Py_INCREF(finalizer); - o->ag_finalizer = finalizer; - } - - firstiter = tstate->async_gen_firstiter; - if (firstiter) { - PyObject *res; - - Py_INCREF(firstiter); - res = __Pyx_PyObject_CallOneArg(firstiter, (PyObject*)o); - Py_DECREF(firstiter); - if (res == NULL) { - return 1; - } - Py_DECREF(res); - } -#endif - - return 0; -} -#endif - - -//////////////////// AsyncGenerator //////////////////// -//@requires: AsyncGeneratorInitFinalizer -//@requires: Coroutine.c::Coroutine -//@requires: Coroutine.c::ReturnWithStopIteration -//@requires: ObjectHandling.c::PyObjectCall2Args -//@requires: ObjectHandling.c::PyObject_GenericGetAttrNoDict - -PyDoc_STRVAR(__Pyx_async_gen_send_doc, -"send(arg) -> send 'arg' into generator,\n\ -return next yielded value or raise StopIteration."); - -PyDoc_STRVAR(__Pyx_async_gen_close_doc, -"close() -> raise GeneratorExit inside generator."); - -PyDoc_STRVAR(__Pyx_async_gen_throw_doc, -"throw(typ[,val[,tb]]) -> raise exception in generator,\n\ -return next yielded value or raise StopIteration."); - -PyDoc_STRVAR(__Pyx_async_gen_await_doc, -"__await__() -> return a representation that can be passed into the 'await' expression."); - -// COPY STARTS HERE: - -static PyObject *__Pyx_async_gen_asend_new(__pyx_PyAsyncGenObject *, PyObject *); -static PyObject *__Pyx_async_gen_athrow_new(__pyx_PyAsyncGenObject *, PyObject *); - -static const char *__Pyx_NON_INIT_CORO_MSG = "can't send non-None value to a just-started coroutine"; -static const char *__Pyx_ASYNC_GEN_IGNORED_EXIT_MSG = "async generator ignored GeneratorExit"; - -typedef enum { - __PYX_AWAITABLE_STATE_INIT, /* new awaitable, has not yet been iterated */ - __PYX_AWAITABLE_STATE_ITER, /* being iterated */ - __PYX_AWAITABLE_STATE_CLOSED, /* closed */ -} __pyx_AwaitableState; - -typedef struct { - PyObject_HEAD - __pyx_PyAsyncGenObject *ags_gen; - - /* Can be NULL, when in the __anext__() mode (equivalent of "asend(None)") */ - PyObject *ags_sendval; - - __pyx_AwaitableState ags_state; -} __pyx_PyAsyncGenASend; - - -typedef struct { - PyObject_HEAD - __pyx_PyAsyncGenObject *agt_gen; - - /* Can be NULL, when in the "aclose()" mode (equivalent of "athrow(GeneratorExit)") */ - PyObject *agt_args; - - __pyx_AwaitableState agt_state; -} __pyx_PyAsyncGenAThrow; - - -typedef struct { - PyObject_HEAD - PyObject *agw_val; -} __pyx__PyAsyncGenWrappedValue; - - -#ifndef _PyAsyncGen_MAXFREELIST -#define _PyAsyncGen_MAXFREELIST 80 -#endif - -// Freelists boost performance 6-10%; they also reduce memory -// fragmentation, as _PyAsyncGenWrappedValue and PyAsyncGenASend -// are short-living objects that are instantiated for every -// __anext__ call. 
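As a rough, standalone sketch of the allocate-or-reuse pattern this comment describes (all names below are hypothetical and not part of AsyncGen.c, which caches __pyx__PyAsyncGenWrappedValue and __pyx_PyAsyncGenASend objects instead), a freelist boils down to:

    #include <stdlib.h>

    #define SKETCH_MAXFREELIST 80  /* mirrors _PyAsyncGen_MAXFREELIST */

    typedef struct sketch_obj { int payload; } sketch_obj;

    static sketch_obj *sketch_freelist[SKETCH_MAXFREELIST];
    static int sketch_freelist_free = 0;

    /* Allocation: pop a cached object if one is available, otherwise malloc. */
    static sketch_obj *sketch_obj_new(void) {
        if (sketch_freelist_free) {
            return sketch_freelist[--sketch_freelist_free];
        }
        return (sketch_obj *)malloc(sizeof(sketch_obj));
    }

    /* Deallocation: push the object back onto the freelist while there is room,
       so the next sketch_obj_new() call skips the allocator entirely. */
    static void sketch_obj_del(sketch_obj *o) {
        if (sketch_freelist_free < SKETCH_MAXFREELIST) {
            sketch_freelist[sketch_freelist_free++] = o;
        } else {
            free(o);
        }
    }

The freelist arrays declared next set up exactly this kind of cache: __Pyx_async_gen_asend_dealloc() pushes spent objects onto __Pyx_ag_asend_freelist, and __Pyx_async_gen_asend_new() pops them back (with _Py_NewReference()) before falling back to PyObject_GC_New().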
- -static __pyx__PyAsyncGenWrappedValue *__Pyx_ag_value_freelist[_PyAsyncGen_MAXFREELIST]; -static int __Pyx_ag_value_freelist_free = 0; - -static __pyx_PyAsyncGenASend *__Pyx_ag_asend_freelist[_PyAsyncGen_MAXFREELIST]; -static int __Pyx_ag_asend_freelist_free = 0; - -#define __pyx__PyAsyncGenWrappedValue_CheckExact(o) \ - (Py_TYPE(o) == __pyx__PyAsyncGenWrappedValueType) - - -static int -__Pyx_async_gen_traverse(__pyx_PyAsyncGenObject *gen, visitproc visit, void *arg) -{ - Py_VISIT(gen->ag_finalizer); - return __Pyx_Coroutine_traverse((__pyx_CoroutineObject*)gen, visit, arg); -} - - -static PyObject * -__Pyx_async_gen_repr(__pyx_CoroutineObject *o) -{ - // avoid NULL pointer dereference for qualname during garbage collection - return PyUnicode_FromFormat("<async_generator object %S at %p>", - o->gi_qualname ? o->gi_qualname : Py_None, o); -} - - -#if PY_VERSION_HEX >= 0x030600B0 -static int -__Pyx_async_gen_init_hooks(__pyx_PyAsyncGenObject *o) -{ - PyThreadState *tstate; - PyObject *finalizer; - PyObject *firstiter; - - if (o->ag_hooks_inited) { - return 0; - } - - o->ag_hooks_inited = 1; - - tstate = __Pyx_PyThreadState_Current; - - finalizer = tstate->async_gen_finalizer; - if (finalizer) { - Py_INCREF(finalizer); - o->ag_finalizer = finalizer; - } - - firstiter = tstate->async_gen_firstiter; - if (firstiter) { - PyObject *res; -#if CYTHON_UNPACK_METHODS - PyObject *self; -#endif - - Py_INCREF(firstiter); - // at least asyncio stores methods here => optimise the call -#if CYTHON_UNPACK_METHODS - if (likely(PyMethod_Check(firstiter)) && likely((self = PyMethod_GET_SELF(firstiter)) != NULL)) { - PyObject *function = PyMethod_GET_FUNCTION(firstiter); - res = __Pyx_PyObject_Call2Args(function, self, (PyObject*)o); - } else -#endif - res = __Pyx_PyObject_CallOneArg(firstiter, (PyObject*)o); - - Py_DECREF(firstiter); - if (unlikely(res == NULL)) { - return 1; - } - Py_DECREF(res); - } - - return 0; -} -#endif - - -static PyObject * -__Pyx_async_gen_anext(PyObject *g) -{ - __pyx_PyAsyncGenObject *o = (__pyx_PyAsyncGenObject*) g; - if (__Pyx_async_gen_init_hooks(o)) { - return NULL; - } - return __Pyx_async_gen_asend_new(o, NULL); -} - -static PyObject * -__Pyx_async_gen_anext_method(PyObject *g, CYTHON_UNUSED PyObject *arg) { - return __Pyx_async_gen_anext(g); -} - - -static PyObject * -__Pyx_async_gen_asend(__pyx_PyAsyncGenObject *o, PyObject *arg) -{ - if (__Pyx_async_gen_init_hooks(o)) { - return NULL; - } - return __Pyx_async_gen_asend_new(o, arg); -} - - -static PyObject * -__Pyx_async_gen_aclose(__pyx_PyAsyncGenObject *o, CYTHON_UNUSED PyObject *arg) -{ - if (__Pyx_async_gen_init_hooks(o)) { - return NULL; - } - return __Pyx_async_gen_athrow_new(o, NULL); -} - - -static PyObject * -__Pyx_async_gen_athrow(__pyx_PyAsyncGenObject *o, PyObject *args) -{ - if (__Pyx_async_gen_init_hooks(o)) { - return NULL; - } - return __Pyx_async_gen_athrow_new(o, args); -} - - -static PyObject * -__Pyx_async_gen_self_method(PyObject *g, CYTHON_UNUSED PyObject *arg) { - return __Pyx_NewRef(g); -} - - -static PyGetSetDef __Pyx_async_gen_getsetlist[] = { - {(char*) "__name__", (getter)__Pyx_Coroutine_get_name, (setter)__Pyx_Coroutine_set_name, - (char*) PyDoc_STR("name of the async generator"), 0}, - {(char*) "__qualname__", (getter)__Pyx_Coroutine_get_qualname, (setter)__Pyx_Coroutine_set_qualname, - (char*) PyDoc_STR("qualified name of the async generator"), 0}, - //REMOVED: {(char*) "ag_await", (getter)coro_get_cr_await, NULL, - //REMOVED: (char*) PyDoc_STR("object being awaited on, or None")}, - {0, 0, 0, 0, 0} /* Sentinel */ -}; - 
-static PyMemberDef __Pyx_async_gen_memberlist[] = { - //REMOVED: {(char*) "ag_frame", T_OBJECT, offsetof(__pyx_PyAsyncGenObject, ag_frame), READONLY}, - {(char*) "ag_running", T_BOOL, offsetof(__pyx_CoroutineObject, is_running), READONLY, NULL}, - //REMOVED: {(char*) "ag_code", T_OBJECT, offsetof(__pyx_PyAsyncGenObject, ag_code), READONLY}, - //ADDED: "ag_await" - {(char*) "ag_await", T_OBJECT, offsetof(__pyx_CoroutineObject, yieldfrom), READONLY, - (char*) PyDoc_STR("object being awaited on, or None")}, - {0, 0, 0, 0, 0} /* Sentinel */ -}; - -PyDoc_STRVAR(__Pyx_async_aclose_doc, -"aclose() -> raise GeneratorExit inside generator."); - -PyDoc_STRVAR(__Pyx_async_asend_doc, -"asend(v) -> send 'v' in generator."); - -PyDoc_STRVAR(__Pyx_async_athrow_doc, -"athrow(typ[,val[,tb]]) -> raise exception in generator."); - -PyDoc_STRVAR(__Pyx_async_aiter_doc, -"__aiter__(v) -> return an asynchronous iterator."); - -PyDoc_STRVAR(__Pyx_async_anext_doc, -"__anext__(v) -> continue asynchronous iteration and return the next element."); - -static PyMethodDef __Pyx_async_gen_methods[] = { - {"asend", (PyCFunction)__Pyx_async_gen_asend, METH_O, __Pyx_async_asend_doc}, - {"athrow",(PyCFunction)__Pyx_async_gen_athrow, METH_VARARGS, __Pyx_async_athrow_doc}, - {"aclose", (PyCFunction)__Pyx_async_gen_aclose, METH_NOARGS, __Pyx_async_aclose_doc}, - {"__aiter__", (PyCFunction)__Pyx_async_gen_self_method, METH_NOARGS, __Pyx_async_aiter_doc}, - {"__anext__", (PyCFunction)__Pyx_async_gen_anext_method, METH_NOARGS, __Pyx_async_anext_doc}, - {0, 0, 0, 0} /* Sentinel */ -}; - - -#if CYTHON_USE_ASYNC_SLOTS -static __Pyx_PyAsyncMethodsStruct __Pyx_async_gen_as_async = { - 0, /* am_await */ - PyObject_SelfIter, /* am_aiter */ - (unaryfunc)__Pyx_async_gen_anext, /* am_anext */ -#if PY_VERSION_HEX >= 0x030A00A3 - 0, /*am_send*/ -#endif -}; -#endif - -static PyTypeObject __pyx_AsyncGenType_type = { - PyVarObject_HEAD_INIT(0, 0) - "async_generator", /* tp_name */ - sizeof(__pyx_PyAsyncGenObject), /* tp_basicsize */ - 0, /* tp_itemsize */ - (destructor)__Pyx_Coroutine_dealloc, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ -#if CYTHON_USE_ASYNC_SLOTS - &__Pyx_async_gen_as_async, /* tp_as_async */ -#else - 0, /*tp_reserved*/ -#endif - (reprfunc)__Pyx_async_gen_repr, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | - Py_TPFLAGS_HAVE_FINALIZE, /* tp_flags */ - 0, /* tp_doc */ - (traverseproc)__Pyx_async_gen_traverse, /* tp_traverse */ - 0, /* tp_clear */ -#if CYTHON_USE_ASYNC_SLOTS && CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 && PY_VERSION_HEX < 0x030500B1 - // in order to (mis-)use tp_reserved above, we must also implement tp_richcompare - __Pyx_Coroutine_compare, /*tp_richcompare*/ -#else - 0, /*tp_richcompare*/ -#endif - offsetof(__pyx_CoroutineObject, gi_weakreflist), /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - __Pyx_async_gen_methods, /* tp_methods */ - __Pyx_async_gen_memberlist, /* tp_members */ - __Pyx_async_gen_getsetlist, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ 
-#if CYTHON_USE_TP_FINALIZE - 0, /*tp_del*/ -#else - __Pyx_Coroutine_del, /*tp_del*/ -#endif - 0, /* tp_version_tag */ -#if CYTHON_USE_TP_FINALIZE - __Pyx_Coroutine_del, /* tp_finalize */ -#elif PY_VERSION_HEX >= 0x030400a1 - 0, /* tp_finalize */ -#endif -#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) - 0, /*tp_vectorcall*/ -#endif -#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 - 0, /*tp_print*/ -#endif -#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 - 0, /*tp_pypy_flags*/ -#endif -}; - - -static int -__Pyx_PyAsyncGen_ClearFreeLists(void) -{ - int ret = __Pyx_ag_value_freelist_free + __Pyx_ag_asend_freelist_free; - - while (__Pyx_ag_value_freelist_free) { - __pyx__PyAsyncGenWrappedValue *o; - o = __Pyx_ag_value_freelist[--__Pyx_ag_value_freelist_free]; - assert(__pyx__PyAsyncGenWrappedValue_CheckExact(o)); - PyObject_GC_Del(o); - } - - while (__Pyx_ag_asend_freelist_free) { - __pyx_PyAsyncGenASend *o; - o = __Pyx_ag_asend_freelist[--__Pyx_ag_asend_freelist_free]; - assert(Py_TYPE(o) == __pyx__PyAsyncGenASendType); - PyObject_GC_Del(o); - } - - return ret; -} - -static void -__Pyx_PyAsyncGen_Fini(void) -{ - __Pyx_PyAsyncGen_ClearFreeLists(); -} - - -static PyObject * -__Pyx_async_gen_unwrap_value(__pyx_PyAsyncGenObject *gen, PyObject *result) -{ - if (result == NULL) { - PyObject *exc_type = PyErr_Occurred(); - if (!exc_type) { - PyErr_SetNone(__Pyx_PyExc_StopAsyncIteration); - gen->ag_closed = 1; - } else if (__Pyx_PyErr_GivenExceptionMatches2(exc_type, __Pyx_PyExc_StopAsyncIteration, PyExc_GeneratorExit)) { - gen->ag_closed = 1; - } - - return NULL; - } - - if (__pyx__PyAsyncGenWrappedValue_CheckExact(result)) { - /* async yield */ - __Pyx_ReturnWithStopIteration(((__pyx__PyAsyncGenWrappedValue*)result)->agw_val); - Py_DECREF(result); - return NULL; - } - - return result; -} - - -/* ---------- Async Generator ASend Awaitable ------------ */ - - -static void -__Pyx_async_gen_asend_dealloc(__pyx_PyAsyncGenASend *o) -{ - PyObject_GC_UnTrack((PyObject *)o); - Py_CLEAR(o->ags_gen); - Py_CLEAR(o->ags_sendval); - if (__Pyx_ag_asend_freelist_free < _PyAsyncGen_MAXFREELIST) { - assert(__pyx_PyAsyncGenASend_CheckExact(o)); - __Pyx_ag_asend_freelist[__Pyx_ag_asend_freelist_free++] = o; - } else { - PyObject_GC_Del(o); - } -} - -static int -__Pyx_async_gen_asend_traverse(__pyx_PyAsyncGenASend *o, visitproc visit, void *arg) -{ - Py_VISIT(o->ags_gen); - Py_VISIT(o->ags_sendval); - return 0; -} - - -static PyObject * -__Pyx_async_gen_asend_send(PyObject *g, PyObject *arg) -{ - __pyx_PyAsyncGenASend *o = (__pyx_PyAsyncGenASend*) g; - PyObject *result; - - if (unlikely(o->ags_state == __PYX_AWAITABLE_STATE_CLOSED)) { - PyErr_SetNone(PyExc_StopIteration); - return NULL; - } - - if (o->ags_state == __PYX_AWAITABLE_STATE_INIT) { - if (arg == NULL || arg == Py_None) { - arg = o->ags_sendval ? 
o->ags_sendval : Py_None; - } - o->ags_state = __PYX_AWAITABLE_STATE_ITER; - } - - result = __Pyx_Coroutine_Send((PyObject*)o->ags_gen, arg); - result = __Pyx_async_gen_unwrap_value(o->ags_gen, result); - - if (result == NULL) { - o->ags_state = __PYX_AWAITABLE_STATE_CLOSED; - } - - return result; -} - - -static CYTHON_INLINE PyObject * -__Pyx_async_gen_asend_iternext(PyObject *o) -{ - return __Pyx_async_gen_asend_send(o, Py_None); -} - - -static PyObject * -__Pyx_async_gen_asend_throw(__pyx_PyAsyncGenASend *o, PyObject *args) -{ - PyObject *result; - - if (unlikely(o->ags_state == __PYX_AWAITABLE_STATE_CLOSED)) { - PyErr_SetNone(PyExc_StopIteration); - return NULL; - } - - result = __Pyx_Coroutine_Throw((PyObject*)o->ags_gen, args); - result = __Pyx_async_gen_unwrap_value(o->ags_gen, result); - - if (result == NULL) { - o->ags_state = __PYX_AWAITABLE_STATE_CLOSED; - } - - return result; -} - - -static PyObject * -__Pyx_async_gen_asend_close(PyObject *g, CYTHON_UNUSED PyObject *args) -{ - __pyx_PyAsyncGenASend *o = (__pyx_PyAsyncGenASend*) g; - o->ags_state = __PYX_AWAITABLE_STATE_CLOSED; - Py_RETURN_NONE; -} - - -static PyMethodDef __Pyx_async_gen_asend_methods[] = { - {"send", (PyCFunction)__Pyx_async_gen_asend_send, METH_O, __Pyx_async_gen_send_doc}, - {"throw", (PyCFunction)__Pyx_async_gen_asend_throw, METH_VARARGS, __Pyx_async_gen_throw_doc}, - {"close", (PyCFunction)__Pyx_async_gen_asend_close, METH_NOARGS, __Pyx_async_gen_close_doc}, - {"__await__", (PyCFunction)__Pyx_async_gen_self_method, METH_NOARGS, __Pyx_async_gen_await_doc}, - {0, 0, 0, 0} /* Sentinel */ -}; - - -#if CYTHON_USE_ASYNC_SLOTS -static __Pyx_PyAsyncMethodsStruct __Pyx_async_gen_asend_as_async = { - PyObject_SelfIter, /* am_await */ - 0, /* am_aiter */ - 0, /* am_anext */ -#if PY_VERSION_HEX >= 0x030A00A3 - 0, /*am_send*/ -#endif -}; -#endif - - -static PyTypeObject __pyx__PyAsyncGenASendType_type = { - PyVarObject_HEAD_INIT(0, 0) - "async_generator_asend", /* tp_name */ - sizeof(__pyx_PyAsyncGenASend), /* tp_basicsize */ - 0, /* tp_itemsize */ - /* methods */ - (destructor)__Pyx_async_gen_asend_dealloc, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ -#if CYTHON_USE_ASYNC_SLOTS - &__Pyx_async_gen_asend_as_async, /* tp_as_async */ -#else - 0, /*tp_reserved*/ -#endif - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, /* tp_flags */ - 0, /* tp_doc */ - (traverseproc)__Pyx_async_gen_asend_traverse, /* tp_traverse */ - 0, /* tp_clear */ -#if CYTHON_USE_ASYNC_SLOTS && CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 && PY_VERSION_HEX < 0x030500B1 - // in order to (mis-)use tp_reserved above, we must also implement tp_richcompare - __Pyx_Coroutine_compare, /*tp_richcompare*/ -#else - 0, /*tp_richcompare*/ -#endif - 0, /* tp_weaklistoffset */ - PyObject_SelfIter, /* tp_iter */ - (iternextfunc)__Pyx_async_gen_asend_iternext, /* tp_iternext */ - __Pyx_async_gen_asend_methods, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ - 0, /* 
tp_version_tag */ -#if PY_VERSION_HEX >= 0x030400a1 - 0, /* tp_finalize */ -#endif -#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) - 0, /*tp_vectorcall*/ -#endif -#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 - 0, /*tp_print*/ -#endif -#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 - 0, /*tp_pypy_flags*/ -#endif -}; - - -static PyObject * -__Pyx_async_gen_asend_new(__pyx_PyAsyncGenObject *gen, PyObject *sendval) -{ - __pyx_PyAsyncGenASend *o; - if (__Pyx_ag_asend_freelist_free) { - __Pyx_ag_asend_freelist_free--; - o = __Pyx_ag_asend_freelist[__Pyx_ag_asend_freelist_free]; - _Py_NewReference((PyObject *)o); - } else { - o = PyObject_GC_New(__pyx_PyAsyncGenASend, __pyx__PyAsyncGenASendType); - if (o == NULL) { - return NULL; - } - } - - Py_INCREF(gen); - o->ags_gen = gen; - - Py_XINCREF(sendval); - o->ags_sendval = sendval; - - o->ags_state = __PYX_AWAITABLE_STATE_INIT; - - PyObject_GC_Track((PyObject*)o); - return (PyObject*)o; -} - - -/* ---------- Async Generator Value Wrapper ------------ */ - - -static void -__Pyx_async_gen_wrapped_val_dealloc(__pyx__PyAsyncGenWrappedValue *o) -{ - PyObject_GC_UnTrack((PyObject *)o); - Py_CLEAR(o->agw_val); - if (__Pyx_ag_value_freelist_free < _PyAsyncGen_MAXFREELIST) { - assert(__pyx__PyAsyncGenWrappedValue_CheckExact(o)); - __Pyx_ag_value_freelist[__Pyx_ag_value_freelist_free++] = o; - } else { - PyObject_GC_Del(o); - } -} - - -static int -__Pyx_async_gen_wrapped_val_traverse(__pyx__PyAsyncGenWrappedValue *o, - visitproc visit, void *arg) -{ - Py_VISIT(o->agw_val); - return 0; -} - - -static PyTypeObject __pyx__PyAsyncGenWrappedValueType_type = { - PyVarObject_HEAD_INIT(0, 0) - "async_generator_wrapped_value", /* tp_name */ - sizeof(__pyx__PyAsyncGenWrappedValue), /* tp_basicsize */ - 0, /* tp_itemsize */ - /* methods */ - (destructor)__Pyx_async_gen_wrapped_val_dealloc, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_as_async */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, /* tp_flags */ - 0, /* tp_doc */ - (traverseproc)__Pyx_async_gen_wrapped_val_traverse, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ - 0, /* tp_version_tag */ -#if PY_VERSION_HEX >= 0x030400a1 - 0, /* tp_finalize */ -#endif -#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) - 0, /*tp_vectorcall*/ -#endif -#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 - 0, /*tp_print*/ -#endif -#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 - 0, /*tp_pypy_flags*/ -#endif -}; - - -static PyObject * -__Pyx__PyAsyncGenValueWrapperNew(PyObject *val) -{ - // NOTE: steals a reference to val ! 
- __pyx__PyAsyncGenWrappedValue *o; - assert(val); - - if (__Pyx_ag_value_freelist_free) { - __Pyx_ag_value_freelist_free--; - o = __Pyx_ag_value_freelist[__Pyx_ag_value_freelist_free]; - assert(__pyx__PyAsyncGenWrappedValue_CheckExact(o)); - _Py_NewReference((PyObject*)o); - } else { - o = PyObject_GC_New(__pyx__PyAsyncGenWrappedValue, __pyx__PyAsyncGenWrappedValueType); - if (unlikely(!o)) { - Py_DECREF(val); - return NULL; - } - } - o->agw_val = val; - // no Py_INCREF(val) - steals reference! - PyObject_GC_Track((PyObject*)o); - return (PyObject*)o; -} - - -/* ---------- Async Generator AThrow awaitable ------------ */ - - -static void -__Pyx_async_gen_athrow_dealloc(__pyx_PyAsyncGenAThrow *o) -{ - PyObject_GC_UnTrack((PyObject *)o); - Py_CLEAR(o->agt_gen); - Py_CLEAR(o->agt_args); - PyObject_GC_Del(o); -} - - -static int -__Pyx_async_gen_athrow_traverse(__pyx_PyAsyncGenAThrow *o, visitproc visit, void *arg) -{ - Py_VISIT(o->agt_gen); - Py_VISIT(o->agt_args); - return 0; -} - - -static PyObject * -__Pyx_async_gen_athrow_send(__pyx_PyAsyncGenAThrow *o, PyObject *arg) -{ - __pyx_CoroutineObject *gen = (__pyx_CoroutineObject*)o->agt_gen; - PyObject *retval; - - if (o->agt_state == __PYX_AWAITABLE_STATE_CLOSED) { - PyErr_SetNone(PyExc_StopIteration); - return NULL; - } - - if (o->agt_state == __PYX_AWAITABLE_STATE_INIT) { - if (o->agt_gen->ag_closed) { - PyErr_SetNone(PyExc_StopIteration); - return NULL; - } - - if (arg != Py_None) { - PyErr_SetString(PyExc_RuntimeError, __Pyx_NON_INIT_CORO_MSG); - return NULL; - } - - o->agt_state = __PYX_AWAITABLE_STATE_ITER; - - if (o->agt_args == NULL) { - /* aclose() mode */ - o->agt_gen->ag_closed = 1; - - retval = __Pyx__Coroutine_Throw((PyObject*)gen, - /* Do not close generator when - PyExc_GeneratorExit is passed */ - PyExc_GeneratorExit, NULL, NULL, NULL, 0); - - if (retval && __pyx__PyAsyncGenWrappedValue_CheckExact(retval)) { - Py_DECREF(retval); - goto yield_close; - } - } else { - PyObject *typ; - PyObject *tb = NULL; - PyObject *val = NULL; - - if (!PyArg_UnpackTuple(o->agt_args, "athrow", 1, 3, - &typ, &val, &tb)) { - return NULL; - } - - retval = __Pyx__Coroutine_Throw((PyObject*)gen, - /* Do not close generator when PyExc_GeneratorExit is passed */ - typ, val, tb, o->agt_args, 0); - retval = __Pyx_async_gen_unwrap_value(o->agt_gen, retval); - } - if (retval == NULL) { - goto check_error; - } - return retval; - } - - assert (o->agt_state == __PYX_AWAITABLE_STATE_ITER); - - retval = __Pyx_Coroutine_Send((PyObject *)gen, arg); - if (o->agt_args) { - return __Pyx_async_gen_unwrap_value(o->agt_gen, retval); - } else { - /* aclose() mode */ - if (retval) { - if (__pyx__PyAsyncGenWrappedValue_CheckExact(retval)) { - Py_DECREF(retval); - goto yield_close; - } - else { - return retval; - } - } - else { - goto check_error; - } - } - -yield_close: - PyErr_SetString( - PyExc_RuntimeError, __Pyx_ASYNC_GEN_IGNORED_EXIT_MSG); - return NULL; - -check_error: - if (PyErr_ExceptionMatches(__Pyx_PyExc_StopAsyncIteration)) { - o->agt_state = __PYX_AWAITABLE_STATE_CLOSED; - if (o->agt_args == NULL) { - // when aclose() is called we don't want to propagate - // StopAsyncIteration; just raise StopIteration, signalling - // that 'aclose()' is done. 
- PyErr_Clear(); - PyErr_SetNone(PyExc_StopIteration); - } - } - else if (PyErr_ExceptionMatches(PyExc_GeneratorExit)) { - o->agt_state = __PYX_AWAITABLE_STATE_CLOSED; - PyErr_Clear(); /* ignore these errors */ - PyErr_SetNone(PyExc_StopIteration); - } - return NULL; -} - - -static PyObject * -__Pyx_async_gen_athrow_throw(__pyx_PyAsyncGenAThrow *o, PyObject *args) -{ - PyObject *retval; - - if (o->agt_state == __PYX_AWAITABLE_STATE_INIT) { - PyErr_SetString(PyExc_RuntimeError, __Pyx_NON_INIT_CORO_MSG); - return NULL; - } - - if (o->agt_state == __PYX_AWAITABLE_STATE_CLOSED) { - PyErr_SetNone(PyExc_StopIteration); - return NULL; - } - - retval = __Pyx_Coroutine_Throw((PyObject*)o->agt_gen, args); - if (o->agt_args) { - return __Pyx_async_gen_unwrap_value(o->agt_gen, retval); - } else { - /* aclose() mode */ - if (retval && __pyx__PyAsyncGenWrappedValue_CheckExact(retval)) { - Py_DECREF(retval); - PyErr_SetString(PyExc_RuntimeError, __Pyx_ASYNC_GEN_IGNORED_EXIT_MSG); - return NULL; - } - return retval; - } -} - - -static PyObject * -__Pyx_async_gen_athrow_iternext(__pyx_PyAsyncGenAThrow *o) -{ - return __Pyx_async_gen_athrow_send(o, Py_None); -} - - -static PyObject * -__Pyx_async_gen_athrow_close(PyObject *g, CYTHON_UNUSED PyObject *args) -{ - __pyx_PyAsyncGenAThrow *o = (__pyx_PyAsyncGenAThrow*) g; - o->agt_state = __PYX_AWAITABLE_STATE_CLOSED; - Py_RETURN_NONE; -} - - -static PyMethodDef __Pyx_async_gen_athrow_methods[] = { - {"send", (PyCFunction)__Pyx_async_gen_athrow_send, METH_O, __Pyx_async_gen_send_doc}, - {"throw", (PyCFunction)__Pyx_async_gen_athrow_throw, METH_VARARGS, __Pyx_async_gen_throw_doc}, - {"close", (PyCFunction)__Pyx_async_gen_athrow_close, METH_NOARGS, __Pyx_async_gen_close_doc}, - {"__await__", (PyCFunction)__Pyx_async_gen_self_method, METH_NOARGS, __Pyx_async_gen_await_doc}, - {0, 0, 0, 0} /* Sentinel */ -}; - - -#if CYTHON_USE_ASYNC_SLOTS -static __Pyx_PyAsyncMethodsStruct __Pyx_async_gen_athrow_as_async = { - PyObject_SelfIter, /* am_await */ - 0, /* am_aiter */ - 0, /* am_anext */ -#if PY_VERSION_HEX >= 0x030A00A3 - 0, /*am_send*/ -#endif -}; -#endif - - -static PyTypeObject __pyx__PyAsyncGenAThrowType_type = { - PyVarObject_HEAD_INIT(0, 0) - "async_generator_athrow", /* tp_name */ - sizeof(__pyx_PyAsyncGenAThrow), /* tp_basicsize */ - 0, /* tp_itemsize */ - (destructor)__Pyx_async_gen_athrow_dealloc, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ -#if CYTHON_USE_ASYNC_SLOTS - &__Pyx_async_gen_athrow_as_async, /* tp_as_async */ -#else - 0, /*tp_reserved*/ -#endif - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, /* tp_flags */ - 0, /* tp_doc */ - (traverseproc)__Pyx_async_gen_athrow_traverse, /* tp_traverse */ - 0, /* tp_clear */ -#if CYTHON_USE_ASYNC_SLOTS && CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 && PY_VERSION_HEX < 0x030500B1 - // in order to (mis-)use tp_reserved above, we must also implement tp_richcompare - __Pyx_Coroutine_compare, /*tp_richcompare*/ -#else - 0, /*tp_richcompare*/ -#endif - 0, /* tp_weaklistoffset */ - PyObject_SelfIter, /* tp_iter */ - (iternextfunc)__Pyx_async_gen_athrow_iternext, /* tp_iternext */ - __Pyx_async_gen_athrow_methods, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* 
tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ - 0, /* tp_version_tag */ -#if PY_VERSION_HEX >= 0x030400a1 - 0, /* tp_finalize */ -#endif -#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) - 0, /*tp_vectorcall*/ -#endif -#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 - 0, /*tp_print*/ -#endif -#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 - 0, /*tp_pypy_flags*/ -#endif -}; - - -static PyObject * -__Pyx_async_gen_athrow_new(__pyx_PyAsyncGenObject *gen, PyObject *args) -{ - __pyx_PyAsyncGenAThrow *o; - o = PyObject_GC_New(__pyx_PyAsyncGenAThrow, __pyx__PyAsyncGenAThrowType); - if (o == NULL) { - return NULL; - } - o->agt_gen = gen; - o->agt_args = args; - o->agt_state = __PYX_AWAITABLE_STATE_INIT; - Py_INCREF(gen); - Py_XINCREF(args); - PyObject_GC_Track((PyObject*)o); - return (PyObject*)o; -} - - -/* ---------- global type sharing ------------ */ - -static int __pyx_AsyncGen_init(void) { - // on Windows, C-API functions can't be used in slots statically - __pyx_AsyncGenType_type.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict; - __pyx__PyAsyncGenWrappedValueType_type.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict; - __pyx__PyAsyncGenAThrowType_type.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict; - __pyx__PyAsyncGenASendType_type.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict; - - __pyx_AsyncGenType = __Pyx_FetchCommonType(&__pyx_AsyncGenType_type); - if (unlikely(!__pyx_AsyncGenType)) - return -1; - - __pyx__PyAsyncGenAThrowType = __Pyx_FetchCommonType(&__pyx__PyAsyncGenAThrowType_type); - if (unlikely(!__pyx__PyAsyncGenAThrowType)) - return -1; - - __pyx__PyAsyncGenWrappedValueType = __Pyx_FetchCommonType(&__pyx__PyAsyncGenWrappedValueType_type); - if (unlikely(!__pyx__PyAsyncGenWrappedValueType)) - return -1; - - __pyx__PyAsyncGenASendType = __Pyx_FetchCommonType(&__pyx__PyAsyncGenASendType_type); - if (unlikely(!__pyx__PyAsyncGenASendType)) - return -1; - - return 0; -} diff --git a/spaces/asafAdge/Detic/detic/data/custom_dataset_mapper.py b/spaces/asafAdge/Detic/detic/data/custom_dataset_mapper.py deleted file mode 100644 index c7727dded3f93f5eeafdcd72e257197e3fdc817b..0000000000000000000000000000000000000000 --- a/spaces/asafAdge/Detic/detic/data/custom_dataset_mapper.py +++ /dev/null @@ -1,280 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved -import copy -import logging -import numpy as np -from typing import List, Optional, Union -import torch -import pycocotools.mask as mask_util - -from detectron2.config import configurable - -from detectron2.data import detection_utils as utils -from detectron2.data.detection_utils import transform_keypoint_annotations -from detectron2.data import transforms as T -from detectron2.data.dataset_mapper import DatasetMapper -from detectron2.structures import Boxes, BoxMode, Instances -from detectron2.structures import Keypoints, PolygonMasks, BitMasks -from fvcore.transforms.transform import TransformList -from .custom_build_augmentation import build_custom_augmentation -from .tar_dataset import DiskTarDataset - -__all__ = ["CustomDatasetMapper"] - -class CustomDatasetMapper(DatasetMapper): - @configurable - def __init__(self, is_train: bool, - with_ann_type=False, - dataset_ann=[], - use_diff_bs_size=False, - dataset_augs=[], - is_debug=False, - use_tar_dataset=False, - tarfile_path='', - tar_index_dir='', - **kwargs): - """ - add image labels - """ - self.with_ann_type = with_ann_type - self.dataset_ann = dataset_ann - self.use_diff_bs_size = use_diff_bs_size - if self.use_diff_bs_size and is_train: - self.dataset_augs = [T.AugmentationList(x) for x in dataset_augs] - self.is_debug = is_debug - self.use_tar_dataset = use_tar_dataset - if self.use_tar_dataset: - print('Using tar dataset') - self.tar_dataset = DiskTarDataset(tarfile_path, tar_index_dir) - super().__init__(is_train, **kwargs) - - - @classmethod - def from_config(cls, cfg, is_train: bool = True): - ret = super().from_config(cfg, is_train) - ret.update({ - 'with_ann_type': cfg.WITH_IMAGE_LABELS, - 'dataset_ann': cfg.DATALOADER.DATASET_ANN, - 'use_diff_bs_size': cfg.DATALOADER.USE_DIFF_BS_SIZE, - 'is_debug': cfg.IS_DEBUG, - 'use_tar_dataset': cfg.DATALOADER.USE_TAR_DATASET, - 'tarfile_path': cfg.DATALOADER.TARFILE_PATH, - 'tar_index_dir': cfg.DATALOADER.TAR_INDEX_DIR, - }) - if ret['use_diff_bs_size'] and is_train: - if cfg.INPUT.CUSTOM_AUG == 'EfficientDetResizeCrop': - dataset_scales = cfg.DATALOADER.DATASET_INPUT_SCALE - dataset_sizes = cfg.DATALOADER.DATASET_INPUT_SIZE - ret['dataset_augs'] = [ - build_custom_augmentation(cfg, True, scale, size) \ - for scale, size in zip(dataset_scales, dataset_sizes)] - else: - assert cfg.INPUT.CUSTOM_AUG == 'ResizeShortestEdge' - min_sizes = cfg.DATALOADER.DATASET_MIN_SIZES - max_sizes = cfg.DATALOADER.DATASET_MAX_SIZES - ret['dataset_augs'] = [ - build_custom_augmentation( - cfg, True, min_size=mi, max_size=ma) \ - for mi, ma in zip(min_sizes, max_sizes)] - else: - ret['dataset_augs'] = [] - - return ret - - def __call__(self, dataset_dict): - """ - include image labels - """ - dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below - # USER: Write your own image loading if it's not from a file - if 'file_name' in dataset_dict: - ori_image = utils.read_image( - dataset_dict["file_name"], format=self.image_format) - else: - ori_image, _, _ = self.tar_dataset[dataset_dict["tar_index"]] - ori_image = utils._apply_exif_orientation(ori_image) - ori_image = utils.convert_PIL_to_numpy(ori_image, self.image_format) - utils.check_image_size(dataset_dict, ori_image) - - # USER: Remove if you don't do semantic/panoptic segmentation. 
- if "sem_seg_file_name" in dataset_dict: - sem_seg_gt = utils.read_image( - dataset_dict.pop("sem_seg_file_name"), "L").squeeze(2) - else: - sem_seg_gt = None - - if self.is_debug: - dataset_dict['dataset_source'] = 0 - - not_full_labeled = 'dataset_source' in dataset_dict and \ - self.with_ann_type and \ - self.dataset_ann[dataset_dict['dataset_source']] != 'box' - - aug_input = T.AugInput(copy.deepcopy(ori_image), sem_seg=sem_seg_gt) - if self.use_diff_bs_size and self.is_train: - transforms = \ - self.dataset_augs[dataset_dict['dataset_source']](aug_input) - else: - transforms = self.augmentations(aug_input) - image, sem_seg_gt = aug_input.image, aug_input.sem_seg - - image_shape = image.shape[:2] # h, w - dataset_dict["image"] = torch.as_tensor( - np.ascontiguousarray(image.transpose(2, 0, 1))) - - if sem_seg_gt is not None: - dataset_dict["sem_seg"] = torch.as_tensor(sem_seg_gt.astype("long")) - - # USER: Remove if you don't use pre-computed proposals. - # Most users would not need this feature. - if self.proposal_topk is not None: - utils.transform_proposals( - dataset_dict, image_shape, transforms, - proposal_topk=self.proposal_topk - ) - - if not self.is_train: - # USER: Modify this if you want to keep them for some reason. - dataset_dict.pop("annotations", None) - dataset_dict.pop("sem_seg_file_name", None) - return dataset_dict - - if "annotations" in dataset_dict: - # USER: Modify this if you want to keep them for some reason. - for anno in dataset_dict["annotations"]: - if not self.use_instance_mask: - anno.pop("segmentation", None) - if not self.use_keypoint: - anno.pop("keypoints", None) - - # USER: Implement additional transformations if you have other types of data - all_annos = [ - (utils.transform_instance_annotations( - obj, transforms, image_shape, - keypoint_hflip_indices=self.keypoint_hflip_indices, - ), obj.get("iscrowd", 0)) - for obj in dataset_dict.pop("annotations") - ] - annos = [ann[0] for ann in all_annos if ann[1] == 0] - instances = utils.annotations_to_instances( - annos, image_shape, mask_format=self.instance_mask_format - ) - - del all_annos - if self.recompute_boxes: - instances.gt_boxes = instances.gt_masks.get_bounding_boxes() - dataset_dict["instances"] = utils.filter_empty_instances(instances) - if self.with_ann_type: - dataset_dict["pos_category_ids"] = dataset_dict.get( - 'pos_category_ids', []) - dataset_dict["ann_type"] = \ - self.dataset_ann[dataset_dict['dataset_source']] - if self.is_debug and (('pos_category_ids' not in dataset_dict) or \ - (dataset_dict['pos_category_ids'] == [])): - dataset_dict['pos_category_ids'] = [x for x in sorted(set( - dataset_dict['instances'].gt_classes.tolist() - ))] - return dataset_dict - -# DETR augmentation -def build_transform_gen(cfg, is_train): - """ - """ - if is_train: - min_size = cfg.INPUT.MIN_SIZE_TRAIN - max_size = cfg.INPUT.MAX_SIZE_TRAIN - sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING - else: - min_size = cfg.INPUT.MIN_SIZE_TEST - max_size = cfg.INPUT.MAX_SIZE_TEST - sample_style = "choice" - if sample_style == "range": - assert len(min_size) == 2, "more than 2 ({}) min_size(s) are provided for ranges".format(len(min_size)) - - logger = logging.getLogger(__name__) - tfm_gens = [] - if is_train: - tfm_gens.append(T.RandomFlip()) - tfm_gens.append(T.ResizeShortestEdge(min_size, max_size, sample_style)) - if is_train: - logger.info("TransformGens used in training: " + str(tfm_gens)) - return tfm_gens - - -class DetrDatasetMapper: - """ - A callable which takes a dataset dict in Detectron2 Dataset 
format, - and map it into a format used by DETR. - The callable currently does the following: - 1. Read the image from "file_name" - 2. Applies geometric transforms to the image and annotation - 3. Find and applies suitable cropping to the image and annotation - 4. Prepare image and annotation to Tensors - """ - - def __init__(self, cfg, is_train=True): - if cfg.INPUT.CROP.ENABLED and is_train: - self.crop_gen = [ - T.ResizeShortestEdge([400, 500, 600], sample_style="choice"), - T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE), - ] - else: - self.crop_gen = None - - self.mask_on = cfg.MODEL.MASK_ON - self.tfm_gens = build_transform_gen(cfg, is_train) - logging.getLogger(__name__).info( - "Full TransformGens used in training: {}, crop: {}".format(str(self.tfm_gens), str(self.crop_gen)) - ) - - self.img_format = cfg.INPUT.FORMAT - self.is_train = is_train - - def __call__(self, dataset_dict): - """ - Args: - dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format. - Returns: - dict: a format that builtin models in detectron2 accept - """ - dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below - image = utils.read_image(dataset_dict["file_name"], format=self.img_format) - utils.check_image_size(dataset_dict, image) - - if self.crop_gen is None: - image, transforms = T.apply_transform_gens(self.tfm_gens, image) - else: - if np.random.rand() > 0.5: - image, transforms = T.apply_transform_gens(self.tfm_gens, image) - else: - image, transforms = T.apply_transform_gens( - self.tfm_gens[:-1] + self.crop_gen + self.tfm_gens[-1:], image - ) - - image_shape = image.shape[:2] # h, w - - # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory, - # but not efficient on large generic data structures due to the use of pickle & mp.Queue. - # Therefore it's important to use torch.Tensor. - dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1))) - - if not self.is_train: - # USER: Modify this if you want to keep them for some reason. - dataset_dict.pop("annotations", None) - return dataset_dict - - if "annotations" in dataset_dict: - # USER: Modify this if you want to keep them for some reason. 
- for anno in dataset_dict["annotations"]: - if not self.mask_on: - anno.pop("segmentation", None) - anno.pop("keypoints", None) - - # USER: Implement additional transformations if you have other types of data - annos = [ - utils.transform_instance_annotations(obj, transforms, image_shape) - for obj in dataset_dict.pop("annotations") - if obj.get("iscrowd", 0) == 0 - ] - instances = utils.annotations_to_instances(annos, image_shape) - dataset_dict["instances"] = utils.filter_empty_instances(instances) - return dataset_dict \ No newline at end of file diff --git a/spaces/asciicorp/Legal-ai/save.py b/spaces/asciicorp/Legal-ai/save.py deleted file mode 100644 index a57eaa46838c083864c8cae916ae9b9b277a1dbe..0000000000000000000000000000000000000000 --- a/spaces/asciicorp/Legal-ai/save.py +++ /dev/null @@ -1,59 +0,0 @@ -import streamlit as st -from datetime import datetime -import base64 - -def save_function(model, temperature, template): - current_time = datetime.now().strftime("%Y-%m-%d_%H-%M-%S") - filename = f"api_{model}_{current_time}.py" - with open(filename, "w") as f: - f.write("from langchain.prompts.prompt import PromptTemplate\n") - f.write("from langchain.llms import OpenAI\n") - f.write("from langchain.chains import ChatVectorDBChain\n") - f.write("\n") - f.write("import os\n") - f.write("import pickle\n") - f.write("from fastapi import FastAPI, Request\n") - f.write("\n") - f.write('os.environ["OPENAI_API_KEY"] = "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"\n') - f.write("\n") - f.write(f"model = '{model}'\n") - f.write(f"temperature = {temperature}\n") - f.write(f"template = '''{template}'''\n") - f.write("\n") - f.write("_template = '''Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n") - f.write("you can assume the question is about the document.\n") - f.write("\n") - f.write("Chat History:\n") - f.write("{chat_history}\n") - f.write("Follow Up Input: {question}\n") - f.write("Standalone question:'''\n") - f.write("\n") - f.write("CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)\n") - f.write("\n") - f.write("QA_PROMPT = PromptTemplate(template=template, input_variables=['question', 'context'])\n") - f.write("\n") - f.write('with open("vectorstore.pkl", "rb") as f:\n') - f.write(" vectorstore = pickle.load(f)\n") - f.write("\n") - f.write("app = FastAPI()\n") - f.write("llm = OpenAI(model=model, temperature=temperature)\n") - f.write("qa_chain = ChatVectorDBChain.from_llm(\n") - f.write(" llm,\n") - f.write(" vectorstore,\n") - f.write(" qa_prompt=QA_PROMPT,\n") - f.write(" condense_question_prompt=CONDENSE_QUESTION_PROMPT,\n") - f.write(" )\n") - f.write('@app.post("/api")\n') - f.write("async def get_answer(request: Request):\n") - f.write(" body = await request.json()\n") - f.write(' question = body.get("question")\n') - f.write(' chat_history = body.get("chat_history", [])\n') - f.write(' result = qa_chain({"question": question, "chat_history": chat_history})\n') - f.write(' chat_history.append((question, result["answer"]))\n') - f.write(' return {"answer": result["answer"]}\n') - st.success(f"Custom API created as {filename}") - with open(f"{filename}", 'rb') as f: - bytes = f.read() - b64 = base64.b64encode(bytes).decode() - href = f'Download custom API' - st.markdown(href, unsafe_allow_html=True) \ No newline at end of file diff --git a/spaces/ashercn97/AsherTesting/docs/Generation-parameters.md b/spaces/ashercn97/AsherTesting/docs/Generation-parameters.md deleted file mode 
100644 index 447742160f3d89796e10726b0257c19435e90449..0000000000000000000000000000000000000000 --- a/spaces/ashercn97/AsherTesting/docs/Generation-parameters.md +++ /dev/null @@ -1,35 +0,0 @@ -# Generation parameters - -For a description of the generation parameters provided by the transformers library, see this link: https://huggingface.co/docs/transformers/main_classes/text_generation#transformers.GenerationConfig - -### llama.cpp - -llama.cpp only uses the following parameters: - -* temperature -* top_p -* top_k -* repetition_penalty -* tfs -* mirostat_mode -* mirostat_tau -* mirostat_eta - -### ExLlama - -ExLlama only uses the following parameters: - -* temperature -* top_p -* top_k -* repetition_penalty -* repetition_penalty_range -* typical_p - -### RWKV - -RWKV only uses the following parameters when loaded through the old .pth weights: - -* temperature -* top_p -* top_k diff --git a/spaces/awacke1/Science-NER-Spacy-Streamlit/README.md b/spaces/awacke1/Science-NER-Spacy-Streamlit/README.md deleted file mode 100644 index 9805241f6476eaa5bd4711a13b070367ba061c14..0000000000000000000000000000000000000000 --- a/spaces/awacke1/Science-NER-Spacy-Streamlit/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: 🔥 Science NER Spacy for STEM Streamlit -emoji: 🔥 -colorFrom: indigo -colorTo: red -sdk: streamlit -sdk_version: 1.15.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/balgot/text-to-stylegan3/download_model.sh b/spaces/balgot/text-to-stylegan3/download_model.sh deleted file mode 100644 index 02eaac6b3f16988f15937b96fb81aeeca89dace3..0000000000000000000000000000000000000000 --- a/spaces/balgot/text-to-stylegan3/download_model.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash - -echo "Downloading the translation model..." -wget --show-progress --verbose -nc -O model.pt https://huggingface.co/balgot/bert-2-stylegan3/resolve/main/translation_model-sd.pt \ No newline at end of file diff --git a/spaces/banana-projects/web3d/node_modules/three/examples/js/lines/Wireframe.js b/spaces/banana-projects/web3d/node_modules/three/examples/js/lines/Wireframe.js deleted file mode 100644 index 9f582cf33d44338c609cb47db9f7b9b576b205c2..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/examples/js/lines/Wireframe.js +++ /dev/null @@ -1,65 +0,0 @@ -/** - * @author WestLangley / http://github.com/WestLangley - * - */ - -THREE.Wireframe = function ( geometry, material ) { - - THREE.Mesh.call( this ); - - this.type = 'Wireframe'; - - this.geometry = geometry !== undefined ? geometry : new THREE.LineSegmentsGeometry(); - this.material = material !== undefined ? material : new THREE.LineMaterial( { color: Math.random() * 0xffffff } ); - -}; - -THREE.Wireframe.prototype = Object.assign( Object.create( THREE.Mesh.prototype ), { - - constructor: THREE.Wireframe, - - isWireframe: true, - - computeLineDistances: ( function () { // for backwards-compatability, but could be a method of LineSegmentsGeometry... 
- - var start = new THREE.Vector3(); - var end = new THREE.Vector3(); - - return function computeLineDistances() { - - var geometry = this.geometry; - - var instanceStart = geometry.attributes.instanceStart; - var instanceEnd = geometry.attributes.instanceEnd; - var lineDistances = new Float32Array( 2 * instanceStart.data.count ); - - for ( var i = 0, j = 0, l = instanceStart.data.count; i < l; i ++, j += 2 ) { - - start.fromBufferAttribute( instanceStart, i ); - end.fromBufferAttribute( instanceEnd, i ); - - lineDistances[ j ] = ( j === 0 ) ? 0 : lineDistances[ j - 1 ]; - lineDistances[ j + 1 ] = lineDistances[ j ] + start.distanceTo( end ); - - } - - var instanceDistanceBuffer = new THREE.InstancedInterleavedBuffer( lineDistances, 2, 1 ); // d0, d1 - - geometry.addAttribute( 'instanceDistanceStart', new THREE.InterleavedBufferAttribute( instanceDistanceBuffer, 1, 0 ) ); // d0 - geometry.addAttribute( 'instanceDistanceEnd', new THREE.InterleavedBufferAttribute( instanceDistanceBuffer, 1, 1 ) ); // d1 - - return this; - - }; - - }() ), - - copy: function ( source ) { - - // todo - - return this; - - } - -} ); diff --git a/spaces/banana-projects/web3d/node_modules/three/src/extras/curves/QuadraticBezierCurve.d.ts b/spaces/banana-projects/web3d/node_modules/three/src/extras/curves/QuadraticBezierCurve.d.ts deleted file mode 100644 index 8face24fa9a42526d1033a4b649b86345606aa7b..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/src/extras/curves/QuadraticBezierCurve.d.ts +++ /dev/null @@ -1,10 +0,0 @@ -import { Vector2 } from './../../math/Vector2'; -import { Curve } from './../core/Curve'; - -export class QuadraticBezierCurve extends Curve { - constructor(v0: Vector2, v1: Vector2, v2: Vector2); - - v0: Vector2; - v1: Vector2; - v2: Vector2; -} diff --git a/spaces/banana-projects/web3d/node_modules/three/src/renderers/shaders/ShaderChunk/logdepthbuf_vertex.glsl.js b/spaces/banana-projects/web3d/node_modules/three/src/renderers/shaders/ShaderChunk/logdepthbuf_vertex.glsl.js deleted file mode 100644 index 1631661a9bfa38316f66a9eec2482da043d616ea..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/src/renderers/shaders/ShaderChunk/logdepthbuf_vertex.glsl.js +++ /dev/null @@ -1,17 +0,0 @@ -export default /* glsl */` -#ifdef USE_LOGDEPTHBUF - - #ifdef USE_LOGDEPTHBUF_EXT - - vFragDepth = 1.0 + gl_Position.w; - - #else - - gl_Position.z = log2( max( EPSILON, gl_Position.w + 1.0 ) ) * logDepthBufFC - 1.0; - - gl_Position.z *= gl_Position.w; - - #endif - -#endif -`; diff --git a/spaces/banana-projects/web3d/node_modules/three/src/textures/CubeTexture.js b/spaces/banana-projects/web3d/node_modules/three/src/textures/CubeTexture.js deleted file mode 100644 index 6d2e2946c97bd4ef257344bef0ee271992483be7..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/src/textures/CubeTexture.js +++ /dev/null @@ -1,42 +0,0 @@ -/** - * @author mrdoob / http://mrdoob.com/ - */ - -import { Texture } from './Texture.js'; -import { CubeReflectionMapping, RGBFormat } from '../constants.js'; - -function CubeTexture( images, mapping, wrapS, wrapT, magFilter, minFilter, format, type, anisotropy, encoding ) { - - images = images !== undefined ? images : []; - mapping = mapping !== undefined ? mapping : CubeReflectionMapping; - format = format !== undefined ? 
format : RGBFormat; - - Texture.call( this, images, mapping, wrapS, wrapT, magFilter, minFilter, format, type, anisotropy, encoding ); - - this.flipY = false; - -} - -CubeTexture.prototype = Object.create( Texture.prototype ); -CubeTexture.prototype.constructor = CubeTexture; - -CubeTexture.prototype.isCubeTexture = true; - -Object.defineProperty( CubeTexture.prototype, 'images', { - - get: function () { - - return this.image; - - }, - - set: function ( value ) { - - this.image = value; - - } - -} ); - - -export { CubeTexture }; diff --git a/spaces/beihai/GFPGAN-V1.3-whole-image/basicsr/archs/swinir_arch.py b/spaces/beihai/GFPGAN-V1.3-whole-image/basicsr/archs/swinir_arch.py deleted file mode 100644 index 3917fa2c7408e1f5b55b9930c643a9af920a4d81..0000000000000000000000000000000000000000 --- a/spaces/beihai/GFPGAN-V1.3-whole-image/basicsr/archs/swinir_arch.py +++ /dev/null @@ -1,956 +0,0 @@ -# Modified from https://github.com/JingyunLiang/SwinIR -# SwinIR: Image Restoration Using Swin Transformer, https://arxiv.org/abs/2108.10257 -# Originally Written by Ze Liu, Modified by Jingyun Liang. - -import math -import torch -import torch.nn as nn -import torch.utils.checkpoint as checkpoint - -from basicsr.utils.registry import ARCH_REGISTRY -from .arch_util import to_2tuple, trunc_normal_ - - -def drop_path(x, drop_prob: float = 0., training: bool = False): - """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). - - From: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/drop.py - """ - if drop_prob == 0. or not training: - return x - keep_prob = 1 - drop_prob - shape = (x.shape[0], ) + (1, ) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets - random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device) - random_tensor.floor_() # binarize - output = x.div(keep_prob) * random_tensor - return output - - -class DropPath(nn.Module): - """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). 
- - From: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/drop.py - """ - - def __init__(self, drop_prob=None): - super(DropPath, self).__init__() - self.drop_prob = drop_prob - - def forward(self, x): - return drop_path(x, self.drop_prob, self.training) - - -class Mlp(nn.Module): - - def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): - super().__init__() - out_features = out_features or in_features - hidden_features = hidden_features or in_features - self.fc1 = nn.Linear(in_features, hidden_features) - self.act = act_layer() - self.fc2 = nn.Linear(hidden_features, out_features) - self.drop = nn.Dropout(drop) - - def forward(self, x): - x = self.fc1(x) - x = self.act(x) - x = self.drop(x) - x = self.fc2(x) - x = self.drop(x) - return x - - -def window_partition(x, window_size): - """ - Args: - x: (b, h, w, c) - window_size (int): window size - - Returns: - windows: (num_windows*b, window_size, window_size, c) - """ - b, h, w, c = x.shape - x = x.view(b, h // window_size, window_size, w // window_size, window_size, c) - windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, c) - return windows - - -def window_reverse(windows, window_size, h, w): - """ - Args: - windows: (num_windows*b, window_size, window_size, c) - window_size (int): Window size - h (int): Height of image - w (int): Width of image - - Returns: - x: (b, h, w, c) - """ - b = int(windows.shape[0] / (h * w / window_size / window_size)) - x = windows.view(b, h // window_size, w // window_size, window_size, window_size, -1) - x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(b, h, w, -1) - return x - - -class WindowAttention(nn.Module): - r""" Window based multi-head self attention (W-MSA) module with relative position bias. - It supports both of shifted and non-shifted window. - - Args: - dim (int): Number of input channels. - window_size (tuple[int]): The height and width of the window. - num_heads (int): Number of attention heads. - qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True - qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set - attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0 - proj_drop (float, optional): Dropout ratio of output. 
Default: 0.0 - """ - - def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.): - - super().__init__() - self.dim = dim - self.window_size = window_size # Wh, Ww - self.num_heads = num_heads - head_dim = dim // num_heads - self.scale = qk_scale or head_dim**-0.5 - - # define a parameter table of relative position bias - self.relative_position_bias_table = nn.Parameter( - torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH - - # get pair-wise relative position index for each token inside the window - coords_h = torch.arange(self.window_size[0]) - coords_w = torch.arange(self.window_size[1]) - coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww - coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww - relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww - relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 - relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0 - relative_coords[:, :, 1] += self.window_size[1] - 1 - relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 - relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww - self.register_buffer('relative_position_index', relative_position_index) - - self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) - self.attn_drop = nn.Dropout(attn_drop) - self.proj = nn.Linear(dim, dim) - - self.proj_drop = nn.Dropout(proj_drop) - - trunc_normal_(self.relative_position_bias_table, std=.02) - self.softmax = nn.Softmax(dim=-1) - - def forward(self, x, mask=None): - """ - Args: - x: input features with shape of (num_windows*b, n, c) - mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None - """ - b_, n, c = x.shape - qkv = self.qkv(x).reshape(b_, n, 3, self.num_heads, c // self.num_heads).permute(2, 0, 3, 1, 4) - q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) - - q = q * self.scale - attn = (q @ k.transpose(-2, -1)) - - relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view( - self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH - relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww - attn = attn + relative_position_bias.unsqueeze(0) - - if mask is not None: - nw = mask.shape[0] - attn = attn.view(b_ // nw, nw, self.num_heads, n, n) + mask.unsqueeze(1).unsqueeze(0) - attn = attn.view(-1, self.num_heads, n, n) - attn = self.softmax(attn) - else: - attn = self.softmax(attn) - - attn = self.attn_drop(attn) - - x = (attn @ v).transpose(1, 2).reshape(b_, n, c) - x = self.proj(x) - x = self.proj_drop(x) - return x - - def extra_repr(self) -> str: - return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}' - - def flops(self, n): - # calculate flops for 1 window with token length of n - flops = 0 - # qkv = self.qkv(x) - flops += n * self.dim * 3 * self.dim - # attn = (q @ k.transpose(-2, -1)) - flops += self.num_heads * n * (self.dim // self.num_heads) * n - # x = (attn @ v) - flops += self.num_heads * n * n * (self.dim // self.num_heads) - # x = self.proj(x) - flops += n * self.dim * self.dim - return flops - - -class SwinTransformerBlock(nn.Module): - r""" Swin Transformer Block. - - Args: - dim (int): Number of input channels. - input_resolution (tuple[int]): Input resolution. - num_heads (int): Number of attention heads. 
- window_size (int): Window size. - shift_size (int): Shift size for SW-MSA. - mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. - qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True - qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. - drop (float, optional): Dropout rate. Default: 0.0 - attn_drop (float, optional): Attention dropout rate. Default: 0.0 - drop_path (float, optional): Stochastic depth rate. Default: 0.0 - act_layer (nn.Module, optional): Activation layer. Default: nn.GELU - norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm - """ - - def __init__(self, - dim, - input_resolution, - num_heads, - window_size=7, - shift_size=0, - mlp_ratio=4., - qkv_bias=True, - qk_scale=None, - drop=0., - attn_drop=0., - drop_path=0., - act_layer=nn.GELU, - norm_layer=nn.LayerNorm): - super().__init__() - self.dim = dim - self.input_resolution = input_resolution - self.num_heads = num_heads - self.window_size = window_size - self.shift_size = shift_size - self.mlp_ratio = mlp_ratio - if min(self.input_resolution) <= self.window_size: - # if window size is larger than input resolution, we don't partition windows - self.shift_size = 0 - self.window_size = min(self.input_resolution) - assert 0 <= self.shift_size < self.window_size, 'shift_size must in 0-window_size' - - self.norm1 = norm_layer(dim) - self.attn = WindowAttention( - dim, - window_size=to_2tuple(self.window_size), - num_heads=num_heads, - qkv_bias=qkv_bias, - qk_scale=qk_scale, - attn_drop=attn_drop, - proj_drop=drop) - - self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() - self.norm2 = norm_layer(dim) - mlp_hidden_dim = int(dim * mlp_ratio) - self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) - - if self.shift_size > 0: - attn_mask = self.calculate_mask(self.input_resolution) - else: - attn_mask = None - - self.register_buffer('attn_mask', attn_mask) - - def calculate_mask(self, x_size): - # calculate attention mask for SW-MSA - h, w = x_size - img_mask = torch.zeros((1, h, w, 1)) # 1 h w 1 - h_slices = (slice(0, -self.window_size), slice(-self.window_size, - -self.shift_size), slice(-self.shift_size, None)) - w_slices = (slice(0, -self.window_size), slice(-self.window_size, - -self.shift_size), slice(-self.shift_size, None)) - cnt = 0 - for h in h_slices: - for w in w_slices: - img_mask[:, h, w, :] = cnt - cnt += 1 - - mask_windows = window_partition(img_mask, self.window_size) # nw, window_size, window_size, 1 - mask_windows = mask_windows.view(-1, self.window_size * self.window_size) - attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) - attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) - - return attn_mask - - def forward(self, x, x_size): - h, w = x_size - b, _, c = x.shape - # assert seq_len == h * w, "input feature has wrong size" - - shortcut = x - x = self.norm1(x) - x = x.view(b, h, w, c) - - # cyclic shift - if self.shift_size > 0: - shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) - else: - shifted_x = x - - # partition windows - x_windows = window_partition(shifted_x, self.window_size) # nw*b, window_size, window_size, c - x_windows = x_windows.view(-1, self.window_size * self.window_size, c) # nw*b, window_size*window_size, c - - # W-MSA/SW-MSA (to be compatible for testing on images whose shapes are the multiple of window size - if 
self.input_resolution == x_size: - attn_windows = self.attn(x_windows, mask=self.attn_mask) # nw*b, window_size*window_size, c - else: - attn_windows = self.attn(x_windows, mask=self.calculate_mask(x_size).to(x.device)) - - # merge windows - attn_windows = attn_windows.view(-1, self.window_size, self.window_size, c) - shifted_x = window_reverse(attn_windows, self.window_size, h, w) # b h' w' c - - # reverse cyclic shift - if self.shift_size > 0: - x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) - else: - x = shifted_x - x = x.view(b, h * w, c) - - # FFN - x = shortcut + self.drop_path(x) - x = x + self.drop_path(self.mlp(self.norm2(x))) - - return x - - def extra_repr(self) -> str: - return (f'dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, ' - f'window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}') - - def flops(self): - flops = 0 - h, w = self.input_resolution - # norm1 - flops += self.dim * h * w - # W-MSA/SW-MSA - nw = h * w / self.window_size / self.window_size - flops += nw * self.attn.flops(self.window_size * self.window_size) - # mlp - flops += 2 * h * w * self.dim * self.dim * self.mlp_ratio - # norm2 - flops += self.dim * h * w - return flops - - -class PatchMerging(nn.Module): - r""" Patch Merging Layer. - - Args: - input_resolution (tuple[int]): Resolution of input feature. - dim (int): Number of input channels. - norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm - """ - - def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm): - super().__init__() - self.input_resolution = input_resolution - self.dim = dim - self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) - self.norm = norm_layer(4 * dim) - - def forward(self, x): - """ - x: b, h*w, c - """ - h, w = self.input_resolution - b, seq_len, c = x.shape - assert seq_len == h * w, 'input feature has wrong size' - assert h % 2 == 0 and w % 2 == 0, f'x size ({h}*{w}) are not even.' - - x = x.view(b, h, w, c) - - x0 = x[:, 0::2, 0::2, :] # b h/2 w/2 c - x1 = x[:, 1::2, 0::2, :] # b h/2 w/2 c - x2 = x[:, 0::2, 1::2, :] # b h/2 w/2 c - x3 = x[:, 1::2, 1::2, :] # b h/2 w/2 c - x = torch.cat([x0, x1, x2, x3], -1) # b h/2 w/2 4*c - x = x.view(b, -1, 4 * c) # b h/2*w/2 4*c - - x = self.norm(x) - x = self.reduction(x) - - return x - - def extra_repr(self) -> str: - return f'input_resolution={self.input_resolution}, dim={self.dim}' - - def flops(self): - h, w = self.input_resolution - flops = h * w * self.dim - flops += (h // 2) * (w // 2) * 4 * self.dim * 2 * self.dim - return flops - - -class BasicLayer(nn.Module): - """ A basic Swin Transformer layer for one stage. - - Args: - dim (int): Number of input channels. - input_resolution (tuple[int]): Input resolution. - depth (int): Number of blocks. - num_heads (int): Number of attention heads. - window_size (int): Local window size. - mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. - qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True - qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. - drop (float, optional): Dropout rate. Default: 0.0 - attn_drop (float, optional): Attention dropout rate. Default: 0.0 - drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 - norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm - downsample (nn.Module | None, optional): Downsample layer at the end of the layer. 
Default: None - use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. - """ - - def __init__(self, - dim, - input_resolution, - depth, - num_heads, - window_size, - mlp_ratio=4., - qkv_bias=True, - qk_scale=None, - drop=0., - attn_drop=0., - drop_path=0., - norm_layer=nn.LayerNorm, - downsample=None, - use_checkpoint=False): - - super().__init__() - self.dim = dim - self.input_resolution = input_resolution - self.depth = depth - self.use_checkpoint = use_checkpoint - - # build blocks - self.blocks = nn.ModuleList([ - SwinTransformerBlock( - dim=dim, - input_resolution=input_resolution, - num_heads=num_heads, - window_size=window_size, - shift_size=0 if (i % 2 == 0) else window_size // 2, - mlp_ratio=mlp_ratio, - qkv_bias=qkv_bias, - qk_scale=qk_scale, - drop=drop, - attn_drop=attn_drop, - drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, - norm_layer=norm_layer) for i in range(depth) - ]) - - # patch merging layer - if downsample is not None: - self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer) - else: - self.downsample = None - - def forward(self, x, x_size): - for blk in self.blocks: - if self.use_checkpoint: - x = checkpoint.checkpoint(blk, x) - else: - x = blk(x, x_size) - if self.downsample is not None: - x = self.downsample(x) - return x - - def extra_repr(self) -> str: - return f'dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}' - - def flops(self): - flops = 0 - for blk in self.blocks: - flops += blk.flops() - if self.downsample is not None: - flops += self.downsample.flops() - return flops - - -class RSTB(nn.Module): - """Residual Swin Transformer Block (RSTB). - - Args: - dim (int): Number of input channels. - input_resolution (tuple[int]): Input resolution. - depth (int): Number of blocks. - num_heads (int): Number of attention heads. - window_size (int): Local window size. - mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. - qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True - qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. - drop (float, optional): Dropout rate. Default: 0.0 - attn_drop (float, optional): Attention dropout rate. Default: 0.0 - drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 - norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm - downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None - use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. - img_size: Input image size. - patch_size: Patch size. - resi_connection: The convolutional block before residual connection. 
- """ - - def __init__(self, - dim, - input_resolution, - depth, - num_heads, - window_size, - mlp_ratio=4., - qkv_bias=True, - qk_scale=None, - drop=0., - attn_drop=0., - drop_path=0., - norm_layer=nn.LayerNorm, - downsample=None, - use_checkpoint=False, - img_size=224, - patch_size=4, - resi_connection='1conv'): - super(RSTB, self).__init__() - - self.dim = dim - self.input_resolution = input_resolution - - self.residual_group = BasicLayer( - dim=dim, - input_resolution=input_resolution, - depth=depth, - num_heads=num_heads, - window_size=window_size, - mlp_ratio=mlp_ratio, - qkv_bias=qkv_bias, - qk_scale=qk_scale, - drop=drop, - attn_drop=attn_drop, - drop_path=drop_path, - norm_layer=norm_layer, - downsample=downsample, - use_checkpoint=use_checkpoint) - - if resi_connection == '1conv': - self.conv = nn.Conv2d(dim, dim, 3, 1, 1) - elif resi_connection == '3conv': - # to save parameters and memory - self.conv = nn.Sequential( - nn.Conv2d(dim, dim // 4, 3, 1, 1), nn.LeakyReLU(negative_slope=0.2, inplace=True), - nn.Conv2d(dim // 4, dim // 4, 1, 1, 0), nn.LeakyReLU(negative_slope=0.2, inplace=True), - nn.Conv2d(dim // 4, dim, 3, 1, 1)) - - self.patch_embed = PatchEmbed( - img_size=img_size, patch_size=patch_size, in_chans=0, embed_dim=dim, norm_layer=None) - - self.patch_unembed = PatchUnEmbed( - img_size=img_size, patch_size=patch_size, in_chans=0, embed_dim=dim, norm_layer=None) - - def forward(self, x, x_size): - return self.patch_embed(self.conv(self.patch_unembed(self.residual_group(x, x_size), x_size))) + x - - def flops(self): - flops = 0 - flops += self.residual_group.flops() - h, w = self.input_resolution - flops += h * w * self.dim * self.dim * 9 - flops += self.patch_embed.flops() - flops += self.patch_unembed.flops() - - return flops - - -class PatchEmbed(nn.Module): - r""" Image to Patch Embedding - - Args: - img_size (int): Image size. Default: 224. - patch_size (int): Patch token size. Default: 4. - in_chans (int): Number of input image channels. Default: 3. - embed_dim (int): Number of linear projection output channels. Default: 96. - norm_layer (nn.Module, optional): Normalization layer. Default: None - """ - - def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None): - super().__init__() - img_size = to_2tuple(img_size) - patch_size = to_2tuple(patch_size) - patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]] - self.img_size = img_size - self.patch_size = patch_size - self.patches_resolution = patches_resolution - self.num_patches = patches_resolution[0] * patches_resolution[1] - - self.in_chans = in_chans - self.embed_dim = embed_dim - - if norm_layer is not None: - self.norm = norm_layer(embed_dim) - else: - self.norm = None - - def forward(self, x): - x = x.flatten(2).transpose(1, 2) # b Ph*Pw c - if self.norm is not None: - x = self.norm(x) - return x - - def flops(self): - flops = 0 - h, w = self.img_size - if self.norm is not None: - flops += h * w * self.embed_dim - return flops - - -class PatchUnEmbed(nn.Module): - r""" Image to Patch Unembedding - - Args: - img_size (int): Image size. Default: 224. - patch_size (int): Patch token size. Default: 4. - in_chans (int): Number of input image channels. Default: 3. - embed_dim (int): Number of linear projection output channels. Default: 96. - norm_layer (nn.Module, optional): Normalization layer. 
Default: None - """ - - def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None): - super().__init__() - img_size = to_2tuple(img_size) - patch_size = to_2tuple(patch_size) - patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]] - self.img_size = img_size - self.patch_size = patch_size - self.patches_resolution = patches_resolution - self.num_patches = patches_resolution[0] * patches_resolution[1] - - self.in_chans = in_chans - self.embed_dim = embed_dim - - def forward(self, x, x_size): - x = x.transpose(1, 2).view(x.shape[0], self.embed_dim, x_size[0], x_size[1]) # b Ph*Pw c - return x - - def flops(self): - flops = 0 - return flops - - -class Upsample(nn.Sequential): - """Upsample module. - - Args: - scale (int): Scale factor. Supported scales: 2^n and 3. - num_feat (int): Channel number of intermediate features. - """ - - def __init__(self, scale, num_feat): - m = [] - if (scale & (scale - 1)) == 0: # scale = 2^n - for _ in range(int(math.log(scale, 2))): - m.append(nn.Conv2d(num_feat, 4 * num_feat, 3, 1, 1)) - m.append(nn.PixelShuffle(2)) - elif scale == 3: - m.append(nn.Conv2d(num_feat, 9 * num_feat, 3, 1, 1)) - m.append(nn.PixelShuffle(3)) - else: - raise ValueError(f'scale {scale} is not supported. Supported scales: 2^n and 3.') - super(Upsample, self).__init__(*m) - - -class UpsampleOneStep(nn.Sequential): - """UpsampleOneStep module (the difference with Upsample is that it always only has 1conv + 1pixelshuffle) - Used in lightweight SR to save parameters. - - Args: - scale (int): Scale factor. Supported scales: 2^n and 3. - num_feat (int): Channel number of intermediate features. - - """ - - def __init__(self, scale, num_feat, num_out_ch, input_resolution=None): - self.num_feat = num_feat - self.input_resolution = input_resolution - m = [] - m.append(nn.Conv2d(num_feat, (scale**2) * num_out_ch, 3, 1, 1)) - m.append(nn.PixelShuffle(scale)) - super(UpsampleOneStep, self).__init__(*m) - - def flops(self): - h, w = self.input_resolution - flops = h * w * self.num_feat * 3 * 9 - return flops - - -@ARCH_REGISTRY.register() -class SwinIR(nn.Module): - r""" SwinIR - A PyTorch impl of : `SwinIR: Image Restoration Using Swin Transformer`, based on Swin Transformer. - - Args: - img_size (int | tuple(int)): Input image size. Default 64 - patch_size (int | tuple(int)): Patch size. Default: 1 - in_chans (int): Number of input image channels. Default: 3 - embed_dim (int): Patch embedding dimension. Default: 96 - depths (tuple(int)): Depth of each Swin Transformer layer. - num_heads (tuple(int)): Number of attention heads in different layers. - window_size (int): Window size. Default: 7 - mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4 - qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True - qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None - drop_rate (float): Dropout rate. Default: 0 - attn_drop_rate (float): Attention dropout rate. Default: 0 - drop_path_rate (float): Stochastic depth rate. Default: 0.1 - norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm. - ape (bool): If True, add absolute position embedding to the patch embedding. Default: False - patch_norm (bool): If True, add normalization after patch embedding. Default: True - use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False - upscale: Upscale factor. 
2/3/4/8 for image SR, 1 for denoising and compress artifact reduction - img_range: Image range. 1. or 255. - upsampler: The reconstruction reconstruction module. 'pixelshuffle'/'pixelshuffledirect'/'nearest+conv'/None - resi_connection: The convolutional block before residual connection. '1conv'/'3conv' - """ - - def __init__(self, - img_size=64, - patch_size=1, - in_chans=3, - embed_dim=96, - depths=(6, 6, 6, 6), - num_heads=(6, 6, 6, 6), - window_size=7, - mlp_ratio=4., - qkv_bias=True, - qk_scale=None, - drop_rate=0., - attn_drop_rate=0., - drop_path_rate=0.1, - norm_layer=nn.LayerNorm, - ape=False, - patch_norm=True, - use_checkpoint=False, - upscale=2, - img_range=1., - upsampler='', - resi_connection='1conv', - **kwargs): - super(SwinIR, self).__init__() - num_in_ch = in_chans - num_out_ch = in_chans - num_feat = 64 - self.img_range = img_range - if in_chans == 3: - rgb_mean = (0.4488, 0.4371, 0.4040) - self.mean = torch.Tensor(rgb_mean).view(1, 3, 1, 1) - else: - self.mean = torch.zeros(1, 1, 1, 1) - self.upscale = upscale - self.upsampler = upsampler - - # ------------------------- 1, shallow feature extraction ------------------------- # - self.conv_first = nn.Conv2d(num_in_ch, embed_dim, 3, 1, 1) - - # ------------------------- 2, deep feature extraction ------------------------- # - self.num_layers = len(depths) - self.embed_dim = embed_dim - self.ape = ape - self.patch_norm = patch_norm - self.num_features = embed_dim - self.mlp_ratio = mlp_ratio - - # split image into non-overlapping patches - self.patch_embed = PatchEmbed( - img_size=img_size, - patch_size=patch_size, - in_chans=embed_dim, - embed_dim=embed_dim, - norm_layer=norm_layer if self.patch_norm else None) - num_patches = self.patch_embed.num_patches - patches_resolution = self.patch_embed.patches_resolution - self.patches_resolution = patches_resolution - - # merge non-overlapping patches into image - self.patch_unembed = PatchUnEmbed( - img_size=img_size, - patch_size=patch_size, - in_chans=embed_dim, - embed_dim=embed_dim, - norm_layer=norm_layer if self.patch_norm else None) - - # absolute position embedding - if self.ape: - self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim)) - trunc_normal_(self.absolute_pos_embed, std=.02) - - self.pos_drop = nn.Dropout(p=drop_rate) - - # stochastic depth - dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule - - # build Residual Swin Transformer blocks (RSTB) - self.layers = nn.ModuleList() - for i_layer in range(self.num_layers): - layer = RSTB( - dim=embed_dim, - input_resolution=(patches_resolution[0], patches_resolution[1]), - depth=depths[i_layer], - num_heads=num_heads[i_layer], - window_size=window_size, - mlp_ratio=self.mlp_ratio, - qkv_bias=qkv_bias, - qk_scale=qk_scale, - drop=drop_rate, - attn_drop=attn_drop_rate, - drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], # no impact on SR results - norm_layer=norm_layer, - downsample=None, - use_checkpoint=use_checkpoint, - img_size=img_size, - patch_size=patch_size, - resi_connection=resi_connection) - self.layers.append(layer) - self.norm = norm_layer(self.num_features) - - # build the last conv layer in deep feature extraction - if resi_connection == '1conv': - self.conv_after_body = nn.Conv2d(embed_dim, embed_dim, 3, 1, 1) - elif resi_connection == '3conv': - # to save parameters and memory - self.conv_after_body = nn.Sequential( - nn.Conv2d(embed_dim, embed_dim // 4, 3, 1, 1), nn.LeakyReLU(negative_slope=0.2, inplace=True), - 
nn.Conv2d(embed_dim // 4, embed_dim // 4, 1, 1, 0), nn.LeakyReLU(negative_slope=0.2, inplace=True), - nn.Conv2d(embed_dim // 4, embed_dim, 3, 1, 1)) - - # ------------------------- 3, high quality image reconstruction ------------------------- # - if self.upsampler == 'pixelshuffle': - # for classical SR - self.conv_before_upsample = nn.Sequential( - nn.Conv2d(embed_dim, num_feat, 3, 1, 1), nn.LeakyReLU(inplace=True)) - self.upsample = Upsample(upscale, num_feat) - self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1) - elif self.upsampler == 'pixelshuffledirect': - # for lightweight SR (to save parameters) - self.upsample = UpsampleOneStep(upscale, embed_dim, num_out_ch, - (patches_resolution[0], patches_resolution[1])) - elif self.upsampler == 'nearest+conv': - # for real-world SR (less artifacts) - assert self.upscale == 4, 'only support x4 now.' - self.conv_before_upsample = nn.Sequential( - nn.Conv2d(embed_dim, num_feat, 3, 1, 1), nn.LeakyReLU(inplace=True)) - self.conv_up1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1) - self.conv_up2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1) - self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1) - self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1) - self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True) - else: - # for image denoising and JPEG compression artifact reduction - self.conv_last = nn.Conv2d(embed_dim, num_out_ch, 3, 1, 1) - - self.apply(self._init_weights) - - def _init_weights(self, m): - if isinstance(m, nn.Linear): - trunc_normal_(m.weight, std=.02) - if isinstance(m, nn.Linear) and m.bias is not None: - nn.init.constant_(m.bias, 0) - elif isinstance(m, nn.LayerNorm): - nn.init.constant_(m.bias, 0) - nn.init.constant_(m.weight, 1.0) - - @torch.jit.ignore - def no_weight_decay(self): - return {'absolute_pos_embed'} - - @torch.jit.ignore - def no_weight_decay_keywords(self): - return {'relative_position_bias_table'} - - def forward_features(self, x): - x_size = (x.shape[2], x.shape[3]) - x = self.patch_embed(x) - if self.ape: - x = x + self.absolute_pos_embed - x = self.pos_drop(x) - - for layer in self.layers: - x = layer(x, x_size) - - x = self.norm(x) # b seq_len c - x = self.patch_unembed(x, x_size) - - return x - - def forward(self, x): - self.mean = self.mean.type_as(x) - x = (x - self.mean) * self.img_range - - if self.upsampler == 'pixelshuffle': - # for classical SR - x = self.conv_first(x) - x = self.conv_after_body(self.forward_features(x)) + x - x = self.conv_before_upsample(x) - x = self.conv_last(self.upsample(x)) - elif self.upsampler == 'pixelshuffledirect': - # for lightweight SR - x = self.conv_first(x) - x = self.conv_after_body(self.forward_features(x)) + x - x = self.upsample(x) - elif self.upsampler == 'nearest+conv': - # for real-world SR - x = self.conv_first(x) - x = self.conv_after_body(self.forward_features(x)) + x - x = self.conv_before_upsample(x) - x = self.lrelu(self.conv_up1(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest'))) - x = self.lrelu(self.conv_up2(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest'))) - x = self.conv_last(self.lrelu(self.conv_hr(x))) - else: - # for image denoising and JPEG compression artifact reduction - x_first = self.conv_first(x) - res = self.conv_after_body(self.forward_features(x_first)) + x_first - x = x + self.conv_last(res) - - x = x / self.img_range + self.mean - - return x - - def flops(self): - flops = 0 - h, w = self.patches_resolution - flops += h * w * 3 * self.embed_dim * 9 - flops += self.patch_embed.flops() - for layer 
in self.layers: - flops += layer.flops() - flops += h * w * 3 * self.embed_dim * self.embed_dim - flops += self.upsample.flops() - return flops - - -if __name__ == '__main__': - upscale = 4 - window_size = 8 - height = (1024 // upscale // window_size + 1) * window_size - width = (720 // upscale // window_size + 1) * window_size - model = SwinIR( - upscale=2, - img_size=(height, width), - window_size=window_size, - img_range=1., - depths=[6, 6, 6, 6], - embed_dim=60, - num_heads=[6, 6, 6, 6], - mlp_ratio=2, - upsampler='pixelshuffledirect') - print(model) - print(height, width, model.flops() / 1e9) - - x = torch.randn((1, 3, height, width)) - x = model(x) - print(x.shape) diff --git a/spaces/bioriAsaeru/text-to-voice/Battleshiptamildubbedfullmoviefreedownload Dont Miss the Spectacular Visual Effects and Soundtrack of the Film.md b/spaces/bioriAsaeru/text-to-voice/Battleshiptamildubbedfullmoviefreedownload Dont Miss the Spectacular Visual Effects and Soundtrack of the Film.md deleted file mode 100644 index 5ca27366dad2181103bae4dbd5609be0992036fd..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Battleshiptamildubbedfullmoviefreedownload Dont Miss the Spectacular Visual Effects and Soundtrack of the Film.md +++ /dev/null @@ -1,6 +0,0 @@ -

Battleshiptamildubbedfullmoviefreedownload


Download File ===> https://urloso.com/2uyPQ6



- - aaccfb2cb3
-
-
-

diff --git a/spaces/bioriAsaeru/text-to-voice/Facebook Chat Sniffer Network How to Install and Use This Powerful Application.md b/spaces/bioriAsaeru/text-to-voice/Facebook Chat Sniffer Network How to Install and Use This Powerful Application.md deleted file mode 100644 index da60c62d275fd7de98acd30fb1f5e8711942f5d8..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Facebook Chat Sniffer Network How to Install and Use This Powerful Application.md +++ /dev/null @@ -1,13 +0,0 @@ -
-

IMMonitor Facebook Spy is a packet sniffer that reads Facebook messages. The application can detect all the Facebook chat conversations that are taking place in your network and it displays them organized by contact name. This app doesn't require a client to be installed on the target computer, instead it can monitor any IP address in the same subnet as the computer that is running the app.

-

The first step that you have to do to use this app is tell it what IP address to scan. This is quite simple. You can either manually type the IP address or carry out a scan for any active computers in the network and then select that computer from a drop-down menu. After the IP address is set, the scan can be started. When the application detects an ongoing chat, it displays the messages and it organizes them by contact name. It also keeps a log of how many messages were sent and it shows it. I like that you don't simply see packets and technical jargon all over the screen. You only see contact names and messages, along with some packet info once in a while.

-

Facebook Chat Sniffer Network


Download File ……… https://urloso.com/2uyRaB



-

I need to disagree that this method monitors all network traffic in and out of your home. It simply logs the IP addresses, which is not the same thing. There are legitimate sites that can be visited where my teen can still get into trouble. My main interest is what data is being exchanged with host IP addresses: is it an innocent chat with one of her friends, or is the pedophile across town trying to set up a meeting at the mall? Do you see the important difference? I have been scouring the web looking for a solution, but all I can find is network sniffers that record everything in a sort of cryptic raw format, and you have to be really savvy to interpret the data and what it means.

-

PCAP is an abbreviation of "packet capture." A PCAP tool copies packets as they travel around the network. The captured packets are displayed in a viewer within the tool, stored to a file, or both. Some PCAP tools will copy all of each packet, including its data payload, while others only display and/or store packet headers. PCAP tools that capture packets in their entirety create very large files and are stored with the .pcap extension.

What are the best network traffic analysis tools? Our research shows that the best network traffic analysis tools are SolarWinds Deep Packet Inspection and Analysis Tool, Paessler Packet Capture Tool, ManageEngine NetFlow Analyzer, and the Omnipeek Network Protocol Analyzer. There are also some industry favorites such as tcpdump, Windump, and Wireshark.

How does a packet analyzer work? A packet analyzer captures packets as they travel around the network. This can be implemented as a stand-alone packet capture device that works as a TAP, or as software that accesses the network adapter of its host computer in "promiscuous mode." As well as copying network packets, a packet analyzer needs to offer a utility to view, search, and filter packet data. Some packet analyzers also include more sophisticated analysis tools.

Can packet sniffing be detected? Packet sniffing can be detected in certain circumstances. The solution to finding packet capture depends on the location of the packet sniffer and the method it uses. A software packet sniffing tool requires that the host computer's network adapter is in promiscuous mode. Issuing a Ping with the right IP address but the wrong MAC address for each computer on the network should spot the hosts that are in promiscuous mode and therefore likely to be in use for packet sniffing.

What is full packet capture? Full packet capture copies all of a packet, including the data payload. Typically, full packet capture data gets stored in a file with the .pcap extension. Businesses don't like network professionals using this method because the contents of the packet might not be encrypted. Allowing IT department staff to use full packet capture capabilities can break the confidentiality of data held by the enterprise and invalidate data security standards compliance.

11 Best Packet Sniffers: looking for ways to get a bird's-eye view of your network's traffic and establish some control of data loss and flows? This article rounds up the best packet sniffers and software tools.

-

The actions you take depend on your available budget. If you have the resources to expand network capacity, the packet sniffer will enable you to target new resources more effectively. If you have no budget, packet sniffing will help traffic shaping through prioritizing application traffic, resizing subnets, rescheduling heavy-traffic events, limiting bandwidth for specific applications, or replacing applications with more efficient alternatives.

-

The key feature of a packet sniffer is that it copies data as it travels across a network and makes it available for viewing. The sniffing device simply copies all of the data that it sees passing over a network. When implemented on a switch, settings of the device allow the passing packet to be sent to a second port as well as the intended destination, thus duplicating traffic. Usually, the packets of data that are reaped from the network get copied to a file. Some tools will also show that data in a dashboard. However, packet sniffers can gather a lot of data, which includes encoded admin information. You will need to find an analysis tool that can help you be dereferencing information on the journey of the packets in the extract and other pieces of information, such as the relevance of the port numbers that the packets travel between.
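As a minimal sketch of that copying step in Python (assuming the third-party scapy package is installed and the script runs with capture privileges; none of the tools reviewed here require this), the following grabs a handful of packets, prints a one-line summary of each, and stores the copies in a .pcap file:

# Minimal packet-capture sketch (assumes scapy is installed and the script
# runs with capture privileges, e.g. as root on Linux).
from scapy.all import sniff, wrpcap

packets = sniff(count=20)        # copy 20 packets from the default interface
for pkt in packets:
    print(pkt.summary())         # view a one-line summary of each captured packet
wrpcap("capture.pcap", packets)  # store the copies in a .pcap file for later analysis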

-

The PRTG packet sniffer only captures the headers of the packets traveling across your network. This gives the packet analyzer a speed advantage and it also reduces the amount of storage space needed to hold capture files. The dashboard of the packet sniffer categorizes traffic by application type. These include email traffic, web packets, chat app traffic data, and file transfer packet volumes.

-

NetworkMiner is a fascinating tool that falls more into the category of a forensic tool rather than a straight-up network sniffer. The field of forensics typically deals with the investigation and collection of evidence and Network Miner does that job well for network traffic. Much like WireShark can follow a TCP stream to recover an entire TCP conversation, Network Miner can follow a stream to reconstruct files that were sent over the network.

-

aaccfb2cb3
-
-
\ No newline at end of file diff --git a/spaces/bioriAsaeru/text-to-voice/Integral Equations By Md Raisinghania Pdf Download.md b/spaces/bioriAsaeru/text-to-voice/Integral Equations By Md Raisinghania Pdf Download.md deleted file mode 100644 index 5f4e1980c557ffe24cf1860860c0015dc8b3fecf..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Integral Equations By Md Raisinghania Pdf Download.md +++ /dev/null @@ -1,10 +0,0 @@ - -

also, the book is available in hindi, and the translated version can be purchased from gotoassist. in addition to the book, students also have the option to purchase the course materials, which include:

-

integral equations by md raisinghania pdf download


Download Zip >>>>> https://urloso.com/2uyPAn



  • integral equations and boundary value problems for a single course
  • integral equations and boundary value problems for multiple courses
  • integral equations and boundary value problems for a whole semester
-

the book is mainly divided into two major parts. in the first part, raisinghania has included a chapter on the classic fredholm integral equation. the second part is devoted to the singular integral equations. for this, there is a chapter on the singular integral equations, followed by the boundary value problems.

-

chapter 1 contains important definitions, along with theorems and results. it deals with the basic tools for the solution of the boundary value problems. this chapter provides a very lucid introduction to linear operators, which is important for the further chapters on the fredholm integral equation. it also establishes the link between linear operators and the integral equations which are defined and dealt with in the remaining chapters.

-

chapter 2 deals with the fredholm integral equation of the first kind. this is considered as the fundamental equation to the solution of the boundary value problems. this chapter is illustrated with many examples, and it has a separate chapter on perturbation theory.

-

chapter 3 deals with the fredholm integral equation of the second kind. this is the fundamental equation to the solution of the boundary value problems. it also considers the dirichlet integral equation. in this chapter, the equality case is examined in depth. the well-known wiener-hopf factorization theorem is considered in detail. it is also shown how the integral equation is related to the linear ordinary differential equation.
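For reference, in standard notation the two equations these chapters revolve around are the Fredholm integral equation of the first kind,

$$ g(x) = \int_a^b K(x, t)\, f(t)\, dt, $$

and the Fredholm integral equation of the second kind,

$$ f(x) = g(x) + \lambda \int_a^b K(x, t)\, f(t)\, dt, $$

where K(x, t) is the kernel, g(x) is a known function, and f(x) is the unknown function to be solved for.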

-

899543212b
-
-
\ No newline at end of file diff --git a/spaces/bioriAsaeru/text-to-voice/Kamasutra Malayalam Book Pdf 183.md b/spaces/bioriAsaeru/text-to-voice/Kamasutra Malayalam Book Pdf 183.md deleted file mode 100644 index 5e7a33ba518f93f2554d46cf4834ce6f685c3b71..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Kamasutra Malayalam Book Pdf 183.md +++ /dev/null @@ -1,7 +0,0 @@ - -

The Ten Commandments were meant exclusively for Jewish males.[65] Michael Coogan writes that according to the text wives are the property of their husband, marriage meaning transfer of property (from father to husband),[65] and women are less valuable than real estate, being mentioned after real estate.[65] Adultery is violating the property right of a man.[66] Coogan's book was criticized by Phyllis Trible, who argues that he failed to note that patriarchy was not decreed, but only described by God, patriarchy being specific to people after the fall.[67] She states that Paul the Apostle made the same mistake as Coogan.[67]

-

kamasutra malayalam book pdf 183


Download File ★ https://urloso.com/2uyRnE



-

The Manusmriti, also known as the Laws of Manu, deals with this in greater detail. When translated, verse 4.134 of the book declares adultery to be a heinous offense.[102] The Manusmriti does not include adultery as a "grievous sin", but includes it as a "secondary sin" that leads to a loss of caste.[103] In the book, the intent and mutual consent are a part that determine the recommended punishment. Rape is not considered as adultery for the woman, while the rapist is punished severely. Lesser punishment is recommended for consensual adulterous sex.[100] Death penalty is mentioned by Manu,[104] as well as "penance" for the sin of adultery.[103][105] even in cases of repeated adultery with a man of the same caste.[106] In verses 8.362-363, the author states that sexual relations with the wife of traveling performer is not a sin, and exempts such sexual liaisons.[107][108] The book offers two views on adultery. It recommends a new married couple to remain sexually faithful to each other for life. It also accepts that adulterous relationships happen, children are born from such relationships and then proceeds to reason that the child belongs to the legal husband of the pregnant woman, and not to the biological father.[109]

-

The theme of adultery has been used in many literary works, and has served as a theme for notable books such as Anna Karenina, Madame Bovary, Lady Chatterley's Lover, The Scarlet Letter and Adultery. It has also been the theme of many movies.

aaccfb2cb3
-
-
\ No newline at end of file diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/structures/instances.py b/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/structures/instances.py deleted file mode 100644 index c9579bce2730f42e256c6eed99d9014d09304c99..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/structures/instances.py +++ /dev/null @@ -1,194 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import itertools -import warnings -from typing import Any, Dict, List, Tuple, Union -import torch - - -class Instances: - """ - This class represents a list of instances in an image. - It stores the attributes of instances (e.g., boxes, masks, labels, scores) as "fields". - All fields must have the same ``__len__`` which is the number of instances. - - All other (non-field) attributes of this class are considered private: - they must start with '_' and are not modifiable by a user. - - Some basic usage: - - 1. Set/get/check a field: - - .. code-block:: python - - instances.gt_boxes = Boxes(...) - print(instances.pred_masks) # a tensor of shape (N, H, W) - print('gt_masks' in instances) - - 2. ``len(instances)`` returns the number of instances - 3. Indexing: ``instances[indices]`` will apply the indexing on all the fields - and returns a new :class:`Instances`. - Typically, ``indices`` is a integer vector of indices, - or a binary mask of length ``num_instances`` - - .. code-block:: python - - category_3_detections = instances[instances.pred_classes == 3] - confident_detections = instances[instances.scores > 0.9] - """ - - def __init__(self, image_size: Tuple[int, int], **kwargs: Any): - """ - Args: - image_size (height, width): the spatial size of the image. - kwargs: fields to add to this `Instances`. - """ - self._image_size = image_size - self._fields: Dict[str, Any] = {} - for k, v in kwargs.items(): - self.set(k, v) - - @property - def image_size(self) -> Tuple[int, int]: - """ - Returns: - tuple: height, width - """ - return self._image_size - - def __setattr__(self, name: str, val: Any) -> None: - if name.startswith("_"): - super().__setattr__(name, val) - else: - self.set(name, val) - - def __getattr__(self, name: str) -> Any: - if name == "_fields" or name not in self._fields: - raise AttributeError("Cannot find field '{}' in the given Instances!".format(name)) - return self._fields[name] - - def set(self, name: str, value: Any) -> None: - """ - Set the field named `name` to `value`. - The length of `value` must be the number of instances, - and must agree with other existing fields in this object. - """ - with warnings.catch_warnings(record=True): - data_len = len(value) - if len(self._fields): - assert ( - len(self) == data_len - ), "Adding a field of length {} to a Instances of length {}".format(data_len, len(self)) - self._fields[name] = value - - def has(self, name: str) -> bool: - """ - Returns: - bool: whether the field called `name` exists. - """ - return name in self._fields - - def remove(self, name: str) -> None: - """ - Remove the field called `name`. - """ - del self._fields[name] - - def get(self, name: str) -> Any: - """ - Returns the field called `name`. - """ - return self._fields[name] - - def get_fields(self) -> Dict[str, Any]: - """ - Returns: - dict: a dict which maps names (str) to data of the fields - - Modifying the returned dict will modify this instance. 
- """ - return self._fields - - # Tensor-like methods - def to(self, *args: Any, **kwargs: Any) -> "Instances": - """ - Returns: - Instances: all fields are called with a `to(device)`, if the field has this method. - """ - ret = Instances(self._image_size) - for k, v in self._fields.items(): - if hasattr(v, "to"): - v = v.to(*args, **kwargs) - ret.set(k, v) - return ret - - def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "Instances": - """ - Args: - item: an index-like object and will be used to index all the fields. - - Returns: - If `item` is a string, return the data in the corresponding field. - Otherwise, returns an `Instances` where all fields are indexed by `item`. - """ - if type(item) == int: - if item >= len(self) or item < -len(self): - raise IndexError("Instances index out of range!") - else: - item = slice(item, None, len(self)) - - ret = Instances(self._image_size) - for k, v in self._fields.items(): - ret.set(k, v[item]) - return ret - - def __len__(self) -> int: - for v in self._fields.values(): - # use __len__ because len() has to be int and is not friendly to tracing - return v.__len__() - raise NotImplementedError("Empty Instances does not support __len__!") - - def __iter__(self): - raise NotImplementedError("`Instances` object is not iterable!") - - @staticmethod - def cat(instance_lists: List["Instances"]) -> "Instances": - """ - Args: - instance_lists (list[Instances]) - - Returns: - Instances - """ - assert all(isinstance(i, Instances) for i in instance_lists) - assert len(instance_lists) > 0 - if len(instance_lists) == 1: - return instance_lists[0] - - image_size = instance_lists[0].image_size - if not isinstance(image_size, torch.Tensor): # could be a tensor in tracing - for i in instance_lists[1:]: - assert i.image_size == image_size - ret = Instances(image_size) - for k in instance_lists[0]._fields.keys(): - values = [i.get(k) for i in instance_lists] - v0 = values[0] - if isinstance(v0, torch.Tensor): - values = torch.cat(values, dim=0) - elif isinstance(v0, list): - values = list(itertools.chain(*values)) - elif hasattr(type(v0), "cat"): - values = type(v0).cat(values) - else: - raise ValueError("Unsupported type {} for concatenation".format(type(v0))) - ret.set(k, values) - return ret - - def __str__(self) -> str: - s = self.__class__.__name__ + "(" - s += "num_instances={}, ".format(len(self)) - s += "image_height={}, ".format(self._image_size[0]) - s += "image_width={}, ".format(self._image_size[1]) - s += "fields=[{}])".format(", ".join((f"{k}: {v}" for k, v in self._fields.items()))) - return s - - __repr__ = __str__ diff --git a/spaces/bulentsofttech/gradio_s1000_veri_toplama_modeli/yolov5-flask-master/restapi.py b/spaces/bulentsofttech/gradio_s1000_veri_toplama_modeli/yolov5-flask-master/restapi.py deleted file mode 100644 index 2bab7b06a5e5397b46db43f375c7cff398646501..0000000000000000000000000000000000000000 --- a/spaces/bulentsofttech/gradio_s1000_veri_toplama_modeli/yolov5-flask-master/restapi.py +++ /dev/null @@ -1,41 +0,0 @@ -""" -Run a rest API exposing the yolov5s object detection model -""" -import argparse -import io -from PIL import Image - -import torch -from flask import Flask, request - -app = Flask(__name__) - -DETECTION_URL = "/v1/object-detection/yolov5s" - - -@app.route(DETECTION_URL, methods=["POST"]) -def predict(): - if not request.method == "POST": - return - - if request.files.get("image"): - image_file = request.files["image"] - image_bytes = image_file.read() - - img = Image.open(io.BytesIO(image_bytes)) - - 
results = model(img, size=640) - data = results.pandas().xyxy[0].to_json(orient="records") - return data - - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description="Flask api exposing yolov5 model") - parser.add_argument("--port", default=5000, type=int, help="port number") - args = parser.parse_args() - - model = torch.hub.load( - "ultralytics/yolov5", "yolov5s", pretrained=True, force_reload=True - ).autoshape() # force_reload = recache latest code - model.eval() - app.run(host="0.0.0.0", port=args.port) # debug=True causes Restarting with stat diff --git a/spaces/carlosalonso/Detection-video/carpeta_deteccion/projects/TridentNet/tridentnet/config.py b/spaces/carlosalonso/Detection-video/carpeta_deteccion/projects/TridentNet/tridentnet/config.py deleted file mode 100644 index 4b8732a43f6974ec60168652bf08e382ddc9c941..0000000000000000000000000000000000000000 --- a/spaces/carlosalonso/Detection-video/carpeta_deteccion/projects/TridentNet/tridentnet/config.py +++ /dev/null @@ -1,26 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) Facebook, Inc. and its affiliates. - -from detectron2.config import CfgNode as CN - - -def add_tridentnet_config(cfg): - """ - Add config for tridentnet. - """ - _C = cfg - - _C.MODEL.TRIDENT = CN() - - # Number of branches for TridentNet. - _C.MODEL.TRIDENT.NUM_BRANCH = 3 - # Specify the dilations for each branch. - _C.MODEL.TRIDENT.BRANCH_DILATIONS = [1, 2, 3] - # Specify the stage for applying trident blocks. Default stage is Res4 according to the - # TridentNet paper. - _C.MODEL.TRIDENT.TRIDENT_STAGE = "res4" - # Specify the test branch index TridentNet Fast inference: - # - use -1 to aggregate results of all branches during inference. - # - otherwise, only using specified branch for fast inference. Recommended setting is - # to use the middle branch. - _C.MODEL.TRIDENT.TEST_BRANCH_IDX = 1 diff --git a/spaces/carloscar/stable-diffusion-webui-controlnet-docker/run.py b/spaces/carloscar/stable-diffusion-webui-controlnet-docker/run.py deleted file mode 100644 index 503c6065bd96c8739fb81c94485ea2f7c86441de..0000000000000000000000000000000000000000 --- a/spaces/carloscar/stable-diffusion-webui-controlnet-docker/run.py +++ /dev/null @@ -1,55 +0,0 @@ -import os -import subprocess -import sys - - -def on_start(): - print("---------------") - print("Running script './on_start.sh' to download models ...") - print("---------------") - result = subprocess.run("./on_start.sh", shell=True, env=os.environ) - if result.returncode != 0: - raise RuntimeError(f"Error executing ./on_start.sh [exit code: {result.returncode}]") - - -def start(): - print("---------------") - print(f"Launching {'API server' if '--nowebui' in sys.argv else 'Web UI'} with arguments: {' '.join(sys.argv[1:])}") - print("---------------") - import webui # type: ignore # noqa - - if "--nowebui" in sys.argv: - webui.api_only() - else: - webui.webui() - - -def set_options(): - import torch # type: ignore # noqa - - if not torch.cuda.is_available(): - # If no GPU is available, uninstall xformers and apply "--precision full --no-half --use-cpu all" to sys.argv. - os.system(f"{sys.executable} -m pip uninstall -y xformers") - sys.argv.extend( - [ - "--precision", - "full", - "--no-half", - "--use-cpu", - "all", - ] - ) - else: - # Applies "--force-enable-xformers --xformers" to sys.argv when there's a GPU present. 
- sys.argv.extend(["--force-enable-xformers", "--xformers"]) - - is_shared_ui = str(os.environ.get("IS_SHARED_UI", "") or "").strip().lower() not in ("", "0", "false", "none", "no") - if not is_shared_ui: - # Provide access to extensions only if IS_SHARED_UI isn't set. - sys.argv.extend(["--enable-insecure-extension-access"]) - - -if __name__ == "__main__": - set_options() - on_start() - start() diff --git a/spaces/catasaurus/sound-distance/app.py b/spaces/catasaurus/sound-distance/app.py deleted file mode 100644 index 25a126216f750a45da8615665c9d297902735338..0000000000000000000000000000000000000000 --- a/spaces/catasaurus/sound-distance/app.py +++ /dev/null @@ -1,7 +0,0 @@ -import gradio as gr -import soundex - -model = soundex.Soundex() - -iface = gr.Interface(fn=model.compare, inputs=[gr.Textbox(label='Text 1'), gr.Textbox(label='Text 2')], outputs=gr.Textbox(label='Sound distance'), title="Find how different two words sound") -iface.launch() \ No newline at end of file diff --git a/spaces/cc1799/vits-uma-genshin-honkai/text/__init__.py b/spaces/cc1799/vits-uma-genshin-honkai/text/__init__.py deleted file mode 100644 index 663c4b6416affb53c9dc56dddbc8b2b65d4bf518..0000000000000000000000000000000000000000 --- a/spaces/cc1799/vits-uma-genshin-honkai/text/__init__.py +++ /dev/null @@ -1,57 +0,0 @@ -""" from https://github.com/keithito/tacotron """ -from text import cleaners -from text.symbols import symbols - - -# Mappings from symbol to numeric ID and vice versa: -_symbol_to_id = {s: i for i, s in enumerate(symbols)} -_id_to_symbol = {i: s for i, s in enumerate(symbols)} - - -def text_to_sequence(text, symbols, cleaner_names): - '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. - Args: - text: string to convert to a sequence - cleaner_names: names of the cleaner functions to run the text through - Returns: - List of integers corresponding to the symbols in the text - ''' - _symbol_to_id = {s: i for i, s in enumerate(symbols)} - sequence = [] - - clean_text = _clean_text(text, cleaner_names) - for symbol in clean_text: - if symbol not in _symbol_to_id.keys(): - continue - symbol_id = _symbol_to_id[symbol] - sequence += [symbol_id] - return sequence, clean_text - - -def cleaned_text_to_sequence(cleaned_text): - '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. 
- Args: - text: string to convert to a sequence - Returns: - List of integers corresponding to the symbols in the text - ''' - sequence = [_symbol_to_id[symbol] for symbol in cleaned_text if symbol in _symbol_to_id.keys()] - return sequence - - -def sequence_to_text(sequence): - '''Converts a sequence of IDs back to a string''' - result = '' - for symbol_id in sequence: - s = _id_to_symbol[symbol_id] - result += s - return result - - -def _clean_text(text, cleaner_names): - for name in cleaner_names: - cleaner = getattr(cleaners, name) - if not cleaner: - raise Exception('Unknown cleaner: %s' % name) - text = cleaner(text) - return text diff --git a/spaces/ceyda/kornia-augmentations-tester/kornia_aug.py b/spaces/ceyda/kornia-augmentations-tester/kornia_aug.py deleted file mode 100644 index 466384608c471685933a9a234a857a9e78663d7d..0000000000000000000000000000000000000000 --- a/spaces/ceyda/kornia-augmentations-tester/kornia_aug.py +++ /dev/null @@ -1,151 +0,0 @@ -import streamlit as st -import kornia -from torch import nn -import torch -from torchvision.transforms import functional as F -from torchvision.utils import make_grid -from streamlit_ace import st_ace -from PIL import Image - -IS_LOCAL = False #Change this - -@st.cache(suppress_st_warning=True) -def set_transform(content): - # st.write("set transform") - try: - transform = eval(content, {"kornia": kornia, "nn": nn}, None) - except Exception as e: - st.write(f"There was an error: {e}") - transform = nn.Sequential() - return transform - -st.markdown("# Kornia Augmentations Demo") -st.sidebar.markdown( - "[Kornia](https://github.com/kornia/kornia) is a *differentiable* computer vision library for PyTorch." -) -ims=[] -uploaded_files = st.sidebar.file_uploader("Choose a file",accept_multiple_files=True) -if uploaded_files is not None: - for uploaded_file in uploaded_files: - im = Image.open(uploaded_file) - st.sidebar.image(im, caption="Input Image", width=256) - im=im.resize((512,512)) - image = F.pil_to_tensor(im).float() / 255 - ims.append(image) -else: - im = Image.open("./images/pretty_bird.jpg") - st.sidebar.image(im, caption="Input Image", width=256) - image = F.pil_to_tensor(im).float() / 255 - ims.append(image) -scaler = 256 - -# batch size is just for show -batch_size = st.sidebar.slider("batch_size", min_value=4, max_value=16,value=8) -gpu = st.sidebar.checkbox("Use GPU!", value=True) -if not gpu: - st.sidebar.markdown("With Kornia you do ops on the GPU!") - device = torch.device("cpu") -else: - if not IS_LOCAL: - st.sidebar.markdown("(GPU Not available on hosted demo, try on your local!)") - # Credits - st.sidebar.caption("Demo made by [Ceyda Cinarel](https://linktr.ee/ceydai)") - st.sidebar.markdown("Clone [Code](https://github.com/cceyda/kornia-demo)") - device = torch.device("cpu") - else: - st.sidebar.markdown("Running on GPU~") - device = torch.device("cuda:0") - -predefined_transforms = [ - """ -nn.Sequential( - kornia.augmentation.RandomAffine(degrees=360,p=0.5), - kornia.augmentation.ColorJitter(brightness=0.2, contrast=0.3, saturation=0.2, hue=0.3, p=1) -) -# p=0.5 is the probability of applying the transformation -""", - """ -nn.Sequential( - kornia.augmentation.RandomErasing(scale=(.4, .8), ratio=(.3, 1/.3), p=0.5), -) -""", - """ -nn.Sequential( - kornia.augmentation.RandomErasing(scale=(.4, .8), ratio=(.3, 1/.3), p=1, same_on_batch=False), -) -#By setting same_on_batch=True you can apply the same transform across the batch -""", - f""" -nn.Sequential( - kornia.augmentation.RandomResizedCrop(size=({scaler}, 
{scaler}), scale=(3., 3.), ratio=(2., 2.), p=1.), - kornia.augmentation.RandomHorizontalFlip(p=0.7), - kornia.augmentation.RandomGrayscale(p=0.5), -) -""", -] - -selected_transform = st.selectbox( - "Pick an augmentation pipeline example:", predefined_transforms -) - -st.write("Transform to apply:") -readonly = False -content = st_ace( - value=selected_transform, - height=150, - language="python", - keybinding="vscode", - show_gutter=True, - show_print_margin=True, - wrap=False, - auto_update=False, - readonly=readonly, -) -if content: - # st.write(content) - transform = set_transform(content) - -# st.write(transform) - -# with st.echo(): -# transform = nn.Sequential( -# K.RandomAffine(360), -# K.ColorJitter(0.2, 0.3, 0.2, 0.3) -# ) - -process = st.button("Next Batch") - -# Fake dataloader -if len(ims)>1: - image_batch = torch.stack(ims) -else: - image_batch = torch.stack(batch_size * ims) - - -image_batch.to(device) -transformeds = None -try: - transformeds = transform(image_batch) -except Exception as e: - st.write(f"There was an error: {e}") - - - - -cols = st.columns(4) - -# st.image(F.to_pil_image(make_grid(transformeds))) -if transformeds is not None: - for i, x in enumerate(transformeds): - i = i % 4 - cols[i].image(F.to_pil_image(x), use_column_width=True) - -st.markdown( - "There are a lot more transformations available: [Documentation](https://kornia.readthedocs.io/en/latest/augmentation.module.html)" -) -st.markdown( - "Kornia can do a lot more than augmentations~ [Check it out](https://kornia.readthedocs.io/en/latest/introduction.html#highlighted-features)" -) -# if process: -# pass - diff --git a/spaces/cfwef/gpt/crazy_functions/test_project/cpp/cppipc/queue.h b/spaces/cfwef/gpt/crazy_functions/test_project/cpp/cppipc/queue.h deleted file mode 100644 index a21f3446e06b5826af7b554c8a7d9c5d80848b62..0000000000000000000000000000000000000000 --- a/spaces/cfwef/gpt/crazy_functions/test_project/cpp/cppipc/queue.h +++ /dev/null @@ -1,216 +0,0 @@ -#pragma once - -#include -#include -#include // [[since C++14]]: std::exchange -#include -#include -#include -#include -#include -#include -#include // assert - -#include "libipc/def.h" -#include "libipc/shm.h" -#include "libipc/rw_lock.h" - -#include "libipc/utility/log.h" -#include "libipc/platform/detail.h" -#include "libipc/circ/elem_def.h" - -namespace ipc { -namespace detail { - -class queue_conn { -protected: - circ::cc_t connected_ = 0; - shm::handle elems_h_; - - template - Elems* open(char const * name) { - if (name == nullptr || name[0] == '\0') { - ipc::error("fail open waiter: name is empty!\n"); - return nullptr; - } - if (!elems_h_.acquire(name, sizeof(Elems))) { - return nullptr; - } - auto elems = static_cast(elems_h_.get()); - if (elems == nullptr) { - ipc::error("fail acquire elems: %s\n", name); - return nullptr; - } - elems->init(); - return elems; - } - - void close() { - elems_h_.release(); - } - -public: - queue_conn() = default; - queue_conn(const queue_conn&) = delete; - queue_conn& operator=(const queue_conn&) = delete; - - bool connected() const noexcept { - return connected_ != 0; - } - - circ::cc_t connected_id() const noexcept { - return connected_; - } - - template - auto connect(Elems* elems) noexcept - /*needs 'optional' here*/ - -> std::tuple().cursor())> { - if (elems == nullptr) return {}; - // if it's already connected, just return - if (connected()) return {connected(), false, 0}; - connected_ = elems->connect_receiver(); - return {connected(), true, elems->cursor()}; - } - - template - bool 
disconnect(Elems* elems) noexcept { - if (elems == nullptr) return false; - // if it's already disconnected, just return false - if (!connected()) return false; - elems->disconnect_receiver(std::exchange(connected_, 0)); - return true; - } -}; - -template -class queue_base : public queue_conn { - using base_t = queue_conn; - -public: - using elems_t = Elems; - using policy_t = typename elems_t::policy_t; - -protected: - elems_t * elems_ = nullptr; - decltype(std::declval().cursor()) cursor_ = 0; - bool sender_flag_ = false; - -public: - using base_t::base_t; - - queue_base() = default; - - explicit queue_base(char const * name) - : queue_base{} { - elems_ = open(name); - } - - explicit queue_base(elems_t * elems) noexcept - : queue_base{} { - assert(elems != nullptr); - elems_ = elems; - } - - /* not virtual */ ~queue_base() { - base_t::close(); - } - - elems_t * elems() noexcept { return elems_; } - elems_t const * elems() const noexcept { return elems_; } - - bool ready_sending() noexcept { - if (elems_ == nullptr) return false; - return sender_flag_ || (sender_flag_ = elems_->connect_sender()); - } - - void shut_sending() noexcept { - if (elems_ == nullptr) return; - if (!sender_flag_) return; - elems_->disconnect_sender(); - } - - bool connect() noexcept { - auto tp = base_t::connect(elems_); - if (std::get<0>(tp) && std::get<1>(tp)) { - cursor_ = std::get<2>(tp); - return true; - } - return std::get<0>(tp); - } - - bool disconnect() noexcept { - return base_t::disconnect(elems_); - } - - std::size_t conn_count() const noexcept { - return (elems_ == nullptr) ? static_cast(invalid_value) : elems_->conn_count(); - } - - bool valid() const noexcept { - return elems_ != nullptr; - } - - bool empty() const noexcept { - return !valid() || (cursor_ == elems_->cursor()); - } - - template - bool push(F&& prep, P&&... params) { - if (elems_ == nullptr) return false; - return elems_->push(this, [&](void* p) { - if (prep(p)) ::new (p) T(std::forward
<P>(params)...); - }); - } - - template <typename T, typename F, typename... P> - bool force_push(F&& prep, P&&... params) { - if (elems_ == nullptr) return false; - return elems_->force_push(this, [&](void* p) { - if (prep(p)) ::new (p) T(std::forward<P>(params)...); - }); - } - - template <typename T, typename F> - bool pop(T& item, F&& out) { - if (elems_ == nullptr) { - return false; - } - return elems_->pop(this, &(this->cursor_), [&item](void* p) { - ::new (&item) T(std::move(*static_cast<T*>(p))); - }, std::forward<F>(out)); - } -}; - -} // namespace detail - -template -class queue final : public detail::queue_base> { - using base_t = detail::queue_base>; - -public: - using value_t = T; - - using base_t::base_t; - - template <typename... P> - bool push(P&&... params) { - return base_t::template push<T>(std::forward<P>(params)...); - } - - template <typename... P> - bool force_push(P&&... params) { - return base_t::template force_push<T>(std::forward<P>
(params)...); - } - - bool pop(T& item) { - return base_t::pop(item, [](bool) {}); - } - - template - bool pop(T& item, F&& out) { - return base_t::pop(item, std::forward(out)); - } -}; - -} // namespace ipc diff --git a/spaces/chasemcdo/hf_localai/examples/README.md b/spaces/chasemcdo/hf_localai/examples/README.md deleted file mode 100644 index 29a4f857cef5591b6c973801cac9493fc91f4262..0000000000000000000000000000000000000000 --- a/spaces/chasemcdo/hf_localai/examples/README.md +++ /dev/null @@ -1,145 +0,0 @@ -# Examples - -Here is a list of projects that can easily be integrated with the LocalAI backend. - -### Projects - -### AutoGPT - -_by [@mudler](https://github.com/mudler)_ - -This example shows how to use AutoGPT with LocalAI. - -[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/autoGPT/) - -### Chatbot-UI - -_by [@mkellerman](https://github.com/mkellerman)_ - -![Screenshot from 2023-04-26 23-59-55](https://user-images.githubusercontent.com/2420543/234715439-98d12e03-d3ce-4f94-ab54-2b256808e05e.png) - -This integration shows how to use LocalAI with [mckaywrigley/chatbot-ui](https://github.com/mckaywrigley/chatbot-ui). - -[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/chatbot-ui/) - -There is also a separate example to show how to manually setup a model: [example](https://github.com/go-skynet/LocalAI/tree/master/examples/chatbot-ui-manual/) - -### K8sGPT - -_by [@mudler](https://github.com/mudler)_ - -This example show how to use LocalAI inside Kubernetes with [k8sgpt](https://k8sgpt.ai). - -![Screenshot from 2023-06-19 23-58-47](https://github.com/go-skynet/go-ggml-transformers.cpp/assets/2420543/cab87409-ee68-44ae-8d53-41627fb49509) - -### Flowise - -_by [@mudler](https://github.com/mudler)_ - -This example shows how to use [FlowiseAI/Flowise](https://github.com/FlowiseAI/Flowise) with LocalAI. - -[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/flowise/) - -### Discord bot - -_by [@mudler](https://github.com/mudler)_ - -Run a discord bot which lets you talk directly with a model - -[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/discord-bot/), or for a live demo you can talk with our bot in #random-bot in our discord server. 
- -### Langchain - -_by [@dave-gray101](https://github.com/dave-gray101)_ - -A ready to use example to show e2e how to integrate LocalAI with langchain - -[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/langchain/) - -### Langchain Python - -_by [@mudler](https://github.com/mudler)_ - -A ready to use example to show e2e how to integrate LocalAI with langchain - -[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/langchain-python/) - -### LocalAI WebUI - -_by [@dhruvgera](https://github.com/dhruvgera)_ - -![image](https://user-images.githubusercontent.com/42107491/235344183-44b5967d-ba22-4331-804c-8da7004a5d35.png) - -A light, community-maintained web interface for LocalAI - -[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/localai-webui/) - -### How to run rwkv models - -_by [@mudler](https://github.com/mudler)_ - -A full example on how to run RWKV models with LocalAI - -[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/rwkv/) - -### PrivateGPT - -_by [@mudler](https://github.com/mudler)_ - -A full example on how to run PrivateGPT with LocalAI - -[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/privateGPT/) - -### Slack bot - -_by [@mudler](https://github.com/mudler)_ - -Run a slack bot which lets you talk directly with a model - -[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/slack-bot/) - -### Slack bot (Question answering) - -_by [@mudler](https://github.com/mudler)_ - -Run a slack bot, ideally for teams, which lets you ask questions on a documentation website, or a github repository. - -[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/slack-qa-bot/) - -### Question answering on documents with llama-index - -_by [@mudler](https://github.com/mudler)_ - -Shows how to integrate with [Llama-Index](https://gpt-index.readthedocs.io/en/stable/getting_started/installation.html) to enable question answering on a set of documents. - -[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/query_data/) - -### Question answering on documents with langchain and chroma - -_by [@mudler](https://github.com/mudler)_ - -Shows how to integrate with `Langchain` and `Chroma` to enable question answering on a set of documents. - -[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/langchain-chroma/) - -### Telegram bot - -_by [@mudler](https://github.com/mudler) - -![Screenshot from 2023-06-09 00-36-26](https://github.com/go-skynet/LocalAI/assets/2420543/e98b4305-fa2d-41cf-9d2f-1bb2d75ca902) - -Use LocalAI to power a Telegram bot assistant, with Image generation and audio support! - -[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/telegram-bot/) - -### Template for Runpod.io - -_by [@fHachenberg](https://github.com/fHachenberg)_ - -Allows to run any LocalAI-compatible model as a backend on the servers of https://runpod.io - -[Check it out here](https://runpod.io/gsc?template=uv9mtqnrd0&ref=984wlcra) - -## Want to contribute? - -Create an issue, and put `Example: ` in the title! We will post your examples here. 
diff --git a/spaces/chendl/compositional_test/transformers/examples/legacy/token-classification/run_pos.sh b/spaces/chendl/compositional_test/transformers/examples/legacy/token-classification/run_pos.sh deleted file mode 100644 index 7d76ed8a2a8a94bc2cd258c42b78bcdb9ba3243b..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/examples/legacy/token-classification/run_pos.sh +++ /dev/null @@ -1,37 +0,0 @@ -if ! [ -f ./dev.txt ]; then - echo "Download dev dataset...." - curl -L -o ./dev.txt 'https://github.com/UniversalDependencies/UD_English-EWT/raw/master/en_ewt-ud-dev.conllu' -fi - -if ! [ -f ./test.txt ]; then - echo "Download test dataset...." - curl -L -o ./test.txt 'https://github.com/UniversalDependencies/UD_English-EWT/raw/master/en_ewt-ud-test.conllu' -fi - -if ! [ -f ./train.txt ]; then - echo "Download train dataset...." - curl -L -o ./train.txt 'https://github.com/UniversalDependencies/UD_English-EWT/raw/master/en_ewt-ud-train.conllu' -fi - -export MAX_LENGTH=200 -export BERT_MODEL=bert-base-uncased -export OUTPUT_DIR=postagger-model -export BATCH_SIZE=32 -export NUM_EPOCHS=3 -export SAVE_STEPS=750 -export SEED=1 - -python3 run_ner.py \ ---task_type POS \ ---data_dir . \ ---model_name_or_path $BERT_MODEL \ ---output_dir $OUTPUT_DIR \ ---max_seq_length $MAX_LENGTH \ ---num_train_epochs $NUM_EPOCHS \ ---per_gpu_train_batch_size $BATCH_SIZE \ ---save_steps $SAVE_STEPS \ ---seed $SEED \ ---do_train \ ---do_eval \ ---do_predict - diff --git a/spaces/chendl/compositional_test/transformers/examples/pytorch/question-answering/README.md b/spaces/chendl/compositional_test/transformers/examples/pytorch/question-answering/README.md deleted file mode 100644 index 6b86a4effa95084cd33ab25b918103b9e5b30c4f..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/examples/pytorch/question-answering/README.md +++ /dev/null @@ -1,183 +0,0 @@ - - -# Question answering - -This folder contains several scripts that showcase how to fine-tune a 🤗 Transformers model on a question answering dataset, -like SQuAD. - -## Trainer-based scripts - -The [`run_qa.py`](https://github.com/huggingface/transformers/blob/main/examples/pytorch/question-answering/run_qa.py), -[`run_qa_beam_search.py`](https://github.com/huggingface/transformers/blob/main/examples/pytorch/question-answering/run_qa_beam_search.py) and [`run_seq2seq_qa.py`](https://github.com/huggingface/transformers/blob/main/examples/pytorch/question-answering/run_seq2seq_qa.py) leverage the 🤗 [Trainer](https://huggingface.co/transformers/main_classes/trainer.html) for fine-tuning. - -### Fine-tuning BERT on SQuAD1.0 - -The [`run_qa.py`](https://github.com/huggingface/transformers/blob/main/examples/pytorch/question-answering/run_qa.py) script -allows to fine-tune any model from our [hub](https://huggingface.co/models) (as long as its architecture has a `ForQuestionAnswering` version in the library) on a question-answering dataset (such as SQuAD, or any other QA dataset available in the `datasets` library, or your own csv/jsonlines files) as long as they are structured the same way as SQuAD. You might need to tweak the data processing inside the script if your data is structured differently. - -**Note:** This script only works with models that have a fast tokenizer (backed by the 🤗 Tokenizers library) as it -uses special features of those tokenizers. 
You can check if your favorite model has a fast tokenizer in -[this table](https://huggingface.co/transformers/index.html#supported-frameworks), if it doesn't you can still use the old version of the script which can be found [here](https://github.com/huggingface/transformers/tree/main/examples/legacy/question-answering). - -Note that if your dataset contains samples with no possible answers (like SQuAD version 2), you need to pass along the flag `--version_2_with_negative`. - -This example code fine-tunes BERT on the SQuAD1.0 dataset. It runs in 24 min (with BERT-base) or 68 min (with BERT-large) -on a single tesla V100 16GB. - -```bash -python run_qa.py \ - --model_name_or_path bert-base-uncased \ - --dataset_name squad \ - --do_train \ - --do_eval \ - --per_device_train_batch_size 12 \ - --learning_rate 3e-5 \ - --num_train_epochs 2 \ - --max_seq_length 384 \ - --doc_stride 128 \ - --output_dir /tmp/debug_squad/ -``` - -Training with the previously defined hyper-parameters yields the following results: - -```bash -f1 = 88.52 -exact_match = 81.22 -``` - -### Fine-tuning XLNet with beam search on SQuAD - -The [`run_qa_beam_search.py`](https://github.com/huggingface/transformers/blob/main/examples/pytorch/question-answering/run_qa_beam_search.py) script is only meant to fine-tune XLNet, which is a special encoder-only Transformer model. The example code below fine-tunes XLNet on the SQuAD1.0 and SQuAD2.0 datasets. - -#### Command for SQuAD1.0: - -```bash -python run_qa_beam_search.py \ - --model_name_or_path xlnet-large-cased \ - --dataset_name squad \ - --do_train \ - --do_eval \ - --learning_rate 3e-5 \ - --num_train_epochs 2 \ - --max_seq_length 384 \ - --doc_stride 128 \ - --output_dir ./wwm_cased_finetuned_squad/ \ - --per_device_eval_batch_size=4 \ - --per_device_train_batch_size=4 \ - --save_steps 5000 -``` - -#### Command for SQuAD2.0: - -```bash -export SQUAD_DIR=/path/to/SQUAD - -python run_qa_beam_search.py \ - --model_name_or_path xlnet-large-cased \ - --dataset_name squad_v2 \ - --do_train \ - --do_eval \ - --version_2_with_negative \ - --learning_rate 3e-5 \ - --num_train_epochs 4 \ - --max_seq_length 384 \ - --doc_stride 128 \ - --output_dir ./wwm_cased_finetuned_squad/ \ - --per_device_eval_batch_size=2 \ - --per_device_train_batch_size=2 \ - --save_steps 5000 -``` - -### Fine-tuning T5 on SQuAD2.0 - -The [`run_seq2seq_qa.py`](https://github.com/huggingface/transformers/blob/main/examples/pytorch/question-answering/run_seq2seq_qa.py) script is meant for encoder-decoder (also called seq2seq) Transformer models, such as T5 or BART. These -models are generative, rather than discriminative. This means that they learn to generate the correct answer, rather than predicting the start and end position of the tokens of the answer. - -This example code fine-tunes T5 on the SQuAD2.0 dataset. - -```bash -python run_seq2seq_qa.py \ - --model_name_or_path t5-small \ - --dataset_name squad_v2 \ - --context_column context \ - --question_column question \ - --answer_column answers \ - --do_train \ - --do_eval \ - --per_device_train_batch_size 12 \ - --learning_rate 3e-5 \ - --num_train_epochs 2 \ - --max_seq_length 384 \ - --doc_stride 128 \ - --output_dir /tmp/debug_seq2seq_squad/ -``` - -## Accelerate-based scripts - -Based on the scripts `run_qa_no_trainer.py` and `run_qa_beam_search_no_trainer.py`. 
- -Like `run_qa.py` and `run_qa_beam_search.py`, these scripts allow you to fine-tune any of the models supported on a -SQuAD or a similar dataset, the main difference is that this script exposes the bare training loop, to allow you to quickly experiment and add any customization you would like. It offers less options than the script with `Trainer` (for instance you can easily change the options for the optimizer or the dataloaders directly in the script), but still run in a distributed setup, on TPU and supports mixed precision by leveraging the [🤗 `Accelerate`](https://github.com/huggingface/accelerate) library. - -You can use the script normally after installing it: - -```bash -pip install git+https://github.com/huggingface/accelerate -``` - -then - -```bash -python run_qa_no_trainer.py \ - --model_name_or_path bert-base-uncased \ - --dataset_name squad \ - --max_seq_length 384 \ - --doc_stride 128 \ - --output_dir ~/tmp/debug_squad -``` - -You can then use your usual launchers to run in it in a distributed environment, but the easiest way is to run - -```bash -accelerate config -``` - -and reply to the questions asked. Then - -```bash -accelerate test -``` - -that will check everything is ready for training. Finally, you can launch training with - -```bash -accelerate launch run_qa_no_trainer.py \ - --model_name_or_path bert-base-uncased \ - --dataset_name squad \ - --max_seq_length 384 \ - --doc_stride 128 \ - --output_dir ~/tmp/debug_squad -``` - -This command is the same and will work for: - -- a CPU-only setup -- a setup with one GPU -- a distributed training with several GPUs (single or multi node) -- a training on TPUs - -Note that this library is in alpha release so your feedback is more than welcome if you encounter any problem using it. 
diff --git a/spaces/chronopt-research/ViTExCo/src/models/CNN/GAN_models.py b/spaces/chronopt-research/ViTExCo/src/models/CNN/GAN_models.py deleted file mode 100644 index 137111bb8035c8d0dbd26b6b958c4036260b8821..0000000000000000000000000000000000000000 --- a/spaces/chronopt-research/ViTExCo/src/models/CNN/GAN_models.py +++ /dev/null @@ -1,268 +0,0 @@ -# DCGAN-like generator and discriminator -import torch -from torch import nn -import torch.nn.functional as F -from torch.nn import Parameter - - -def l2normalize(v, eps=1e-12): - return v / (v.norm() + eps) - - -class SpectralNorm(nn.Module): - def __init__(self, module, name="weight", power_iterations=1): - super(SpectralNorm, self).__init__() - self.module = module - self.name = name - self.power_iterations = power_iterations - if not self._made_params(): - self._make_params() - - def _update_u_v(self): - u = getattr(self.module, self.name + "_u") - v = getattr(self.module, self.name + "_v") - w = getattr(self.module, self.name + "_bar") - - height = w.data.shape[0] - for _ in range(self.power_iterations): - v.data = l2normalize(torch.mv(torch.t(w.view(height, -1).data), u.data)) - u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data)) - - sigma = u.dot(w.view(height, -1).mv(v)) - setattr(self.module, self.name, w / sigma.expand_as(w)) - - def _made_params(self): - try: - u = getattr(self.module, self.name + "_u") - v = getattr(self.module, self.name + "_v") - w = getattr(self.module, self.name + "_bar") - return True - except AttributeError: - return False - - def _make_params(self): - w = getattr(self.module, self.name) - - height = w.data.shape[0] - width = w.view(height, -1).data.shape[1] - - u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False) - v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False) - u.data = l2normalize(u.data) - v.data = l2normalize(v.data) - w_bar = Parameter(w.data) - - del self.module._parameters[self.name] - - self.module.register_parameter(self.name + "_u", u) - self.module.register_parameter(self.name + "_v", v) - self.module.register_parameter(self.name + "_bar", w_bar) - - def forward(self, *args): - self._update_u_v() - return self.module.forward(*args) - - -class Generator(nn.Module): - def __init__(self, z_dim): - super(Generator, self).__init__() - self.z_dim = z_dim - - self.model = nn.Sequential( - nn.ConvTranspose2d(z_dim, 512, 4, stride=1), - nn.InstanceNorm2d(512), - nn.ReLU(), - nn.ConvTranspose2d(512, 256, 4, stride=2, padding=(1, 1)), - nn.InstanceNorm2d(256), - nn.ReLU(), - nn.ConvTranspose2d(256, 128, 4, stride=2, padding=(1, 1)), - nn.InstanceNorm2d(128), - nn.ReLU(), - nn.ConvTranspose2d(128, 64, 4, stride=2, padding=(1, 1)), - nn.InstanceNorm2d(64), - nn.ReLU(), - nn.ConvTranspose2d(64, channels, 3, stride=1, padding=(1, 1)), - nn.Tanh(), - ) - - def forward(self, z): - return self.model(z.view(-1, self.z_dim, 1, 1)) - - -channels = 3 -leak = 0.1 -w_g = 4 - - -class Discriminator(nn.Module): - def __init__(self): - super(Discriminator, self).__init__() - - self.conv1 = SpectralNorm(nn.Conv2d(channels, 64, 3, stride=1, padding=(1, 1))) - self.conv2 = SpectralNorm(nn.Conv2d(64, 64, 4, stride=2, padding=(1, 1))) - self.conv3 = SpectralNorm(nn.Conv2d(64, 128, 3, stride=1, padding=(1, 1))) - self.conv4 = SpectralNorm(nn.Conv2d(128, 128, 4, stride=2, padding=(1, 1))) - self.conv5 = SpectralNorm(nn.Conv2d(128, 256, 3, stride=1, padding=(1, 1))) - self.conv6 = SpectralNorm(nn.Conv2d(256, 256, 4, stride=2, padding=(1, 1))) - self.conv7 = 
SpectralNorm(nn.Conv2d(256, 256, 3, stride=1, padding=(1, 1))) - self.conv8 = SpectralNorm(nn.Conv2d(256, 512, 4, stride=2, padding=(1, 1))) - self.fc = SpectralNorm(nn.Linear(w_g * w_g * 512, 1)) - - def forward(self, x): - m = x - m = nn.LeakyReLU(leak)(self.conv1(m)) - m = nn.LeakyReLU(leak)(nn.InstanceNorm2d(64)(self.conv2(m))) - m = nn.LeakyReLU(leak)(nn.InstanceNorm2d(128)(self.conv3(m))) - m = nn.LeakyReLU(leak)(nn.InstanceNorm2d(128)(self.conv4(m))) - m = nn.LeakyReLU(leak)(nn.InstanceNorm2d(256)(self.conv5(m))) - m = nn.LeakyReLU(leak)(nn.InstanceNorm2d(256)(self.conv6(m))) - m = nn.LeakyReLU(leak)(nn.InstanceNorm2d(256)(self.conv7(m))) - m = nn.LeakyReLU(leak)(self.conv8(m)) - - return self.fc(m.view(-1, w_g * w_g * 512)) - - -class Self_Attention(nn.Module): - """Self attention Layer""" - - def __init__(self, in_dim): - super(Self_Attention, self).__init__() - self.chanel_in = in_dim - - self.query_conv = SpectralNorm(nn.Conv2d(in_channels=in_dim, out_channels=in_dim // 1, kernel_size=1)) - self.key_conv = SpectralNorm(nn.Conv2d(in_channels=in_dim, out_channels=in_dim // 1, kernel_size=1)) - self.value_conv = SpectralNorm(nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)) - self.gamma = nn.Parameter(torch.zeros(1)) - - self.softmax = nn.Softmax(dim=-1) # - - def forward(self, x): - """ - inputs : - x : input feature maps( B X C X W X H) - returns : - out : self attention value + input feature - attention: B X N X N (N is Width*Height) - """ - m_batchsize, C, width, height = x.size() - proj_query = self.query_conv(x).view(m_batchsize, -1, width * height).permute(0, 2, 1) # B X CX(N) - proj_key = self.key_conv(x).view(m_batchsize, -1, width * height) # B X C x (*W*H) - energy = torch.bmm(proj_query, proj_key) # transpose check - attention = self.softmax(energy) # BX (N) X (N) - proj_value = self.value_conv(x).view(m_batchsize, -1, width * height) # B X C X N - - out = torch.bmm(proj_value, attention.permute(0, 2, 1)) - out = out.view(m_batchsize, C, width, height) - - out = self.gamma * out + x - return out - - -class Discriminator_x64(nn.Module): - """ - Discriminative Network - """ - - def __init__(self, in_size=6, ndf=64): - super(Discriminator_x64, self).__init__() - self.in_size = in_size - self.ndf = ndf - - self.layer1 = nn.Sequential(SpectralNorm(nn.Conv2d(self.in_size, self.ndf, 4, 2, 1)), nn.LeakyReLU(0.2, inplace=True)) - - self.layer2 = nn.Sequential( - SpectralNorm(nn.Conv2d(self.ndf, self.ndf, 4, 2, 1)), - nn.InstanceNorm2d(self.ndf), - nn.LeakyReLU(0.2, inplace=True), - ) - self.attention = Self_Attention(self.ndf) - self.layer3 = nn.Sequential( - SpectralNorm(nn.Conv2d(self.ndf, self.ndf * 2, 4, 2, 1)), - nn.InstanceNorm2d(self.ndf * 2), - nn.LeakyReLU(0.2, inplace=True), - ) - self.layer4 = nn.Sequential( - SpectralNorm(nn.Conv2d(self.ndf * 2, self.ndf * 4, 4, 2, 1)), - nn.InstanceNorm2d(self.ndf * 4), - nn.LeakyReLU(0.2, inplace=True), - ) - self.layer5 = nn.Sequential( - SpectralNorm(nn.Conv2d(self.ndf * 4, self.ndf * 8, 4, 2, 1)), - nn.InstanceNorm2d(self.ndf * 8), - nn.LeakyReLU(0.2, inplace=True), - ) - self.layer6 = nn.Sequential( - SpectralNorm(nn.Conv2d(self.ndf * 8, self.ndf * 16, 4, 2, 1)), - nn.InstanceNorm2d(self.ndf * 16), - nn.LeakyReLU(0.2, inplace=True), - ) - - self.last = SpectralNorm(nn.Conv2d(self.ndf * 16, 1, [3, 6], 1, 0)) - - def forward(self, input): - feature1 = self.layer1(input) - feature2 = self.layer2(feature1) - feature_attention = self.attention(feature2) - feature3 = self.layer3(feature_attention) - feature4 = 
self.layer4(feature3) - feature5 = self.layer5(feature4) - feature6 = self.layer6(feature5) - output = self.last(feature6) - output = F.avg_pool2d(output, output.size()[2:]).view(output.size()[0], -1) - - return output, feature4 - - -class Discriminator_x64_224(nn.Module): - """ - Discriminative Network - """ - - def __init__(self, in_size=6, ndf=64): - super(Discriminator_x64_224, self).__init__() - self.in_size = in_size - self.ndf = ndf - - self.layer1 = nn.Sequential(SpectralNorm(nn.Conv2d(self.in_size, self.ndf, 4, 2, 1)), nn.LeakyReLU(0.2, inplace=True)) - - self.layer2 = nn.Sequential( - SpectralNorm(nn.Conv2d(self.ndf, self.ndf, 4, 2, 1)), - nn.InstanceNorm2d(self.ndf), - nn.LeakyReLU(0.2, inplace=True), - ) - self.attention = Self_Attention(self.ndf) - self.layer3 = nn.Sequential( - SpectralNorm(nn.Conv2d(self.ndf, self.ndf * 2, 4, 2, 1)), - nn.InstanceNorm2d(self.ndf * 2), - nn.LeakyReLU(0.2, inplace=True), - ) - self.layer4 = nn.Sequential( - SpectralNorm(nn.Conv2d(self.ndf * 2, self.ndf * 4, 4, 2, 1)), - nn.InstanceNorm2d(self.ndf * 4), - nn.LeakyReLU(0.2, inplace=True), - ) - self.layer5 = nn.Sequential( - SpectralNorm(nn.Conv2d(self.ndf * 4, self.ndf * 8, 4, 2, 1)), - nn.InstanceNorm2d(self.ndf * 8), - nn.LeakyReLU(0.2, inplace=True), - ) - self.layer6 = nn.Sequential( - SpectralNorm(nn.Conv2d(self.ndf * 8, self.ndf * 16, 4, 2, 1)), - nn.InstanceNorm2d(self.ndf * 16), - nn.LeakyReLU(0.2, inplace=True), - ) - - self.last = SpectralNorm(nn.Conv2d(self.ndf * 16, 1, [3, 3], 1, 0)) - - def forward(self, input): - feature1 = self.layer1(input) - feature2 = self.layer2(feature1) - feature_attention = self.attention(feature2) - feature3 = self.layer3(feature_attention) - feature4 = self.layer4(feature3) - feature5 = self.layer5(feature4) - feature6 = self.layer6(feature5) - output = self.last(feature6) - output = F.avg_pool2d(output, output.size()[2:]).view(output.size()[0], -1) - - return output, feature4 diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/varLib/plot.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/varLib/plot.py deleted file mode 100644 index e0a7ca50d3f317d7c3219b77ff84f0f8bb310c6d..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/varLib/plot.py +++ /dev/null @@ -1,238 +0,0 @@ -"""Visualize DesignSpaceDocument and resulting VariationModel.""" - -from fontTools.varLib.models import VariationModel, supportScalar -from fontTools.designspaceLib import DesignSpaceDocument -from matplotlib import pyplot -from mpl_toolkits.mplot3d import axes3d -from itertools import cycle -import math -import logging -import sys - -log = logging.getLogger(__name__) - - -def stops(support, count=10): - a, b, c = support - - return ( - [a + (b - a) * i / count for i in range(count)] - + [b + (c - b) * i / count for i in range(count)] - + [c] - ) - - -def _plotLocationsDots(locations, axes, subplot, **kwargs): - for loc, color in zip(locations, cycle(pyplot.cm.Set1.colors)): - if len(axes) == 1: - subplot.plot([loc.get(axes[0], 0)], [1.0], "o", color=color, **kwargs) - elif len(axes) == 2: - subplot.plot( - [loc.get(axes[0], 0)], - [loc.get(axes[1], 0)], - [1.0], - "o", - color=color, - **kwargs, - ) - else: - raise AssertionError(len(axes)) - - -def plotLocations(locations, fig, names=None, **kwargs): - n = len(locations) - cols = math.ceil(n**0.5) - rows = math.ceil(n / cols) - - if names is None: - names = [None] * 
len(locations) - - model = VariationModel(locations) - names = [names[model.reverseMapping[i]] for i in range(len(names))] - - axes = sorted(locations[0].keys()) - if len(axes) == 1: - _plotLocations2D(model, axes[0], fig, cols, rows, names=names, **kwargs) - elif len(axes) == 2: - _plotLocations3D(model, axes, fig, cols, rows, names=names, **kwargs) - else: - raise ValueError("Only 1 or 2 axes are supported") - - -def _plotLocations2D(model, axis, fig, cols, rows, names, **kwargs): - subplot = fig.add_subplot(111) - for i, (support, color, name) in enumerate( - zip(model.supports, cycle(pyplot.cm.Set1.colors), cycle(names)) - ): - if name is not None: - subplot.set_title(name) - subplot.set_xlabel(axis) - pyplot.xlim(-1.0, +1.0) - - Xs = support.get(axis, (-1.0, 0.0, +1.0)) - X, Y = [], [] - for x in stops(Xs): - y = supportScalar({axis: x}, support) - X.append(x) - Y.append(y) - subplot.plot(X, Y, color=color, **kwargs) - - _plotLocationsDots(model.locations, [axis], subplot) - - -def _plotLocations3D(model, axes, fig, rows, cols, names, **kwargs): - ax1, ax2 = axes - - axis3D = fig.add_subplot(111, projection="3d") - for i, (support, color, name) in enumerate( - zip(model.supports, cycle(pyplot.cm.Set1.colors), cycle(names)) - ): - if name is not None: - axis3D.set_title(name) - axis3D.set_xlabel(ax1) - axis3D.set_ylabel(ax2) - pyplot.xlim(-1.0, +1.0) - pyplot.ylim(-1.0, +1.0) - - Xs = support.get(ax1, (-1.0, 0.0, +1.0)) - Ys = support.get(ax2, (-1.0, 0.0, +1.0)) - for x in stops(Xs): - X, Y, Z = [], [], [] - for y in Ys: - z = supportScalar({ax1: x, ax2: y}, support) - X.append(x) - Y.append(y) - Z.append(z) - axis3D.plot(X, Y, Z, color=color, **kwargs) - for y in stops(Ys): - X, Y, Z = [], [], [] - for x in Xs: - z = supportScalar({ax1: x, ax2: y}, support) - X.append(x) - Y.append(y) - Z.append(z) - axis3D.plot(X, Y, Z, color=color, **kwargs) - - _plotLocationsDots(model.locations, [ax1, ax2], axis3D) - - -def plotDocument(doc, fig, **kwargs): - doc.normalize() - locations = [s.location for s in doc.sources] - names = [s.name for s in doc.sources] - plotLocations(locations, fig, names, **kwargs) - - -def _plotModelFromMasters2D(model, masterValues, fig, **kwargs): - assert len(model.axisOrder) == 1 - axis = model.axisOrder[0] - - axis_min = min(loc.get(axis, 0) for loc in model.locations) - axis_max = max(loc.get(axis, 0) for loc in model.locations) - - import numpy as np - - X = np.arange(axis_min, axis_max, (axis_max - axis_min) / 100) - Y = [] - - for x in X: - loc = {axis: x} - v = model.interpolateFromMasters(loc, masterValues) - Y.append(v) - - subplot = fig.add_subplot(111) - subplot.plot(X, Y, "-", **kwargs) - - -def _plotModelFromMasters3D(model, masterValues, fig, **kwargs): - assert len(model.axisOrder) == 2 - axis1, axis2 = model.axisOrder[0], model.axisOrder[1] - - axis1_min = min(loc.get(axis1, 0) for loc in model.locations) - axis1_max = max(loc.get(axis1, 0) for loc in model.locations) - axis2_min = min(loc.get(axis2, 0) for loc in model.locations) - axis2_max = max(loc.get(axis2, 0) for loc in model.locations) - - import numpy as np - - X = np.arange(axis1_min, axis1_max, (axis1_max - axis1_min) / 100) - Y = np.arange(axis2_min, axis2_max, (axis2_max - axis2_min) / 100) - X, Y = np.meshgrid(X, Y) - Z = [] - - for row_x, row_y in zip(X, Y): - z_row = [] - Z.append(z_row) - for x, y in zip(row_x, row_y): - loc = {axis1: x, axis2: y} - v = model.interpolateFromMasters(loc, masterValues) - z_row.append(v) - Z = np.array(Z) - - axis3D = fig.add_subplot(111, 
projection="3d") - axis3D.plot_surface(X, Y, Z, **kwargs) - - -def plotModelFromMasters(model, masterValues, fig, **kwargs): - """Plot a variation model and set of master values corresponding - to the locations to the model into a pyplot figure. Variation - model must have axisOrder of size 1 or 2.""" - if len(model.axisOrder) == 1: - _plotModelFromMasters2D(model, masterValues, fig, **kwargs) - elif len(model.axisOrder) == 2: - _plotModelFromMasters3D(model, masterValues, fig, **kwargs) - else: - raise ValueError("Only 1 or 2 axes are supported") - - -def main(args=None): - from fontTools import configLogger - - if args is None: - args = sys.argv[1:] - - # configure the library logger (for >= WARNING) - configLogger() - # comment this out to enable debug messages from logger - # log.setLevel(logging.DEBUG) - - if len(args) < 1: - print("usage: fonttools varLib.plot source.designspace", file=sys.stderr) - print(" or") - print("usage: fonttools varLib.plot location1 location2 ...", file=sys.stderr) - print(" or") - print( - "usage: fonttools varLib.plot location1=value1 location2=value2 ...", - file=sys.stderr, - ) - sys.exit(1) - - fig = pyplot.figure() - fig.set_tight_layout(True) - - if len(args) == 1 and args[0].endswith(".designspace"): - doc = DesignSpaceDocument() - doc.read(args[0]) - plotDocument(doc, fig) - else: - axes = [chr(c) for c in range(ord("A"), ord("Z") + 1)] - if "=" not in args[0]: - locs = [dict(zip(axes, (float(v) for v in s.split(",")))) for s in args] - plotLocations(locs, fig) - else: - locations = [] - masterValues = [] - for arg in args: - loc, v = arg.split("=") - locations.append(dict(zip(axes, (float(v) for v in loc.split(","))))) - masterValues.append(float(v)) - model = VariationModel(locations, axes[: len(locations[0])]) - plotModelFromMasters(model, masterValues, fig) - - pyplot.show() - - -if __name__ == "__main__": - import sys - - sys.exit(main()) diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fsspec/dircache.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fsspec/dircache.py deleted file mode 100644 index eca19566b135e5a7a4f6e7407d56411ec58bfe44..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fsspec/dircache.py +++ /dev/null @@ -1,98 +0,0 @@ -import time -from collections.abc import MutableMapping -from functools import lru_cache - - -class DirCache(MutableMapping): - """ - Caching of directory listings, in a structure like:: - - {"path0": [ - {"name": "path0/file0", - "size": 123, - "type": "file", - ... - }, - {"name": "path0/file1", - }, - ... - ], - "path1": [...] - } - - Parameters to this class control listing expiry or indeed turn - caching off - """ - - def __init__( - self, - use_listings_cache=True, - listings_expiry_time=None, - max_paths=None, - **kwargs, - ): - """ - - Parameters - ---------- - use_listings_cache: bool - If False, this cache never returns items, but always reports KeyError, - and setting items has no effect - listings_expiry_time: int or float (optional) - Time in seconds that a listing is considered valid. If None, - listings do not expire. - max_paths: int (optional) - The number of most recent listings that are considered valid; 'recent' - refers to when the entry was set. 
- """ - self._cache = {} - self._times = {} - if max_paths: - self._q = lru_cache(max_paths + 1)(lambda key: self._cache.pop(key, None)) - self.use_listings_cache = use_listings_cache - self.listings_expiry_time = listings_expiry_time - self.max_paths = max_paths - - def __getitem__(self, item): - if self.listings_expiry_time is not None: - if self._times.get(item, 0) - time.time() < -self.listings_expiry_time: - del self._cache[item] - if self.max_paths: - self._q(item) - return self._cache[item] # maybe raises KeyError - - def clear(self): - self._cache.clear() - - def __len__(self): - return len(self._cache) - - def __contains__(self, item): - try: - self[item] - return True - except KeyError: - return False - - def __setitem__(self, key, value): - if not self.use_listings_cache: - return - if self.max_paths: - self._q(key) - self._cache[key] = value - if self.listings_expiry_time is not None: - self._times[key] = time.time() - - def __delitem__(self, key): - del self._cache[key] - - def __iter__(self): - entries = list(self._cache) - - return (k for k in entries if k in self) - - def __reduce__(self): - return ( - DirCache, - (self.use_listings_cache, self.listings_expiry_time, self.max_paths), - ) diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/functorch/_src/make_functional/__init__.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/functorch/_src/make_functional/__init__.py deleted file mode 100644 index 3de7787df0c3304207b42b51e9fb62da9d33c7d0..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/functorch/_src/make_functional/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -# This file has moved to under torch/_functorch. It is not public API. -# If you are not a PyTorch developer and you are relying on the following -# imports, please file an issue. -from torch._functorch.make_functional import _swap_state diff --git a/spaces/cihyFjudo/fairness-paper-search/The Killer Of Killers Dubbed Italian Movie Free LINK Download Torrent.md b/spaces/cihyFjudo/fairness-paper-search/The Killer Of Killers Dubbed Italian Movie Free LINK Download Torrent.md deleted file mode 100644 index f649a2861b01dc9989ede361c9291a7b27d8a0ec..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/The Killer Of Killers Dubbed Italian Movie Free LINK Download Torrent.md +++ /dev/null @@ -1,12 +0,0 @@ - -

-ka.com/forum2_theme_112973819.xhtml?tema=168
-2-full-movie-free-download-in-hd-720p



-reslyicemcabe/nadiya_ke_paar_1982_movie_209/
-lierolquiwatdi/tk_actions_panel_download_freerar/
-haynacapecoc/maya_full_movie_download_mp4/

-ishq-aur-mohabbat-movie-full-download

-

Bruno the Kid full movie download mp4
Exist tamil dubbed movie torrent
New Alcatraz tamil pdf download
Cinnamon Roll full movie in hindi 720p
The War of Loong full movie in hindi free download
Ido no soko dubbed hindi movie free download torrent
Tata deocamdata full movie 720p download
Lydia Bailey full movie in hindi free download hd 720p
ObsCure II full movie hd download
download Cold Warriors

-

The Killer of Killers dubbed italian movie free download torrent


DOWNLOAD 🗹 https://tinurli.com/2uwkce



-

Unknown Woman download movie free
A Sheep in Wolf's Clothing malayalam full movie free download
Bonnie and Clyde's Great Escape torrent
Battle of the Stars - Part 2 malayalam movie download
The Echelon Conspiracy
The Neighborhood full movie in hindi free download hd 1080p
Marked Men in hindi free download
Tall Cotton 720p
Cowboy and the Senorita telugu full movie download
1 Contender Triple Threat tamil dubbed movie torrent

-

The Witcher 3: Wild Hunt Launch song free download
Download the Batman Sets the Pace full movie tamil dubbed in torrent
Maartin dubbed hindi movie free download torrent
Friend or Foe online free
The Italian Job movie free download in hindi
free download Kara Para Ask
Episode 1.186 tamil pdf download
Back for Blood torrent
Fighting Shadows full movie in hindi free download hd 1080p
Transformers full movie online free

-

Shellshock movie download
The Pacific dubbed hindi movie free download torrent
the En defensa propia full movie download in hindi
Ellen was here in hindi 720p
The Holcroft Covenant full movie hindi download
Leningrad: Kolshik in hindi free download
Spider-Man hd mp4 download
All Hallow's Eve telugu full movie download
AkaKILL! Theater: 6th Elimination - Esdeath's Fun Torture Classroom tamil dubbed movie free download
the Local Hero full movie download in hindi

-

Flipping Out: Part 1 full movie hd 1080p download kickass movie
the Reap the Whirlwind italian dubbed free download
Hell Ambassador! The True Meaning of Fear download completo di film in italiano
Garalia's Pursuit movie free download hd
Get Back the Rusted Bonds download torrent
italian movie dubbed in italian free download Three
Lyubochka and Romka Yard movie in italian free download
free download Franken-Brain Evil Idol
Gourmet School Lunch: Meal 2 Pudding! song free download
Episode 1.245 download completo di film in italiano

-

This is a nail-biting crime drama of merciless killers, full of intrigue, non-stop action, tension, suspense, and plenty of violence. John Lee (Chow Yun Fat) is the best hired hitman around, but a relentless vendetta erupts when he refuses to commit a murder because of a morally complicated target. The drug lord (Kenneth Tsang) then hires replacements (Til Schweiger, Danny Trejo) to finish the job and to kill the hitman. John teams up with Meg Coburn to help him get away from these "Replacement Killers". Along the way, they partner up to defend themselves and to save a cop (Michael Rooker) and his seven-year-old son, fighting at whatever cost and stopping at nothing while Chinese gangs and Triads chase them, along with the Replacement Killers. Later on, things go more and more awry, and both John Lee and Meg Coburn are drawn into a cobweb of violence, treason and deception. Kill or be replaced. When people need to disappear, they come to see Meg. When they're about to go, they never see John coming. Conscience has no place in the heart of an assassin. No law. No rules. No justice.

A slick and passable crime thriller in which two divergent characters, a Chinese hitman and a young Caucasian forger, take on a war against hoodlums, rival gangs and other underworld factions. It is an attractive, action-packed movie with thrills, violence, betrayal and bloodletting shoot-'em-up. The movie has a John Woo style: there are plenty of shoot-outs, frenetic action and men firing as they run. Chow Yun-Fat finally receives the Hollywood stardom he had already earned internationally in Asia. Nevertheless, the action segments suffer in comparison to his work in John Woo films. Mira Sorvino is fine as the tough forger, showcasing the talents she previously displayed in other movies such as Woody Allen's Mighty Aphrodite, Academy Award included. The thrilling script and performances are top-drawer, and there's enough crossfire to satisfy the average appetite for destruction. It is a good starring duo, Chow Yun-Fat as a troubled hitman who seeks a forger's aid to get papers and Mira Sorvino as the resourceful forger, both giving acceptable performances and well accompanied by an appropriate supporting cast, including Michael Rooker, Kenneth Tsang, Clifton Collins Jr, Carlos Gómez, Frank Medrano, Patrick Kilpatrick, Randall Duk Kim and Danny Trejo, Til Schweiger as the ring of hitmen, and a special appearance by the veteran German actor Jürgen Prochnow.

It features adequate and colorful cinematography by Peter Collister, as well as a moving and thrilling score by Harry Gregson-Williams, composed with synthesizers and lots of oriental sounds. The motion picture was professionally directed by Antoine Fuqua, who draws some decent performances from his standout cast. Fuqua originally did not want to do John Woo's trademark "Mexican standoff" in this film, but the producers begged him to include these kinds of rip-roaring scenes. This fine craftsman frequently deals with familiar conflicts set in New York; he keeps the film slick and stokes up the pace, which only accelerates the flick's deafening rush over the top. Fuqua handles the explosive and the psychological undercurrents with equal assurance. He has made a lot of successful movies, such as The Magnificent Seven, Olympus Has Fallen, The Equalizer, Equalizer 2, Southpaw, King Arthur, Training Day, Shooter, Tears of the Sun and Bait, among others. Rating: 6/10. The yarn will appeal to Chow Yun Fat and Mira Sorvino fans.

-

aaccfb2cb3
-
-
\ No newline at end of file diff --git a/spaces/clevrpwn/CompVis-stable-diffusion-v1-4/app.py b/spaces/clevrpwn/CompVis-stable-diffusion-v1-4/app.py deleted file mode 100644 index e1e1025c8f06010197c50917ac9dd1ddeaf7e5aa..0000000000000000000000000000000000000000 --- a/spaces/clevrpwn/CompVis-stable-diffusion-v1-4/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/CompVis/stable-diffusion-v1-4").launch() \ No newline at end of file diff --git a/spaces/codejin/diffsingerkr/Modules/Layer.py b/spaces/codejin/diffsingerkr/Modules/Layer.py deleted file mode 100644 index 53dd0885acae4f2c6af115b973a2d07bd34660b9..0000000000000000000000000000000000000000 --- a/spaces/codejin/diffsingerkr/Modules/Layer.py +++ /dev/null @@ -1,317 +0,0 @@ -import torch - -class Conv1d(torch.nn.Conv1d): - def __init__(self, w_init_gain= 'linear', *args, **kwargs): - self.w_init_gain = w_init_gain - super().__init__(*args, **kwargs) - - def reset_parameters(self): - if self.w_init_gain in ['zero']: - torch.nn.init.zeros_(self.weight) - elif self.w_init_gain is None: - pass - elif self.w_init_gain in ['relu', 'leaky_relu']: - torch.nn.init.kaiming_uniform_(self.weight, nonlinearity= self.w_init_gain) - elif self.w_init_gain == 'glu': - assert self.out_channels % 2 == 0, 'The out_channels of GLU requires even number.' - torch.nn.init.kaiming_uniform_(self.weight[:self.out_channels // 2], nonlinearity= 'linear') - torch.nn.init.xavier_uniform_(self.weight[self.out_channels // 2:], gain= torch.nn.init.calculate_gain('sigmoid')) - elif self.w_init_gain == 'gate': - assert self.out_channels % 2 == 0, 'The out_channels of GLU requires even number.' - torch.nn.init.xavier_uniform_(self.weight[:self.out_channels // 2], gain= torch.nn.init.calculate_gain('tanh')) - torch.nn.init.xavier_uniform_(self.weight[self.out_channels // 2:], gain= torch.nn.init.calculate_gain('sigmoid')) - else: - torch.nn.init.xavier_uniform_(self.weight, gain= torch.nn.init.calculate_gain(self.w_init_gain)) - if not self.bias is None: - torch.nn.init.zeros_(self.bias) - -class ConvTranspose1d(torch.nn.ConvTranspose1d): - def __init__(self, w_init_gain= 'linear', *args, **kwargs): - self.w_init_gain = w_init_gain - super().__init__(*args, **kwargs) - - def reset_parameters(self): - if self.w_init_gain in ['zero']: - torch.nn.init.zeros_(self.weight) - elif self.w_init_gain in ['relu', 'leaky_relu']: - torch.nn.init.kaiming_uniform_(self.weight, nonlinearity= self.w_init_gain) - elif self.w_init_gain == 'glu': - assert self.out_channels % 2 == 0, 'The out_channels of GLU requires even number.' - torch.nn.init.kaiming_uniform_(self.weight[:self.out_channels // 2], nonlinearity= 'linear') - torch.nn.init.xavier_uniform_(self.weight[self.out_channels // 2:], gain= torch.nn.init.calculate_gain('sigmoid')) - elif self.w_init_gain == 'gate': - assert self.out_channels % 2 == 0, 'The out_channels of GLU requires even number.' 
- torch.nn.init.xavier_uniform_(self.weight[:self.out_channels // 2], gain= torch.nn.init.calculate_gain('tanh')) - torch.nn.init.xavier_uniform_(self.weight[self.out_channels // 2:], gain= torch.nn.init.calculate_gain('sigmoid')) - else: - torch.nn.init.xavier_uniform_(self.weight, gain= torch.nn.init.calculate_gain(self.w_init_gain)) - if not self.bias is None: - torch.nn.init.zeros_(self.bias) - -class Conv2d(torch.nn.Conv2d): - def __init__(self, w_init_gain= 'linear', *args, **kwargs): - self.w_init_gain = w_init_gain - super().__init__(*args, **kwargs) - - def reset_parameters(self): - if self.w_init_gain in ['zero']: - torch.nn.init.zeros_(self.weight) - elif self.w_init_gain in ['relu', 'leaky_relu']: - torch.nn.init.kaiming_uniform_(self.weight, nonlinearity= self.w_init_gain) - elif self.w_init_gain == 'glu': - assert self.out_channels % 2 == 0, 'The out_channels of GLU requires even number.' - torch.nn.init.kaiming_uniform_(self.weight[:self.out_channels // 2], nonlinearity= 'linear') - torch.nn.init.xavier_uniform_(self.weight[self.out_channels // 2:], gain= torch.nn.init.calculate_gain('sigmoid')) - elif self.w_init_gain == 'gate': - assert self.out_channels % 2 == 0, 'The out_channels of GLU requires even number.' - torch.nn.init.xavier_uniform_(self.weight[:self.out_channels // 2], gain= torch.nn.init.calculate_gain('tanh')) - torch.nn.init.xavier_uniform_(self.weight[self.out_channels // 2:], gain= torch.nn.init.calculate_gain('sigmoid')) - else: - torch.nn.init.xavier_uniform_(self.weight, gain= torch.nn.init.calculate_gain(self.w_init_gain)) - if not self.bias is None: - torch.nn.init.zeros_(self.bias) - -class ConvTranspose2d(torch.nn.ConvTranspose2d): - def __init__(self, w_init_gain= 'linear', *args, **kwargs): - self.w_init_gain = w_init_gain - super().__init__(*args, **kwargs) - - def reset_parameters(self): - if self.w_init_gain in ['zero']: - torch.nn.init.zeros_(self.weight) - elif self.w_init_gain in ['relu', 'leaky_relu']: - torch.nn.init.kaiming_uniform_(self.weight, nonlinearity= self.w_init_gain) - elif self.w_init_gain == 'glu': - assert self.out_channels % 2 == 0, 'The out_channels of GLU requires even number.' - torch.nn.init.kaiming_uniform_(self.weight[:self.out_channels // 2], nonlinearity= 'linear') - torch.nn.init.xavier_uniform_(self.weight[self.out_channels // 2:], gain= torch.nn.init.calculate_gain('sigmoid')) - elif self.w_init_gain == 'gate': - assert self.out_channels % 2 == 0, 'The out_channels of GLU requires even number.' - torch.nn.init.xavier_uniform_(self.weight[:self.out_channels // 2], gain= torch.nn.init.calculate_gain('tanh')) - torch.nn.init.xavier_uniform_(self.weight[self.out_channels // 2:], gain= torch.nn.init.calculate_gain('sigmoid')) - else: - torch.nn.init.xavier_uniform_(self.weight, gain= torch.nn.init.calculate_gain(self.w_init_gain)) - if not self.bias is None: - torch.nn.init.zeros_(self.bias) - -class Linear(torch.nn.Linear): - def __init__(self, w_init_gain= 'linear', *args, **kwargs): - self.w_init_gain = w_init_gain - super().__init__(*args, **kwargs) - - def reset_parameters(self): - if self.w_init_gain in ['zero']: - torch.nn.init.zeros_(self.weight) - elif self.w_init_gain in ['relu', 'leaky_relu']: - torch.nn.init.kaiming_uniform_(self.weight, nonlinearity= self.w_init_gain) - elif self.w_init_gain == 'glu': - assert self.out_channels % 2 == 0, 'The out_channels of GLU requires even number.' 
- torch.nn.init.kaiming_uniform_(self.weight[:self.out_channels // 2], nonlinearity= 'linear') - torch.nn.init.xavier_uniform_(self.weight[self.out_channels // 2:], gain= torch.nn.init.calculate_gain('sigmoid')) - else: - torch.nn.init.xavier_uniform_(self.weight, gain= torch.nn.init.calculate_gain(self.w_init_gain)) - if not self.bias is None: - torch.nn.init.zeros_(self.bias) - -class Lambda(torch.nn.Module): - def __init__(self, lambd): - super().__init__() - self.lambd = lambd - - def forward(self, x): - return self.lambd(x) - -class Residual(torch.nn.Module): - def __init__(self, module): - super().__init__() - self.module = module - - def forward(self, *args, **kwargs): - return self.module(*args, **kwargs) - -class LayerNorm(torch.nn.Module): - def __init__(self, num_features: int, eps: float= 1e-5): - super().__init__() - - self.eps = eps - self.gamma = torch.nn.Parameter(torch.ones(num_features)) - self.beta = torch.nn.Parameter(torch.zeros(num_features)) - - - def forward(self, inputs: torch.Tensor): - means = inputs.mean(dim= 1, keepdim= True) - variances = (inputs - means).pow(2.0).mean(dim= 1, keepdim= True) - - x = (inputs - means) * (variances + self.eps).rsqrt() - - shape = [1, -1] + [1] * (x.ndim - 2) - - return x * self.gamma.view(*shape) + self.beta.view(*shape) - -class LightweightConv1d(torch.nn.Module): - ''' - Args: - input_size: # of channels of the input and output - kernel_size: convolution channels - padding: padding - num_heads: number of heads used. The weight is of shape - `(num_heads, 1, kernel_size)` - weight_softmax: normalize the weight with softmax before the convolution - - Shape: - Input: BxCxT, i.e. (batch_size, input_size, timesteps) - Output: BxCxT, i.e. (batch_size, input_size, timesteps) - - Attributes: - weight: the learnable weights of the module of shape - `(num_heads, 1, kernel_size)` - bias: the learnable bias of the module of shape `(input_size)` - ''' - - def __init__( - self, - input_size, - kernel_size=1, - padding=0, - num_heads=1, - weight_softmax=False, - bias=False, - weight_dropout=0.0, - w_init_gain= 'linear' - ): - super().__init__() - self.input_size = input_size - self.kernel_size = kernel_size - self.num_heads = num_heads - self.padding = padding - self.weight_softmax = weight_softmax - self.weight = torch.nn.Parameter(torch.Tensor(num_heads, 1, kernel_size)) - self.w_init_gain = w_init_gain - - if bias: - self.bias = torch.nn.Parameter(torch.Tensor(input_size)) - else: - self.bias = None - self.weight_dropout_module = FairseqDropout( - weight_dropout, module_name=self.__class__.__name__ - ) - self.reset_parameters() - - def reset_parameters(self): - if self.w_init_gain in ['relu', 'leaky_relu']: - torch.nn.init.kaiming_uniform_(self.weight, nonlinearity= self.w_init_gain) - elif self.w_init_gain == 'glu': - assert self.out_channels % 2 == 0, 'The out_channels of GLU requires even number.' 
- torch.nn.init.kaiming_uniform_(self.weight[:self.out_channels // 2], nonlinearity= 'linear') - torch.nn.init.xavier_uniform_(self.weight[self.out_channels // 2:], gain= torch.nn.init.calculate_gain('sigmoid')) - else: - torch.nn.init.xavier_uniform_(self.weight, gain= torch.nn.init.calculate_gain(self.w_init_gain)) - if not self.bias is None: - torch.nn.init.zeros_(self.bias) - - def forward(self, input): - """ - input size: B x C x T - output size: B x C x T - """ - B, C, T = input.size() - H = self.num_heads - - weight = self.weight - if self.weight_softmax: - weight = weight.softmax(dim=-1) - - weight = self.weight_dropout_module(weight) - # Merge every C/H entries into the batch dimension (C = self.input_size) - # B x C x T -> (B * C/H) x H x T - # One can also expand the weight to C x 1 x K by a factor of C/H - # and do not reshape the input instead, which is slow though - input = input.view(-1, H, T) - output = torch.nn.functional.conv1d(input, weight, padding=self.padding, groups=self.num_heads) - output = output.view(B, C, T) - if self.bias is not None: - output = output + self.bias.view(1, -1, 1) - - return output - -class FairseqDropout(torch.nn.Module): - def __init__(self, p, module_name=None): - super().__init__() - self.p = p - self.module_name = module_name - self.apply_during_inference = False - - def forward(self, x, inplace: bool = False): - if self.training or self.apply_during_inference: - return torch.nn.functional.dropout(x, p=self.p, training=True, inplace=inplace) - else: - return x - -class LinearAttention(torch.nn.Module): - def __init__( - self, - channels: int, - calc_channels: int, - num_heads: int, - dropout_rate: float= 0.1, - use_scale: bool= True, - use_residual: bool= True, - use_norm: bool= True - ): - super().__init__() - assert calc_channels % num_heads == 0 - self.calc_channels = calc_channels - self.num_heads = num_heads - self.use_scale = use_scale - self.use_residual = use_residual - self.use_norm = use_norm - - self.prenet = Conv1d( - in_channels= channels, - out_channels= calc_channels * 3, - kernel_size= 1, - bias=False, - w_init_gain= 'linear' - ) - self.projection = Conv1d( - in_channels= calc_channels, - out_channels= channels, - kernel_size= 1, - w_init_gain= 'linear' - ) - self.dropout = torch.nn.Dropout(p= dropout_rate) - - if use_scale: - self.scale = torch.nn.Parameter(torch.zeros(1)) - - if use_norm: - self.norm = LayerNorm(num_features= channels) - - def forward(self, x: torch.Tensor, *args, **kwargs): - ''' - x: [Batch, Enc_d, Enc_t] - ''' - residuals = x - - x = self.prenet(x) # [Batch, Calc_d * 3, Enc_t] - x = x.view(x.size(0), self.num_heads, x.size(1) // self.num_heads, x.size(2)) # [Batch, Head, Calc_d // Head * 3, Enc_t] - queries, keys, values = x.chunk(chunks= 3, dim= 2) # [Batch, Head, Calc_d // Head, Enc_t] * 3 - keys = (keys + 1e-5).softmax(dim= 3) - - contexts = keys @ values.permute(0, 1, 3, 2) # [Batch, Head, Calc_d // Head, Calc_d // Head] - contexts = contexts.permute(0, 1, 3, 2) @ queries # [Batch, Head, Calc_d // Head, Enc_t] - contexts = contexts.view(contexts.size(0), contexts.size(1) * contexts.size(2), contexts.size(3)) # [Batch, Calc_d, Enc_t] - contexts = self.projection(contexts) # [Batch, Enc_d, Enc_t] - - if self.use_scale: - contexts = self.scale * contexts - - contexts = self.dropout(contexts) - - if self.use_residual: - contexts = contexts + residuals - - if self.use_norm: - contexts = self.norm(contexts) - - return contexts diff --git a/spaces/colakin/video-generater/public/ffmpeg/doc/t2h.pm 
b/spaces/colakin/video-generater/public/ffmpeg/doc/t2h.pm deleted file mode 100644 index d07d974286c4ad2663b25d665db0cb95edc9fd94..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/doc/t2h.pm +++ /dev/null @@ -1,359 +0,0 @@ -# makeinfo HTML output init file -# -# Copyright (c) 2011, 2012 Free Software Foundation, Inc. -# Copyright (c) 2014 Andreas Cadhalpun -# Copyright (c) 2014 Tiancheng "Timothy" Gu -# -# This file is part of FFmpeg. -# -# FFmpeg is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# FFmpeg is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. -# -# You should have received a copy of the GNU General Public -# License along with FFmpeg; if not, write to the Free Software -# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - -# no navigation elements -set_from_init_file('HEADERS', 0); - -sub ffmpeg_heading_command($$$$$) -{ - my $self = shift; - my $cmdname = shift; - my $command = shift; - my $args = shift; - my $content = shift; - - my $result = ''; - - # not clear that it may really happen - if ($self->in_string) { - $result .= $self->command_string($command) ."\n" if ($cmdname ne 'node'); - $result .= $content if (defined($content)); - return $result; - } - - my $element_id = $self->command_id($command); - $result .= "\n" - if (defined($element_id) and $element_id ne ''); - - print STDERR "Process $command " - .Texinfo::Structuring::_print_root_command_texi($command)."\n" - if ($self->get_conf('DEBUG')); - my $element; - if ($Texinfo::Common::root_commands{$command->{'cmdname'}} - and $command->{'parent'} - and $command->{'parent'}->{'type'} - and $command->{'parent'}->{'type'} eq 'element') { - $element = $command->{'parent'}; - } - if ($element) { - $result .= &{$self->{'format_element_header'}}($self, $cmdname, - $command, $element); - } - - my $heading_level; - # node is used as heading if there is nothing else. - if ($cmdname eq 'node') { - if (!$element or (!$element->{'extra'}->{'section'} - and $element->{'extra'}->{'node'} - and $element->{'extra'}->{'node'} eq $command - # bogus node may not have been normalized - and defined($command->{'extra'}->{'normalized'}))) { - if ($command->{'extra'}->{'normalized'} eq 'Top') { - $heading_level = 0; - } else { - $heading_level = 3; - } - } - } else { - $heading_level = $command->{'level'}; - } - - my $heading = $self->command_text($command); - # $heading not defined may happen if the command is a @node, for example - # if there is an error in the node. - if (defined($heading) and $heading ne '' and defined($heading_level)) { - - if ($Texinfo::Common::root_commands{$cmdname} - and $Texinfo::Common::sectioning_commands{$cmdname}) { - my $content_href = $self->command_contents_href($command, 'contents', - $self->{'current_filename'}); - if ($content_href) { - my $this_href = $content_href =~ s/^\#toc-/\#/r; - $heading .= ''. - '". - ($ENV{"FA_ICONS"} ? '' - : '#'). - ' '. - '". - ($ENV{"FA_ICONS"} ? '' - : 'TOC'). - ''. 
- ''; - } - } - - if ($self->in_preformatted()) { - $result .= $heading."\n"; - } else { - # if the level was changed, set the command name right - if ($cmdname ne 'node' - and $heading_level ne $Texinfo::Common::command_structuring_level{$cmdname}) { - $cmdname - = $Texinfo::Common::level_to_structuring_command{$cmdname}->[$heading_level]; - } - $result .= &{$self->{'format_heading_text'}}( - $self, $cmdname, $heading, - $heading_level + - $self->get_conf('CHAPTER_HEADER_LEVEL') - 1, $command); - } - } - $result .= $content if (defined($content)); - return $result; -} - -foreach my $command (keys(%Texinfo::Common::sectioning_commands), 'node') { - texinfo_register_command_formatting($command, \&ffmpeg_heading_command); -} - -# determine if texinfo is at least version 6.8 -my $program_version_num = version->declare(get_conf('PACKAGE_VERSION'))->numify; -my $program_version_6_8 = $program_version_num >= 6.008000; - -# print the TOC where @contents is used -if ($program_version_6_8) { - set_from_init_file('CONTENTS_OUTPUT_LOCATION', 'inline'); -} else { - set_from_init_file('INLINE_CONTENTS', 1); -} - -# make chapters

-set_from_init_file('CHAPTER_HEADER_LEVEL', 2); - -# Do not add
-set_from_init_file('DEFAULT_RULE', ''); -set_from_init_file('BIG_RULE', ''); - -# Customized file beginning -sub ffmpeg_begin_file($$$) -{ - my $self = shift; - my $filename = shift; - my $element = shift; - - my $command; - if ($element and $self->get_conf('SPLIT')) { - $command = $self->element_command($element); - } - - my ($title, $description, $encoding, $date, $css_lines, - $doctype, $bodytext, $copying_comment, $after_body_open, - $extra_head, $program_and_version, $program_homepage, - $program, $generator) = $self->_file_header_informations($command); - - my $links = $self->_get_links ($filename, $element); - - my $head1 = $ENV{"FFMPEG_HEADER1"} || < - - - - - -EOT - my $head_title = <<EOT; - $title -EOT - - my $head2 = $ENV{"FFMPEG_HEADER2"} || <<EOT; - - - - - - -
-

-EOT - - my $head3 = $ENV{"FFMPEG_HEADER3"} || < -EOT - - return $head1 . $head_title . $head2 . $head_title . $head3; -} -if ($program_version_6_8) { - texinfo_register_formatting_function('format_begin_file', \&ffmpeg_begin_file); -} else { - texinfo_register_formatting_function('begin_file', \&ffmpeg_begin_file); -} - -sub ffmpeg_program_string($) -{ - my $self = shift; - if (defined($self->get_conf('PROGRAM')) - and $self->get_conf('PROGRAM') ne '' - and defined($self->get_conf('PACKAGE_URL'))) { - return $self->convert_tree( - $self->gdt('This document was generated using @uref{{program_homepage}, @emph{{program}}}.', - { 'program_homepage' => $self->get_conf('PACKAGE_URL'), - 'program' => $self->get_conf('PROGRAM') })); - } else { - return $self->convert_tree( - $self->gdt('This document was generated automatically.')); - } -} -if ($program_version_6_8) { - texinfo_register_formatting_function('format_program_string', \&ffmpeg_program_string); -} else { - texinfo_register_formatting_function('program_string', \&ffmpeg_program_string); -} - -# Customized file ending -sub ffmpeg_end_file($) -{ - my $self = shift; - my $program_string = &{$self->{'format_program_string'}}($self); - my $program_text = < - $program_string -

-EOT - my $footer = $ENV{FFMPEG_FOOTER} || < - - -EOT - return $program_text . $footer; -} -if ($program_version_6_8) { - texinfo_register_formatting_function('format_end_file', \&ffmpeg_end_file); -} else { - texinfo_register_formatting_function('end_file', \&ffmpeg_end_file); -} - -# Dummy title command -# Ignore title. Title is handled through ffmpeg_begin_file(). -set_from_init_file('USE_TITLEPAGE_FOR_TITLE', 1); -sub ffmpeg_title($$$$) -{ - return ''; -} - -texinfo_register_command_formatting('titlefont', - \&ffmpeg_title); - -# Customized float command. Part of code borrowed from GNU Texinfo. -sub ffmpeg_float($$$$$) -{ - my $self = shift; - my $cmdname = shift; - my $command = shift; - my $args = shift; - my $content = shift; - - my ($caption, $prepended) = Texinfo::Common::float_name_caption($self, - $command); - my $caption_text = ''; - my $prepended_text; - my $prepended_save = ''; - - if ($self->in_string()) { - if ($prepended) { - $prepended_text = $self->convert_tree_new_formatting_context( - $prepended, 'float prepended'); - } else { - $prepended_text = ''; - } - if ($caption) { - $caption_text = $self->convert_tree_new_formatting_context( - {'contents' => $caption->{'args'}->[0]->{'contents'}}, - 'float caption'); - } - return $prepended.$content.$caption_text; - } - - my $id = $self->command_id($command); - my $label; - if (defined($id) and $id ne '') { - $label = ""; - } else { - $label = ''; - } - - if ($prepended) { - if ($caption) { - # prepend the prepended tree to the first paragraph - my @caption_original_contents = @{$caption->{'args'}->[0]->{'contents'}}; - my @caption_contents; - my $new_paragraph; - while (@caption_original_contents) { - my $content = shift @caption_original_contents; - if ($content->{'type'} and $content->{'type'} eq 'paragraph') { - %{$new_paragraph} = %{$content}; - $new_paragraph->{'contents'} = [@{$content->{'contents'}}]; - unshift (@{$new_paragraph->{'contents'}}, {'cmdname' => 'strong', - 'args' => [{'type' => 'brace_command_arg', - 'contents' => [$prepended]}]}); - push @caption_contents, $new_paragraph; - last; - } else { - push @caption_contents, $content; - } - } - push @caption_contents, @caption_original_contents; - if ($new_paragraph) { - $caption_text = $self->convert_tree_new_formatting_context( - {'contents' => \@caption_contents}, 'float caption'); - $prepended_text = ''; - } - } - if ($caption_text eq '') { - $prepended_text = $self->convert_tree_new_formatting_context( - $prepended, 'float prepended'); - if ($prepended_text ne '') { - $prepended_save = $prepended_text; - $prepended_text = '

'.$prepended_text.'

'; - } - } - } else { - $prepended_text = ''; - } - - if ($caption and $caption_text eq '') { - $caption_text = $self->convert_tree_new_formatting_context( - $caption->{'args'}->[0], 'float caption'); - } - if ($prepended_text.$caption_text ne '') { - $prepended_text = $self->_attribute_class('div','float-caption'). '>' - . $prepended_text; - $caption_text .= '

'; - } - my $html_class = ''; - if ($prepended_save =~ /NOTE/) { - $html_class = 'info'; - $prepended_text = ''; - $caption_text = ''; - } elsif ($prepended_save =~ /IMPORTANT/) { - $html_class = 'warning'; - $prepended_text = ''; - $caption_text = ''; - } - return $self->_attribute_class('div', $html_class). '>' . "\n" . - $prepended_text . $caption_text . $content . ''; -} - -texinfo_register_command_formatting('float', - \&ffmpeg_float); - -1; diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/misc4_parser.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/misc4_parser.c deleted file mode 100644 index d234dbb62956c804913b77da1fd6df1d07486650..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/misc4_parser.c +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Micronas SC-4 parser - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include "parser.h" - -typedef struct MISC4Context { - ParseContext pc; -} MISC4Context; - -static int misc4_parse(AVCodecParserContext *s, AVCodecContext *avctx, - const uint8_t **poutbuf, int *poutbuf_size, - const uint8_t *buf, int buf_size) -{ - MISC4Context *ctx = s->priv_data; - uint32_t state = ctx->pc.state; - int next = END_NOT_FOUND, i = 0; - - *poutbuf_size = 0; - *poutbuf = NULL; - - if (s->flags & PARSER_FLAG_COMPLETE_FRAMES) { - next = buf_size; - } else { - uint32_t marker = 0; - - switch (avctx->sample_rate) { - case 8000: - case 11025: - marker = 0x11b; - break; - case 16000: - case 32000: - marker = 0x2b2; - break; - } - - for (; i < buf_size; i++) { - state = (state << 8) | buf[i]; - if (state == marker && i > 3) { - next = i - 3; - break; - } - } - - ctx->pc.state = state; - if (ff_combine_frame(&ctx->pc, next, &buf, &buf_size) < 0) { - *poutbuf = NULL; - *poutbuf_size = 0; - return buf_size; - } - } - - *poutbuf = buf; - *poutbuf_size = buf_size; - - return next; -} - -const AVCodecParser ff_misc4_parser = { - .codec_ids = { AV_CODEC_ID_MISC4 }, - .priv_data_size = sizeof(MISC4Context), - .parser_parse = misc4_parse, - .parser_close = ff_parse_close, -}; diff --git a/spaces/colossalturtle4/andite-pastel-mix/app.py b/spaces/colossalturtle4/andite-pastel-mix/app.py deleted file mode 100644 index f90587fb1b4538cf5a92df4655fed8e4e2cf9579..0000000000000000000000000000000000000000 --- a/spaces/colossalturtle4/andite-pastel-mix/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/andite/pastel-mix").launch() \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/Three-Words-Eight-Letters-Say-It-And-Im-Yours-Book-2-Download-Pdf-BEST.md b/spaces/congsaPfin/Manga-OCR/Three-Words-Eight-Letters-Say-It-And-Im-Yours-Book-2-Download-Pdf-BEST.md deleted file mode 100644 index 
58c69ba0baf232ed7fbb7f002592175ab380b6f7..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/Three-Words-Eight-Letters-Say-It-And-Im-Yours-Book-2-Download-Pdf-BEST.md +++ /dev/null @@ -1,84 +0,0 @@ -## Three Words Eight Letters Say It And Im Yours Book 2 Download Pdf - - - - - - - - - -**LINK › [https://urlcod.com/2txiNc](https://urlcod.com/2txiNc)** - - - - - - - - - - - - - -# How to Download PDF of Three Words Eight Letters Say It And Im Yours Book 2 - - - -If you are looking for a romantic and heartwarming story, you might want to check out **Three Words Eight Letters Say It And Im Yours Book 2** by Jade Margarette Pitogo. This is the sequel to the popular Wattpad novel that follows the love story of Chanel and Kean, two young people who face many challenges and obstacles in their relationship. In this book, you will find out what happens after Chanel confesses her feelings to Kean, and how they deal with the consequences of their choices. - - - -But how can you read this book if you don't have a copy? Don't worry, we have a solution for you. In this article, we will show you how to download PDF of **Three Words Eight Letters Say It And Im Yours Book 2** for free. You don't need to pay anything or sign up for anything. Just follow these simple steps and enjoy reading this amazing story. - - - -## Step 1: Go to Scribd.com - - - -Scribd is a website that allows you to access millions of books, audiobooks, magazines, and documents online. You can also upload your own files and share them with others. Scribd has a large collection of Wattpad novels, including **Three Words Eight Letters Say It And Im Yours Book 2**. To go to Scribd, just type [www.scribd.com](https://www.scribd.com/) in your browser or click on this link. - - - -## Step 2: Search for the book - - - -Once you are on Scribd, you can use the search bar at the top of the page to look for the book you want. Just type in **Three Words Eight Letters Say It And Im Yours Book 2** and hit enter. You will see a list of results that match your query. You can also filter the results by category, language, format, and date. - - - -The book you are looking for is uploaded by dquimson and has a cover image of a couple holding hands. You can click on the title or the image to open the book page. - - - -## Step 3: Download the book - - - -On the book page, you will see a preview of the first few pages of the book. You can scroll down to read more or use the arrows at the bottom to navigate. To download the book as a PDF file, you need to click on the download button at the top right corner of the page. A pop-up window will appear asking you to choose a format. Select PDF and click on download again. - - - -The download will start automatically and you will see a progress bar at the bottom of your browser. Depending on your internet speed and the size of the file, it may take a few minutes to complete. Once it is done, you can open the file with any PDF reader or save it to your device for offline reading. - - - -## Conclusion - - - -Congratulations! You have successfully downloaded PDF of **Three Words Eight Letters Say It And Im Yours Book 2**. Now you can enjoy reading this romantic novel anytime and anywhere. If you liked this book, you might also want to check out other Wattpad novels by Jade Margarette Pitogo or other authors on Scribd. You can also share this article with your friends who are looking for this book. - - - -We hope this article was helpful and informative. 
If you have any questions or feedback, please leave a comment below. Thank you for reading! - - 1b8d091108 - - - - - diff --git a/spaces/congsaPfin/Manga-OCR/logs/Download Pyramid Solitaire Saga and Join Helena and Kingsley on a Mysterious Adventure.md b/spaces/congsaPfin/Manga-OCR/logs/Download Pyramid Solitaire Saga and Join Helena and Kingsley on a Mysterious Adventure.md deleted file mode 100644 index f4fe745cb60e5608233ce41f0e0caedde24b491a..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Download Pyramid Solitaire Saga and Join Helena and Kingsley on a Mysterious Adventure.md +++ /dev/null @@ -1,110 +0,0 @@ - -

Free Download Pyramid Solitaire Saga

-

If you are looking for a fun and exciting game that combines Solitaire and puzzle elements, then you should try Pyramid Solitaire Saga. This game is developed by King, the makers of Candy Crush Saga and Farm Heroes Saga, and it offers a unique and immersive experience that will keep you entertained for hours. In this article, we will tell you what Pyramid Solitaire Saga is, how to play it, how to download it for free, and why you should play it.

-

What is Pyramid Solitaire Saga?

-

Pyramid Solitaire Saga is a game that mixes the classic card game of Solitaire with a twist of strategy and mystery. You will join Helena, a treasure hunter, and her loyal friend Kingsley, a gerbil, on their quest to uncover the secrets of the ancient world. You will travel to different locations, such as The Hidden Tomb, The Emerald Dream, and beyond, and solve puzzles by matching cards that are one higher or lower than the one on your deck. You will also encounter special cards, such as the Scarab Card, the Mummy Card, and the Joker Card, that will help or hinder your progress. Along the way, you will discover clues, collect treasures, cast spells, and unlock new episodes and levels.

-

free download pyramid solitaire saga


Download File ———>>> https://urlca.com/2uO5KE



-

A Solitaire strategy puzzle game

-

Pyramid Solitaire Saga is not your typical Solitaire game. It requires more than just luck and skill; it also requires strategy and planning. You will have to think ahead and use your moves wisely, as you have a limited number of cards in your deck. You will also have to deal with obstacles, such as locked cards, frozen cards, cursed cards, and more. You will have to use your magic boosters, such as the Shuffle Booster, the Extra Joker Booster, the Undo Booster, and more, to overcome these challenges. You will also have to complete different objectives in each level, such as clearing all the cards, reaching a certain score, finding all the Scarabs, and more.

-

A mysterious adventure with Helena and Kingsley

-

Pyramid Solitaire Saga is not just a game; it is also a story. You will follow the adventures of Helena and Kingsley as they explore the wonders of the ancient world. You will learn about their personalities, their motivations, their relationships, and their secrets. You will also meet other characters along the way, such as Professor Crabtree, Cleopatra's Ghost, The Pharaoh's Cat, and more. You will enjoy the humor, the drama, the romance, and the mystery that unfold in each episode.

-

A magical journey through ancient worlds

-

Pyramid Solitaire Saga is not just a story; it is also a spectacle. You will be amazed by the stunning graphics and animations that bring the ancient worlds to life. You will see pyramids, temples, tombs, oases, deserts, jungles, islands, volcanoes, caves, and more. You will hear the sounds of nature, music, voices, and effects that create a realistic and immersive atmosphere. You will feel the magic and mystery that surround each location.

-

How to play Pyramid Solitaire Saga?

-

Pyramid Solitaire Saga is easy to play but hard to master. Here are some tips on how to play it:

-

The basics of the game

-

The goal of each level is to clear all the cards from the board by matching them with the card on your deck. You can match a card that is one higher or lower than the one on your deck. For example, you can match a 2 with a 3 or an A, or a Q with a J or a K. You can also match a Joker with any card. You can tap on the deck to draw a new card if you run out of matches. You can also tap on the discard pile to see the last card you played.
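To make the matching rule concrete, here is a minimal Python sketch of how such a check could be written; the rank order, the wrap-around between A and K, and every name in it are illustrative assumptions, not code from the game.

```python
# Minimal sketch of the matching rule described above (illustrative, not game code).
RANKS = ["A", "2", "3", "4", "5", "6", "7", "8", "9", "10", "J", "Q", "K"]

def can_match(board_card: str, deck_card: str) -> bool:
    """A board card can be played if it is one rank above or below the deck card;
    a Joker matches anything. The wrap-around between A and K is an assumption."""
    if "Joker" in (board_card, deck_card):
        return True
    distance = abs(RANKS.index(board_card) - RANKS.index(deck_card))
    return distance in (1, len(RANKS) - 1)

# Examples from the text: a 2 matches a 3 or an A, and a Q matches a J or a K.
assert can_match("2", "3") and can_match("2", "A")
assert can_match("Q", "J") and can_match("Q", "K")
assert can_match("5", "Joker")
```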

-

The special cards and boosters

-

Some cards have special effects that can help or hinder your progress. Here are some of them:

-
    -
  • The Scarab Card: This card has a scarab beetle on it. If you match it, you will collect it and earn extra points. You can also find hidden Scarabs under some cards. Some levels require you to find all the Scarabs to complete them.
  • -
  • The Mummy Card: This card has a mummy on it. If you match it, you will remove it from the board, but it will also lock one of your deck cards for a few turns. You will not be able to use that card until it is unlocked.
  • -
  • The Key Card: This card has a key on it. If you match it, you will unlock one of the locked cards on the board. Some cards are locked by chains or ice and you need to match them with a Key Card to free them.
  • -
  • The Shuffle Booster: This booster has a shuffle icon on it. You can use it to shuffle all the cards on the board. This can help you find new matches or get rid of unwanted cards.
  • -
  • The Extra Joker Booster: This booster has a joker icon on it. You can use it to add an extra Joker Card to your deck. This can help you match any card on the board.
  • -
  • The Undo Booster: This booster has an undo icon on it. You can use it to undo your last move. This can help you correct a mistake or change your strategy.
  • -
-

There are more special cards and boosters that you will discover as you play the game. You can also buy more boosters with gold bars, which are the premium currency of the game.
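As a small illustration of the locking effect of the Mummy Card described above, here is a hedged Python sketch; the class, field, and function names are assumptions made up for this example and do not come from the game.

```python
# Illustrative model of the Mummy Card effect: one deck card becomes unusable for a few turns.
from dataclasses import dataclass

@dataclass
class DeckCard:
    rank: str
    locked_for: int = 0            # turns remaining before the card can be used again

    @property
    def usable(self) -> bool:
        return self.locked_for == 0

def apply_mummy_card(deck: list[DeckCard], index: int, turns: int = 3) -> None:
    """Matching a Mummy Card locks one of your deck cards for a few turns (3 is an assumed default)."""
    deck[index].locked_for = turns

def end_turn(deck: list[DeckCard]) -> None:
    """At the end of every turn, locked cards count down toward being unlocked."""
    for card in deck:
        card.locked_for = max(0, card.locked_for - 1)
```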

-

The episodes and levels

-

Pyramid Solitaire Saga has hundreds of episodes and levels that you can play and enjoy. Each episode has a different theme and setting, such as Egypt, Greece, China, Atlantis, and more. Each level has a different layout and objective, such as clearing all the cards, reaching a certain score, finding all the Scarabs, and more. You will also face different challenges and surprises in each level, such as sandstorms, fireballs, traps, and more.

-


-

To play an episode, you need to have enough lives. You have five lives at the start of the game, and you lose one life every time you fail a level. You can regain lives by waiting for some time, asking your friends for help, or buying them with gold bars. To unlock a new episode, you need to have enough stars. You earn stars by completing levels with high scores. You can also unlock episodes by asking your friends for help or paying with gold bars.

-

How to download Pyramid Solitaire Saga for free?

-

Pyramid Solitaire Saga is a free game that you can download and play on your mobile device or your computer. Here are some ways to download it:

-

Download from Google Play Store

-

If you have an Android device, such as a smartphone or a tablet, you can download Pyramid Solitaire Saga from the Google Play Store. Here are the steps:

-
    -
  1. Open the Google Play Store app on your device.
  2. -
  3. Search for Pyramid Solitaire Saga in the search bar.
  4. -
  5. Tap on the game icon and then tap on Install.
  6. -
  7. Wait for the game to download and install on your device.
  8. -
  9. Tap on Open to launch the game and start playing.
  10. -
-

Download from Apple App Store

-

If you have an iOS device, such as an iPhone or an iPad, you can download Pyramid Solitaire Saga from the Apple App Store. Here are the steps:

-
    -
  1. Open the App Store app on your device.
  2. -
  3. Search for Pyramid Solitaire Saga in the search bar.
  4. -
  5. Tap on the game icon and then tap on Get.
  6. -
  7. Wait for the game to download and install on your device.
  8. -
  9. Tap on Open to launch the game and start playing.
  10. -
-

Download from King.com

-

If you have a computer with an internet connection, you can download Pyramid Solitaire Saga from King.com, which is the official website of King, the developer of the game. Here are the steps:

    -
  1. Open your web browser and go to https://king.com/game/pyramidsolitairesaga.
  2. -
  3. Click on the Play Now button and then click on Download.
  4. -
  5. Wait for the game to download and install on your computer.
  6. -
  7. Click on the game icon to launch the game and start playing.
  8. -
-

Why play Pyramid Solitaire Saga?

-

Pyramid Solitaire Saga is more than just a game; it is a source of fun, entertainment, and relaxation. Here are some reasons why you should play it:

-

The benefits of playing Solitaire games

-

Solitaire games are not only enjoyable, but also beneficial for your brain and mental health. Playing Solitaire games can help you improve your memory, concentration, logic, problem-solving, and decision-making skills. It can also help you reduce stress, anxiety, boredom, and loneliness. It can also boost your mood, confidence, and self-esteem. Playing Solitaire games can also keep your mind sharp and prevent cognitive decline as you age.

-

The features and graphics of Pyramid Solitaire Saga

-

Pyramid Solitaire Saga is not just any Solitaire game; it is a Solitaire game with amazing features and graphics. You will love the colorful and detailed design of the cards, the board, the background, and the characters. You will also appreciate the smooth and responsive gameplay, the easy and intuitive controls, the clear and helpful instructions, and the friendly and supportive feedback. You will also enjoy the variety and diversity of the game modes, the levels, the objectives, the challenges, and the rewards. You will also be impressed by the original and captivating story, the music, the sound effects, and the voice-overs.

-

The challenges and rewards of Pyramid Solitaire Saga

-

Pyramid Solitaire Saga is not just a Solitaire game; it is a Solitaire game with challenges and rewards. You will never get bored or frustrated with this game, as it offers a balanced and fair difficulty level that adapts to your skill and progress. You will also have fun and feel motivated by the different goals and achievements that you can pursue and accomplish in this game. You can also compete and cooperate with other players around the world through the leaderboards, the events, the tournaments, and the social features. You can also earn and collect various items and rewards in this game, such as gold bars, magic points, stars, boosters, cards, treasures, clues, spells, stickers, badges, trophies, and more.

-

Conclusion

-

Pyramid Solitaire Saga is a game that you should not miss if you are a fan of Solitaire games or puzzle games. It is a game that will challenge your mind, entertain your senses, and transport you to a magical world of mystery and adventure. It is a game that you can download for free on your mobile device or your computer and play anytime and anywhere you want. It is a game that you can enjoy alone or with your friends. It is a game that will make you happy.

-

If you are ready to join Helena and Kingsley on their quest to uncover the secrets of the ancient world, then download Pyramid Solitaire Saga today and start playing!

-

Frequently Asked Questions

-

Here are some common questions that people have about Pyramid Solitaire Saga:

-

Q: How many episodes and levels are there in Pyramid Solitaire Saga?

-

A: As of June 2023, there are 250 episodes and 5,000 levels in Pyramid Solitaire Saga. The developers are constantly adding new episodes and levels to keep the game fresh and exciting.

-

Q: How can I get more lives in Pyramid Solitaire Saga?

-

A: There are several ways to get more lives in Pyramid Solitaire Saga. You can wait for some time until your lives refill automatically (one life every 30 minutes). You can ask your friends for help by sending or receiving lives through Facebook or King.com. You can buy more lives with gold bars (the premium currency of the game). You can also get free lives by watching ads or completing offers.

-

Q: How can I get more gold bars in Pyramid Solitaire Saga?

-

A: There are several ways to get more gold bars in Pyramid Solitaire Saga. You can buy them with real money through in-app purchases. You can earn them by completing achievements or reaching milestones in the game. You can also get free gold bars by watching ads or completing offers.

-

Q: How can I contact the customer support of Pyramid Solitaire Saga?

-

A: If you have any questions or issues with Pyramid Solitaire Saga, you can contact the customer support team by following these steps:

-
    -
  1. Open the game and tap on the settings icon (the gear symbol) on the top right corner of the screen.
  2. -
  3. Tap on the help icon (the question mark symbol) on the bottom left corner of the screen.
  4. -
  5. Tap on the contact us button on the bottom right corner of the screen.
  6. -
  7. Fill out the form with your name, email, subject, and message.
  8. -
  9. Tap on the send button to submit your query.
  10. -
-

You can also visit the official website of King, https://king.com/, and click on the support link at the bottom of the page. You can then select Pyramid Solitaire Saga from the list of games and browse through the FAQs or contact the support team.

-

Q: How can I play Pyramid Solitaire Saga with my friends?

-

A: Pyramid Solitaire Saga is a social game that you can play with your friends. You can connect your game to Facebook or King.com and invite your friends to join you. You can also see your friends' progress and scores on the map and the leaderboards. You can also send and receive lives, gifts, messages, and tips with your friends. You can also participate in events and tournaments with your friends and compete for prizes and glory.

-
-
\ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/FDE.AI Pro [ROOT] FPS meter The Ultimate AI Optimization Tool for Android.md b/spaces/congsaPfin/Manga-OCR/logs/FDE.AI Pro [ROOT] FPS meter The Ultimate AI Optimization Tool for Android.md deleted file mode 100644 index e891ec62f89bbdd26a2bd079377596864b528c28..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/FDE.AI Pro [ROOT] FPS meter The Ultimate AI Optimization Tool for Android.md +++ /dev/null @@ -1,105 +0,0 @@ - -

What is FDE.AI and why you should download it

-

If you are looking for a way to optimize your Android device and make it run faster, smoother, and longer, you might want to check out FDE.AI. FDE.AI stands for FeraDroid Engine Artificial Intelligence, and it is an all-in-one ultimate optimizer for all devices running Android OS.

-

download fde ai apk


Download › https://urlca.com/2uObNR



-

FDE.AI automatically modifies a wide range of low-level system parameters using ROOT access. It adjusts the settings based on your device's hardware and software characteristics, so that every device is configured individually. FDE.AI is compatible with a very wide range of devices and Android OS versions, from Android 4.4 KitKat to Android 13.

-

Some of the benefits of using FDE.AI are:

-
    -
  • It improves your device's performance by optimizing CPU, GPU, RAM, network, and battery usage.
  • -
  • It reduces your device's power consumption by controlling thermal throttling, screen refresh rate, and background processes.
  • -
  • It enhances your device's compatibility by fixing bugs, errors, and crashes caused by incompatible apps or ROMs.
  • -
  • It offers you a variety of features and settings to customize your device according to your preferences.
  • -
-

How to download FDE.AI apk

-

If you are interested in trying FDE.AI, you will need to download the apk file and install it on your device. However, before you do that, you should be aware of some requirements:

-
    -
  • You need ROOT access on your device. This means that you have full control over your device's system and can modify it as you wish. If you don't have ROOT access, you can use tools like Magisk or SuperSU to get it. (A quick way to check from a computer whether a device already reports root is sketched after this list.)
  • -
  • You need a compatible device. FDE.AI works on most devices running Android OS, but some devices may not support some features or may have issues with stability. You can check the compatibility list on GitHub or ask other users on Telegram or XDA forums.
  • -
-
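For readers who prefer to verify things from a computer, the following Python sketch uses adb to ask a connected device whether a root shell is available. It is only an illustration built on assumptions: it presumes the Android platform tools (adb) are installed, USB debugging is enabled, and the device's root manager grants the su request; it is not part of FDE.AI.

```python
# Sketch: check from a computer whether a connected Android device grants root via adb.
# Assumes adb is installed, USB debugging is on, and the su prompt is granted on the device.
import subprocess

def device_reports_root() -> bool:
    """Return True if running `su -c id` on the device reports uid 0 (root)."""
    try:
        result = subprocess.run(
            ["adb", "shell", "su", "-c", "id"],
            capture_output=True, text=True, timeout=15,
        )
    except (FileNotFoundError, subprocess.TimeoutExpired):
        return False   # adb is missing, or the su prompt was never answered in time
    return "uid=0" in result.stdout

if __name__ == "__main__":
    print("Device reports root:", device_reports_root())
```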

Once you have met these requirements, you can proceed to download the apk file from one of these sources:

-
    -
  • GitHub: This is the official source of FDE.AI. You can find the latest releases, changelogs, documentation, and support here.
  • -
  • APKCombo: This is an alternative source of FDE.AI. You can find older versions, virus scan reports, and ratings here.
  • -
-

After you have downloaded the apk file, you can install it in two ways:

-
    -
  • As a Magisk module: This is the recommended way of installing FDE.AI. You just need to rename the apk file to .zip and flash it through Magisk Manager. This way, you can easily enable/disable/uninstall FDE.AI without affecting your system partition. (A rough command-line sketch of this rename-and-flash flow follows this list.)
  • -
  • As a standalone app: This is another way of installing FDE.AI. You just need to enable unknown sources in your settings and install the apk file as a normal app. This way, you can use FDE.AI without Magisk, but you may have some issues with system updates or app compatibility.
  • -
-
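As a rough illustration of the rename-and-flash route in the first option above, the sketch below drives the same steps from a computer with Python and adb. The file names are placeholders, and the `magisk --install-module` command is assumed to be available on recent Magisk builds; flashing the zip through the Magisk app, as described above, remains the documented path.

```python
# Rough sketch of installing the renamed apk as a Magisk module from a computer.
# File names are placeholders; `magisk --install-module` is assumed to exist on recent Magisk builds.
import shutil
import subprocess

APK = "fde_ai.apk"            # the downloaded apk (placeholder name)
MODULE_ZIP = "fde_ai.zip"     # Magisk flashes modules as .zip archives

shutil.copyfile(APK, MODULE_ZIP)                                   # "rename" the apk to .zip
subprocess.run(["adb", "push", MODULE_ZIP, "/sdcard/"], check=True)
subprocess.run(
    ["adb", "shell", "su", "-c", f"magisk --install-module /sdcard/{MODULE_ZIP}"],
    check=True,
)
subprocess.run(["adb", "reboot"], check=True)                      # the module loads on the next boot
```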

How to use FDE.AI app

-

Once you have installed FDE.AI, you can launch the app and start optimizing your device. FDE.AI has a simple and intuitive user interface that lets you access its features and settings easily. Some of the features of FDE.AI are:

-


-
    -
  • AI mode: This is the main feature of FDE.AI. It automatically adjusts the system parameters based on your device's current state and usage. You can choose between three AI modes: Balanced, Performance, and Battery. You can also enable/disable AI mode manually or set a schedule for it.
  • -
  • FPS meter: This is a feature that shows you the current frame rate of your device's screen. You can use it to monitor the performance of your device and the impact of FDE.AI. You can customize the FPS meter's position, size, color, and transparency.
  • -
  • Floating window: This is a feature that allows you to access FDE.AI from any app or screen. You can use it to quickly switch between AI modes, enable/disable FPS meter, or open the app settings. You can customize the floating window's position, size, icon, and transparency.
  • -
  • Premium functions: These are some advanced features that require a donation to unlock. They include CPU/GPU/RAM overclocking, screen resolution changer, sound enhancer, and more. You can donate any amount you want to support the developer and get access to these functions.
  • -
-

In addition to these features, FDE.AI also has a variety of settings that you can customize according to your preferences. Some of the settings are:

-
    -
  • AI cycle: This is the interval at which FDE.AI applies the system parameters. You can choose between 5, 10, 15, 30, or 60 seconds. The shorter the cycle, the faster the optimization, but also the higher the battery consumption.
  • -
  • Thermal fuse: This is a safety feature that prevents your device from overheating. It monitors your device's temperature and disables AI mode if it exceeds a certain threshold. You can choose between 40°C, 45°C, 50°C, 55°C, or 60°C.
  • -
  • Overlay windows: This is a setting that allows you to enable/disable the FPS meter and the floating window. You can also choose which apps to exclude from showing these windows.
  • -
-

Finally, here are some tips on how to get the best results from FDE.AI:

-
    -
  • Disable power-saving mode and OS logs on your device. These features may interfere with FDE.AI's optimization and cause performance issues.
  • -
  • Reboot your device after installing or updating FDE.AI. This will ensure that FDE.AI is properly initialized and applied.
  • -
  • Give feedback to the developer and report any bugs or issues you encounter. This will help improve FDE.AI and make it more compatible and stable.
  • -
-

Conclusion

-

FDE.AI is an amazing app that can optimize your Android device and make it run faster, smoother, and longer. It uses artificial intelligence to adjust the system parameters based on your device's characteristics and usage. It is compatible with most devices and Android OS versions, and it offers you a lot of features and settings to customize your device according to your preferences.

-

If you want to try FDE.AI, you can download the apk file from GitHub or APKCombo and install it as a Magisk module or a standalone app. You will need ROOT access and a compatible device to use FDE.AI. You can also donate any amount you want to unlock some premium functions and support the developer.

-

So what are you waiting for? Download FDE.AI today and enjoy a better Android experience!

-

FAQs

-
    -
  1. What is FDE.AI?
  2. -

    FDE.AI stands for FeraDroid Engine Artificial Intelligence, and it is an all-in-one ultimate optimizer for all devices running Android OS.

    -
  3. How does FDE.AI work?
  4. -

    FDE.AI automatically modifies a wide range of low-level system parameters using ROOT access. It adjusts the settings based on your device's hardware and software characteristics, so that every device is configured individually.

    -
  5. What are the benefits of using FDE.AI?
  6. -

    FDE.AI improves your device's performance by optimizing CPU, GPU, RAM, network, and battery usage. It reduces your device's power consumption by controlling thermal throttling, screen refresh rate, and background processes. It enhances your device's compatibility by fixing bugs, errors, and crashes caused by incompatible apps or ROMs. It offers you a variety of features and settings to customize your device according to your preferences

    -
  7. How to download and install FDE.AI?
  8. -

    You can download the apk file from GitHub or APKCombo and install it as a Magisk module or a standalone app. You will need ROOT access and a compatible device to use FDE.AI.

    -
  9. How to use FDE.AI app?
  10. -

    You can use FDE.AI app to access its features and settings. You can choose between three AI modes: Balanced, Performance, and Battery. You can also enable/disable FPS meter, floating window, and premium functions. You can customize the app settings, such as AI cycle, thermal fuse, and overlay windows.

    -
  11. How to get the best results from FDE.AI?
  12. -

    You can get the best results from FDE.AI by disabling power-saving mode and OS logs on your device, rebooting your device after installing or updating FDE.AI, and giving feedback to the developer and reporting any bugs or issues you encounter.

    -

-
-
\ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Taxi Sim 2019 The Ultimate Taxi Game for Windows 10.md b/spaces/congsaPfin/Manga-OCR/logs/Taxi Sim 2019 The Ultimate Taxi Game for Windows 10.md deleted file mode 100644 index 80b952a9e054ebf5fe1fd6c49945a8bb0a4ee83d..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Taxi Sim 2019 The Ultimate Taxi Game for Windows 10.md +++ /dev/null @@ -1,138 +0,0 @@ - -

Taxi Game Download for PC Windows 10: How to Play the Best Taxi Simulator Games on Your Computer

-

Introduction

-

Do you love driving around the city, picking up passengers, and earning money as a taxi driver? If so, you might enjoy playing taxi simulator games on your Windows 10 PC. Taxi simulator games are a genre of simulation games that let you experience the life of a taxi driver in a realistic and immersive way. You can choose from different vehicles, explore various cities, complete a range of missions, and interact with all kinds of customers. Taxi simulator games are fun because they offer you a lot of freedom, challenge, and excitement.

-

taxi game download for pc windows 10


Download ✓ https://urlca.com/2uO7yp



-

But why play taxi games on a Windows 10 PC instead of your mobile device? There are many benefits of playing taxi games on a PC, such as:

-
    -
  • Better graphics and sound quality
  • -
  • Larger screen and more comfortable controls
  • -
  • More storage space and faster performance
  • -
  • More options and features
  • -
  • More compatibility and accessibility
  • -
-

In this article, we will show you how to download and install some of the best taxi games on a Windows 10 PC, such as Taxi Game by baklabs, Crazy Taxi Classic by SEGA, and Taxi Sim 2019: Free Taxi Game by Microsoft Store. We will also cover the main features and user reviews of each game, as well as some tips on how to play them. So, buckle up and get ready for a ride!

-

How to download and install taxi games on PC windows 10

-

Taxi Game by baklabs

-

Taxi Game by baklabs is one of the most popular taxi simulator games on Android, with over 50 million downloads. It is a realistic and open-world driving game that lets you drive a taxi cab in one of three 3D cities: New York, Miami, or Los Angeles. You can pick up passengers, follow the GPS directions, avoid traffic jams, obey traffic rules, and earn money. You can also upgrade your taxi, hire drivers, buy new cars, and customize your vehicle.

-

Features and reviews

-

Some of the features of Taxi Game by baklabs are:

-
    -
  • Realistic controls (tilt steering, buttons or virtual steering wheel)
  • -
  • 2020 updated engine sounds
  • -
  • Realistic vehicle features (your car will get dirty or require repairs)
  • -
  • Visual tuning options
  • -
  • Spectacular environments and weather
  • -
  • Realistic city traffic (cars, vans, trucks, motorcycles, bicycles)
  • -
  • Diverse and realistic pedestrian traffic
  • -
  • Career, Free Roam and Multiplayer modes
  • -
  • New cars and challenges added weekly
  • -
-

Taxi Game by baklabs has a rating of 4.1 out of 5 stars on Google Play Store, based on over 600 thousand reviews. Most users praise the game for its graphics, gameplay, variety, and realism. Some users complain about the ads, glitches, controls, and difficulty.

-


-

How to download and play with BlueStacks

-

To play Taxi Game by baklabs on your Windows 10 PC, you will need emulator software that can run Android apps on your computer. One of the best emulators is BlueStacks, which is free, easy, and safe to use. Here are the steps to download and play Taxi Game by baklabs with BlueStacks (a command-line alternative for sideloading the apk is sketched after the steps):

-
    -
  1. Download and install BlueStacks on your PC windows 10 from the official website: https://www.bluestacks.com/
  2. -
  3. Launch BlueStacks and sign in with your Google account (or create one if you don't have one)
  4. -
  5. Go to the search bar and type "Taxi Game by baklabs" and hit enter
  6. -
  7. Select the game from the list of results and click on "Install"
  8. -
  9. Wait for the game to download and install on your PC windows 10
  10. -
  11. Click on the game icon on the home screen of BlueStacks to launch the game
  12. -
  13. Enjoy playing Taxi Game by baklabs on your PC windows 10 with a larger screen, better graphics, and more comfortable controls
  14. -
-
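If the in-emulator Play Store route above is not an option (for example, the game does not show up in your region), a common alternative is to sideload an apk you obtained yourself into BlueStacks over adb, as mentioned before the steps. The sketch below is only an illustration built on assumptions: port 5555 is typical for BlueStacks but varies by version, the apk path is a placeholder, and "Android Debug Bridge" must first be enabled in the BlueStacks settings.

```python
# Sketch: sideload a locally downloaded apk into a running BlueStacks instance over adb.
# Port 5555 and the apk path are assumptions; enable "Android Debug Bridge" in BlueStacks settings first.
import subprocess

EMULATOR = "localhost:5555"     # common adb endpoint for BlueStacks, but check your own settings
APK_PATH = "taxi_game.apk"      # placeholder path to an apk you downloaded yourself

subprocess.run(["adb", "connect", EMULATOR], check=True)
subprocess.run(["adb", "-s", EMULATOR, "install", "-r", APK_PATH], check=True)   # -r replaces an existing install
```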

Crazy Taxi Classic by SEGA

-

Crazy Taxi Classic by SEGA is a remastered version of the original arcade hit that was released in 1999. It is a fast-paced and action-packed driving game that lets you drive a crazy taxi in one of two 3D cities: Arcade City or Original City. You can pick up customers, take them to their destinations, and earn money. You can also perform stunts, jumps, drifts, and combos to boost your score. You can choose from four different cabs, each with their own driver and personality.

-

Features and reviews

-

Some of the features of Crazy Taxi Classic by SEGA are:

-
    -
  • Original music by The Offspring and Bad Religion
  • -
  • 16 mini-games to test your driving skills
  • -
  • Leaderboards and achievements to compete with other players
  • -
  • Controller compatibility for a more authentic arcade experience
  • -
  • Customizable graphics, controls, and music settings
  • -
-

Crazy Taxi Classic by SEGA has a rating of 4.4 out of 5 stars on Google Play Store, based on over 200 thousand reviews. Most users love the game for its nostalgia, fun, music, and challenge. Some users dislike the game for its ads, controls, graphics, and bugs.

-

How to download and play with BlueStacks

-

To play Crazy Taxi Classic by SEGA on your PC windows 10, you can also use BlueStacks as your emulator. Here are the steps to download and play Crazy Taxi Classic by SEGA with BlueStacks:

-
    -
  1. Download and install BlueStacks on your PC windows 10 from the official website: https://www.bluestacks.com/
  2. -
  3. Launch BlueStacks and sign in with your Google account (or create one if you don't have one)
  4. -
  5. Go to the search bar and type "Crazy Taxi Classic by SEGA" and hit enter
  6. -
  7. Select the game from the list of results and click on "Install"
  8. -
  9. Wait for the game to download and install on your PC windows 10
  10. -
  11. Click on the game icon on the home screen of BlueStacks to launch the game
  12. -
  13. Enjoy playing Crazy Taxi Classic by SEGA on your PC windows 10 with a larger screen, better graphics, and more comfortable controls
  14. -
-

Taxi Sim 2019: Free Taxi Game by Microsoft Store

-

Taxi Sim 2019: Free Taxi Game by Microsoft Store is a free taxi simulator game that you can download directly from the Microsoft Store on your PC windows 10. It is a realistic and modern driving game that lets you drive a taxi cab in one of four 3D cities: New York, London, Berlin, or Paris. You can pick up passengers, follow the GPS directions, avoid traffic jams, obey traffic rules, and earn money. You can also upgrade your taxi, buy new cars, and customize your vehicle.

-

Features and reviews

-

Some of the features of Taxi Sim 2019: Free Taxi Game by Microsoft Store are:

-
    -
  • Realistic physics and car handling
  • -
  • Detailed environments and weather effects
  • -
  • Different camera angles (including first-person view)
  • -
  • Different types of passengers (including celebrities, criminals, tourists, etc.)
  • -
  • Different types of missions (including time trials, parking challenges, VIP services, etc.)
  • -
  • Different types of taxis (including sedan, SUV, limo, etc.)
  • -
  • Different types of customization options ( including color, paint, wheels, etc.)
  • -
-

Taxi Sim 2019: Free Taxi Game by Microsoft Store has a rating of 4.3 out of 5 stars on Microsoft Store, based on over 300 reviews. Most users like the game for its graphics, gameplay, variety, and realism. Some users dislike the game for its ads, glitches, controls, and difficulty.

-

How to download and install from Microsoft Store

-

To play Taxi Sim 2019: Free Taxi Game by Microsoft Store on your PC windows 10, you don't need any emulator software. You can simply download and install the game from the Microsoft Store on your PC windows 10. Here are the steps to download and install Taxi Sim 2019: Free Taxi Game by Microsoft Store:

-
    -
  1. Go to the Microsoft Store app on your PC windows 10 (or open it from the Start menu)
  2. -
  3. Go to the search bar and type "Taxi Sim 2019: Free Taxi Game" and hit enter
  4. -
  5. Select the game from the list of results and click on "Get"
  6. -
  7. Wait for the game to download and install on your PC windows 10
  8. -
  9. Launch the game from its Microsoft Store page or from the Start menu
  10. -
  11. Enjoy playing Taxi Sim 2019: Free Taxi Game by Microsoft Store on your PC windows 10 with a larger screen, better graphics, and more comfortable controls
  12. -
-
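
If you prefer the command line, you can also check whether the game is listed in the Microsoft Store catalog with winget before following the steps above. This is only a rough sketch: it assumes winget is available on your PC windows 10, and the query string is just a guess at how the game is listed, not a verified package name.

```python
# Rough sketch: query the Microsoft Store catalog from the command line via winget.
# Assumes winget is installed; "Taxi Sim 2019" is only a search string, not a
# verified package ID.
import subprocess

subprocess.run(
    ["winget", "search", "--source", "msstore", "Taxi Sim 2019"],
    check=True,
)
```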

Conclusion

-

In this article, we have shown you how to play some of the best taxi simulator games on your PC windows 10, such as Taxi Game by baklabs, Crazy Taxi Classic by SEGA, and Taxi Sim 2019: Free Taxi Game by Microsoft Store. We have also given you some features and reviews of each game, as well as some tips on how to download and install them. We hope you have found this article helpful and informative.

-

If you are looking for a fun and realistic way to experience the life of a taxi driver, you should definitely try playing taxi games on PC windows 10. You will not only enjoy the graphics, sound, and gameplay, but also learn some driving skills, explore different cities, meet different customers, and earn some money. You can also customize your taxi, upgrade your car, and compete with other players. So, what are you waiting for? Download and play taxi games on PC windows 10 today and have a blast!

-

FAQs

-

Here are some frequently asked questions about taxi games on PC windows 10:

-
    -
  1. Q: Are taxi games on PC windows 10 free?
  2. -
  3. A: Most taxi games on PC windows 10 are free to download and play, but they may contain ads or in-app purchases. You can also find some paid taxi games on PC windows 10 that offer more features and content.
  4. -
  5. Q: Are taxi games on PC windows 10 safe?
  6. -
  7. A: Yes, taxi games on PC windows 10 are safe to play as long as you download them from trusted sources, such as the Google Play Store, the Microsoft Store, or official websites. You should also use antivirus software and a firewall to protect your PC windows 10 from malware and viruses.
  8. -
  9. Q: Are taxi games on PC windows 10 realistic?
  10. -
  11. A: Yes, taxi games on PC windows 10 are realistic in terms of graphics, sound, physics, and car handling. They also simulate real-life scenarios, such as traffic rules, weather conditions, customer behavior, etc. However, they are still games and not simulations, so they may not be completely accurate or realistic.
  12. -
  13. Q: Are taxi games on PC windows 10 educational?
  14. -
  15. A: Yes, taxi games on PC windows 10 are educational in some ways. They can help you improve your driving skills, such as steering, braking, accelerating, parking, etc. They can also help you learn about different cities, landmarks, cultures, languages, etc. They can also help you develop your problem-solving, decision-making, and time-management skills.
  16. -
  17. Q: Are taxi games on PC windows 10 fun?
  18. -
  19. A: Yes, taxi games on PC windows 10 are fun and entertaining. They offer you a lot of freedom, challenge, and excitement. You can drive around the city, pick up customers, earn money, and perform stunts. You can also customize your taxi, upgrade your car, and compete with other players. You can also choose from different modes, vehicles, and cities.
  20. -

401be4b1e0
-
-
\ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Tekken 3 APK Everything You Need to Know About the Amazing Game for Android.md b/spaces/congsaPfin/Manga-OCR/logs/Tekken 3 APK Everything You Need to Know About the Amazing Game for Android.md deleted file mode 100644 index 4b45a598cda9744c0b51e0308ea0319f3d51657c..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Tekken 3 APK Everything You Need to Know About the Amazing Game for Android.md +++ /dev/null @@ -1,136 +0,0 @@ -
-

Tekken 3 APK Download 2018: How to Play the Classic Arcade Game on Your Android Device

-

Tekken 3 is one of the best arcade games of its time. Namco released it in arcades in 1997 and on the PlayStation console in 1998, and it became a worldwide phenomenon. It features a roster of over 20 characters, each with their own unique fighting style, moves, and story. It also offers various modes, such as Arcade, Versus, Team Battle, Survival, Time Attack, Practice, and more. Its fast-paced, fluid, and realistic gameplay will keep you hooked for hours.

-

But what if you want to play Tekken 3 on your Android device? Well, you are in luck. You can still enjoy this classic game on your mobile phone by downloading an APK file and using an emulator app. In this article, we will show you how to do that step by step. We will also give you some tips and tricks on how to master the game and beat your opponents. So, let's get started.

-

tekken 3 apk download 2018


DOWNLOAD 🗸🗸🗸 https://urlca.com/2uOecO



-

How to Download and Install Tekken 3 APK on Your Android Device

-

The first thing you need to do is to download the Tekken 3 APK file from a reliable source. You can use this link to get it. It is a safe and secure file that has been tested by many users. It is also compatible with most Android devices.

-

Once you have downloaded the file, you need to install it on your device. To do that, you need to enable the "Unknown Sources" option in your device settings. This will allow you to install apps from sources other than the Google Play Store. To enable this option, follow these steps:

-
    -
  1. Go to your device settings and tap on "Security".
  2. -
  3. Scroll down and find the "Unknown Sources" option.
  4. -
  5. Toggle it on and confirm your choice.
  6. -
-

Now you can install the Tekken 3 APK file by tapping on it and following the instructions. It should take only a few seconds to complete the installation.
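
If you would rather install from a computer than tap the file on the phone, you can sideload the APK over USB with adb instead. The snippet below is only an illustrative sketch: it assumes adb (from the Android platform-tools) is installed and USB debugging is enabled on your device, and the file name tekken3.apk is just a placeholder for whatever you downloaded.

```python
# Illustrative sketch: sideload an APK from a computer with adb.
# Assumes adb is on the PATH and USB debugging is enabled on the phone;
# "tekken3.apk" is a placeholder file name.
import subprocess

def sideload(apk_path: str) -> None:
    # -r reinstalls the app if it is already installed, keeping its data.
    subprocess.run(["adb", "install", "-r", apk_path], check=True)

if __name__ == "__main__":
    sideload("tekken3.apk")
```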

-

How to Play Tekken 3 on Your Android Device Using an Emulator App

-

After installing the Tekken 3 APK file, you need to use an emulator app to run it on your device. An emulator app is software that mimics the functions of another device or platform. In this case, you need an emulator app that can run PlayStation games on your Android device.

-

There are many emulator apps available for Android devices, but we recommend using ePSXe. It is one of the best PlayStation emulators for Android devices. It has a high compatibility rate, smooth performance, easy configuration, and support for various features such as save states, cheats, controllers, etc.

-

To use ePSXe to play Tekken 3 on your Android device, follow these steps:

-
    -
  1. Download and install ePSXe from the Google Play Store or from this link. It is a paid app, but it is worth the price for the quality and features it offers.
  2. -
  3. Open ePSXe and tap on "Run Game". It will scan your device for any PlayStation games you have installed.
  4. -
  5. Select Tekken 3 from the list and tap on it. It will launch the game and you can start playing.
  6. -
-

You can also customize the settings of ePSXe to suit your preferences. You can change the video, audio, input, and other options by tapping on the menu icon and selecting "Preferences". You can also save and load your game progress by tapping on the menu icon and selecting "Save State" or "Load State".

-

Tekken 3 Gameplay and Features

-

Tekken 3 is a 3D fighting game that pits two fighters against each other in a variety of stages. The objective is to deplete the opponent's health bar by using punches, kicks, throws, and special moves. The game has a simple control scheme that uses four buttons: left punch, right punch, left kick, and right kick. You can also perform combos by pressing different combinations of buttons in a sequence.

-

tekken 3 android game free download apk
-tekken 3 apk full version download for android
-tekken 3 mod apk download unlimited money
-tekken 3 apk obb file download offline
-tekken 3 apk download latest version 2018
-tekken 3 game download for android mobile9 apk
-tekken 3 apk download for pc windows 10
-tekken 3 apk download uptodown
-tekken 3 apk download apkpure
-tekken 3 apk download rexdl
-tekken 3 apk download highly compressed
-tekken 3 apk download with cheats
-tekken 3 apk download old version
-tekken 3 apk download for android 4.4.2
-tekken 3 apk download for android tv
-tekken 3 apk download for ios
-tekken 3 apk download for ppsspp
-tekken 3 apk download no emulator
-tekken 3 apk download original
-tekken 3 apk download play store
-tekken 3 apk download revdl
-tekken 3 apk download hack
-tekken 3 apk download muzhiwan
-tekken 3 apk download mob.org
-tekken 3 apk download android oyun club
-tekken 3 apk data zip file download
-tekken 3 all characters unlocked apk download
-tekken 3 hd graphics mod apk download
-tekken 3 online multiplayer apk download
-tekken 3 new version mod apk download
-tekken 3 panda mod apk download
-tekken 3 plus mod apk download
-tekken 3 pro mod apk download
-tekken 3 real mod apk download
-tekken 3 ultimate mod apk download
-how to install tekken 3 on android phone from apk file
-how to play tekken 3 on android without emulator using apk file
-how to unlock all characters in tekken 3 android game using apk editor pro
-how to update tekken 3 android game to latest version using apk file
-how to fix black screen issue in tekken 3 android game after installing apk file
-how to transfer saved data of tekken 3 android game from one device to another using apk file and obb folder
-how to enable cheats in tekken 3 android game using game guardian app and apk file
-how to change language in tekken 3 android game using es file explorer app and apk file
-how to connect joystick or gamepad with tekken 3 android game using octopus app and apk file
-how to record gameplay of tekken 3 android game using du recorder app and apk file
-how to stream live gameplay of tekken 3 android game using omlet arcade app and apk file

-

The game has a rich and diverse cast of characters, each with their own backstory, personality, and fighting style. Some of the characters return from previous Tekken games, such as Paul Phoenix, Nina Williams, Yoshimitsu, and Heihachi Mishima. Some of the characters are new to Tekken 3, such as Jin Kazama, Ling Xiaoyu, Hwoarang, Eddy Gordo, and Bryan Fury. You can unlock more characters by completing certain modes or fulfilling certain conditions.

-

The game also has various modes that offer different challenges and rewards. Some of the modes are:

-
    -
  • Arcade Mode: The main mode of the game where you fight against a series of opponents until you reach the final boss.
  • -
  • Versus Mode: A mode where you can fight against another player or the computer in a single match.
  • -
  • Team Battle Mode: A mode where you can form a team of up to eight characters and fight against another team.
  • -
  • Survival Mode: A mode where you have to survive as long as possible against an endless stream of opponents.
  • -
  • Time Attack Mode: A mode where you have to beat as many opponents as possible in a given time limit.
  • -
  • Practice Mode: A mode where you can practice your moves and combos without any interference.
  • -
  • Tekken Force Mode: A special mode where you have to fight your way through four stages of enemies in a side-scrolling beat 'em up style.
  • -
  • Tekken Ball Mode: A special mode where you have to hit a ball back and forth with your opponent using your moves. The ball can damage your opponent if it hits them with enough force.
  • -
-

The game also has a variety of stages that have different themes, backgrounds, and music. Some of the stages are based on real-world locations, such as Hong Kong, Mexico, India, etc. Some of the stages are based on fantasy or sci-fi settings, such as a laboratory, a temple, a spaceship, etc. Since the stages are open arenas without walls, they differ mainly in their visuals and soundtrack rather than in layout.

-

Tekken 3 Tips and Tricks

-

Tekken 3 is a fun and addictive game that will test your skills and reflexes. However, it can also be challenging and frustrating at times. To help you overcome the difficulties and enjoy the game more, here are some tips and tricks that you can use:

-
    -
  • Learn the basics: Before you jump into the action, make sure you know the basics of the game. Learn how to move, block, attack, throw, and evade. Learn how to use the different buttons and combinations to perform different moves and combos. Learn how to use the directional pad or joystick to control your character's movement and position.
  • -
  • Choose your character wisely: Each character in Tekken 3 has their own strengths and weaknesses. Some characters are fast and agile, some are strong and powerful, some are balanced and versatile. Choose a character that suits your playstyle and preference. Experiment with different characters until you find one that you like.
  • -
  • Know your opponent: Each character in Tekken 3 also has their own moveset and strategy. Some characters rely on speed and combos, some rely on power and throws, some rely on range and projectiles. Know what your opponent can do and how to counter them. Study their patterns and habits and exploit their weaknesses.
  • -
  • Use your environment: The stages in Tekken 3 are open arenas, so your "environment" is really space and distance. Use sidesteps to move around linear attacks, control the range so your character's strengths are in play, and vary your angles and spacing to create openings or avoid your opponent's attacks.
  • -
  • Practice and improve: The best way to get better at Tekken 3 is to practice and improve. Play the game regularly and try different modes, characters, and stages. Learn from your mistakes and successes and apply them to your next match. Watch other players and see how they play and what they do. Read guides and tutorials and learn new tips and tricks. The more you play, the more you will learn and improve.
  • -
-

Tekken 3 Alternatives and Recommendations

-

Tekken 3 is a great game that will give you hours of fun and excitement. However, if you want to try something different or expand your horizons, here are some alternatives and recommendations that you can check out:

-
    -
  • Other Tekken games available for Android devices: If you love Tekken 3, you might also love the other Tekken games that are available for Android devices. You can download Tekken, Tekken Card Tournament, or Tekken Arena from the Google Play Store and enjoy more Tekken action on your mobile phone.
  • -
  • Other fighting games similar to Tekken 3: If you love fighting games in general, you might also love the other fighting games that are similar to Tekken 3. You can download Street Fighter IV Champion Edition, Mortal Kombat, Injustice 2, or SoulCalibur from the Google Play Store and enjoy more fighting games on your mobile phone.
  • -
  • Other sources of information and entertainment related to Tekken 3: If you love everything related to Tekken 3, you might also love the other sources of information and entertainment that are related to Tekken 3. You can watch the Tekken movie, read the Tekken comics, listen to the Tekken soundtrack, or visit the Tekken wiki and enjoy more Tekken content on your mobile phone.
  • -
-

Conclusion

-

Tekken 3 is one of the best arcade games of all time. It has captivating gameplay, a diverse roster of characters, a variety of modes, and a stunning presentation. It is a game that will appeal to both casual and hardcore gamers alike. It is a game that you can play on your Android device by downloading an APK file and using an emulator app.

-

So, what are you waiting for? Download Tekken 3 APK now and enjoy the classic arcade game on your mobile phone. You will not regret it.

-

FAQs

-

Here are some frequently asked questions about Tekken 3 APK download 2018:

-
    -
  1. Is Tekken 3 APK safe to download and install?
  2. -

    Yes, it is safe to download and install. The APK file we provided is from a reliable source and has been tested by many users. It does not contain any viruses, malware, or spyware. However, make sure you download it from our link or another trusted source and not from any random or suspicious website.

    -
  3. Is Tekken 3 APK legal to download and install?
  4. -

    Yes, it is legal to download and install. The APK file we provided is not a pirated or cracked version of the game. It is a legitimate copy of the game that was released by Namco for the PlayStation console. However, make sure you own a physical or digital copy of the game before downloading it.

    -
  5. Is Tekken 3 APK compatible with my Android device?
  6. -

    Yes, it is compatible with most Android devices. The APK file we provided is designed to work with most devices running Android 4.0 or higher. However, some devices may have issues with performance, compatibility, or stability depending on their specifications, settings, or software.

    -
  7. How can I fix any problems or errors with Tekken 3 APK?
  8. -

    If you encounter any problems or errors with Tekken 3 APK, such as installation failure, black screen, sound issues, etc., you can try the following solutions:

    -
      -
    • Make sure you have enough storage space on your device before downloading and installing the APK file.
    • -
    • Make sure you have enabled the "Unknown Sources" option in your device settings before installing the APK file.
    • -
    • Make sure you have downloaded the correct APK file from our link or another trusted source and not from any random or suspicious website.
    • -
    • Make sure you have updated your device software to the latest version before installing the APK file.
    • -
    • Make sure you have closed any other apps or processes that may interfere with the APK file or the emulator app.
    • -
    • Make sure you have configured the emulator app settings to suit your device and the game.
    • -
    • Make sure you have restarted your device or the emulator app if you encounter any glitches or crashes.
    • -
    -
  9. Where can I get more help or support for Tekken 3 APK?
  10. -

    If you need more help or support for Tekken 3 APK, you can visit the following websites or forums:

    -
      -
    • The official website of ePSXe, where you can find FAQs, guides, tutorials, and contact information for the emulator app.
    • -
    • The official website of Namco, where you can find information, news, and updates about Tekken 3 and other Tekken games.
    • -
    • The Tekken subreddit, where you can find discussions, tips, videos, fan art, and more about Tekken 3 and other Tekken games.
    • -
    • The Tekken Zaibatsu website, where you can find forums, guides, moves lists, combos, and more about Tekken 3 and other Tekken games.
    • -
    -
-

I hope this article has helped you to download and play Tekken 3 on your Android device. If you have any questions or feedback, please leave a comment below. Thank you for reading and have fun playing Tekken 3.

401be4b1e0
-
-
\ No newline at end of file diff --git a/spaces/contluForse/HuggingGPT/assets/Ah Se Eu Soubesse... Como baixar o pdf do livro que mudou a vida de muitos.md b/spaces/contluForse/HuggingGPT/assets/Ah Se Eu Soubesse... Como baixar o pdf do livro que mudou a vida de muitos.md deleted file mode 100644 index 9b9fdba62e0df05313388af627779d9a8fb93066..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/Ah Se Eu Soubesse... Como baixar o pdf do livro que mudou a vida de muitos.md +++ /dev/null @@ -1,6 +0,0 @@ -

ahseeusoubesselivropdfdownload


Download File ✔✔✔ https://ssurll.com/2uzxXV



-
- aaccfb2cb3
-
-
-

diff --git a/spaces/contluForse/HuggingGPT/assets/Ajay Kannada Full Movie Download Puneeth Rajkumars Blockbuster Hit of 2006.md b/spaces/contluForse/HuggingGPT/assets/Ajay Kannada Full Movie Download Puneeth Rajkumars Blockbuster Hit of 2006.md deleted file mode 100644 index 741c73b86a16463435c5e7a3f11b64af8fa1e1f1..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/Ajay Kannada Full Movie Download Puneeth Rajkumars Blockbuster Hit of 2006.md +++ /dev/null @@ -1,28 +0,0 @@ -
-

Download Ajay Kannada Movie and unlimited movies and videos here. Ajay Kannada Movie is available in HD, 3gp, and mp4 320p, and you can easily download more videos, including tamilrockers, movierulz, tamilgun, filmywap, and pagalworld videos and movies.

-

Here we have given more information about Drishyam 2, such as the Drishyam 2 release date, how to watch the Drishyam 2 full movie online, the Drishyam 2 OTT platform, the Drishyam 2 cast, and other information, all of which is given below.

-

ajay kannada full movie download


Download File … https://ssurll.com/2uzxYo



-

Filmyzilla is a popular illegal movie download website, and it has linked the Drishyam 2 movie download. You can download everything from old movies to the latest releases on Filmyzilla. Here you can download Hollywood, Bollywood, South Indian, and Marathi movies in HD, Full HD, and 4K. Filmyzilla promises to provide you with all new and old movies of every type, such as Horror, Thriller, Action, Adult, Romantic, and Kids, for free.

-

Drishyam 2 Movie Download Filmyhit: On Filmyhit, you will get Bollywood, Hollywood, South Indian, and Marathi movies to download for free. Drishyam 2 Movie has been linked on Filmyhit in hd, full hd, and 4k resolution.

-

On this website, adult, action, romantic, and horror movies in Hindi, Marathi, Telugu, Tamil, Malayalam, and Kannada are available to download. Filmymeet is banned in India by the Government of India for piracy of films, so you will not be able to visit this website. You should avoid visiting such websites.

-

You can download Hindi, Marathi, Telugu, Tamil, Kannada, and Malayalam movies on Tamilrockers, including Drishyam 2. If you like to watch web series, you can download web series from all OTT platforms on Tamilrockers. Note, however, that you cannot visit the Tamilrockers website, because the Government of India has banned Tamilrockers.

-

You must have heard of Hotstar. Hotstar is a legal OTT platform that allows you to watch movies and web series. If the Drishyam 2 full movie is released on Hotstar, you can watch it online there by subscribing to their monthly plan. There are many good web series and movies on Hotstar that you should watch once. You will enjoy them.

-

-

Drishyam 2 Movie Download MX Player: On MX Player, you can download Drishyam 2 for free without any monthly subscription. You can also watch Drishyam 2 online on MX Player for free if the movie is released there. If you like to watch web series, MX Player also has good web series that you should watch once. You will enjoy them.

-

iBomma is a popular illegal movie download website, and it has linked the Drishyam 2 movie download. You can download everything from old movies to the latest releases on iBomma. Here you can download Hollywood, Bollywood, South Indian, and Marathi movies in HD, Full HD, and 4K.

-

Drishyam 2 Movie Download Moviesflix: On Moviesflix, you will get Bollywood, Hollywood, South Indian, and Marathi movies to download for free. Drishyam 2 Movie has been linked on Moviesflix in hd, full hd, and 4k resolution.

-

Filmywap is a popular illegal movie download website, and it has linked the Drishyam 2 movie download. You can download everything from old movies to the latest releases on Filmywap. Here you can download Hollywood, Bollywood, South Indian, and Marathi movies in HD, Full HD, and 4K.

-

On this website, adult, action, romantic, and horror movies in Hindi, Marathi, Telugu, Tamil, Malayalam, and Kannada are available to download. 9xMovies is banned in India by the Government of India for the piracy of films, so you will not be able to visit this website. You should avoid visiting such websites.

-

Drishyam 2 Movie Download Khatrimaza: On Khatrimaza, you will get Bollywood, Hollywood, South Indian, and Marathi movies to download for free. Drishyam 2 Movie has been linked on Khatrimaza in hd, full hd, and 4k resolution.

-

Skymovieshd is a popular illegal movie download website, and it has linked the Drishyam 2 movie download. You can download everything from old movies to the latest releases on Skymovieshd. Here you can download Hollywood, Bollywood, South Indian, and Marathi movies in HD, Full HD, and 4K.

-

On this website, adult, action, romantic, and horror movies in Hindi, Marathi, Telugu, Tamil, Malayalam, and Kannada are available to download. Coolmoviez is banned in India by the Government of India for the piracy of films, so you will not be able to visit this website. You should avoid visiting such websites.

-

This website is illegal, and you should not download any movie from it, because it pirates movies and doing so is a crime. Also, by downloading movies from such a website, a virus can get onto your device and your personal information can be stolen.

-

Drishyam 2 Movie Download Tamilyogi: On Tamilyogi, you will get Bollywood, Hollywood, South Indian, and Marathi movies to download for free. Drishyam 2 Movie has been linked on Tamilyogi in hd, full hd, and 4k resolution.

-

On this website, adult, action, romantic, and horror movies in Hindi, Marathi, Telugu, Tamil, Malayalam, and Kannada are available to download. Kuttymovies is banned in India by the Government of India for the piracy of films, so you will not be able to visit this website. You should avoid visiting such websites.

-

Pagalworld is a popular illegal movie download website, and it has linked the Drishyam 2 movie download. You can download everything from old movies to the latest releases on Pagalworld. Here you can download Hollywood, Bollywood, South Indian, and Marathi movies in HD, Full HD, and 4K.

-

Drishyam 2 Movie Download Pagalmovies: On Pagalmovies, you will get Bollywood, Hollywood, South Indian, and Marathi movies to download for free. Drishyam 2 Movie has been linked on Pagalmovies in hd, full hd, and 4k resolution. However, if you try to download the full Drishyam 2 movie from Pagalmovies, you can get into trouble.

-

Pagalmovies is an illegal torrent website. On this website, films are made available to download for free without the permission of the film producers. Due to this, the filmmakers suffer big losses. You are committing a crime by downloading films from a website like this. In any case, this kind of website is not secure.

-

Filmyzilla is one of the biggest movie piracy websites. Filmyzilla leaked the new Love You Rachchu movie free of charge, and Love You Rachchu HD is available for download on the Filmyzilla website. After several court warnings, Filmyzilla once more began film piracy. Most film owners complain against Filmyzilla, but it still leaks new films freely on its website. Because of web blocking, Filmyzilla keeps using proxies and changing its website address.

-

According to piracy law in India, a person can be taken to court, and if it is proven that they knowingly infringed, or helped somebody else infringe, by downloading a copyrighted movie from piracy websites, it may be considered a criminal act. Under the law, the punishment for an individual convicted of a first such offence may be a jail term of between six months and three years, with a fine anywhere between Rs.50,000 and Rs.200,000 (depending on the seriousness of the offence). We advise our users to avoid such illegal movie downloads.

aaccfb2cb3
-
-
\ No newline at end of file diff --git a/spaces/contluForse/HuggingGPT/assets/Binding of Isaac Mods Achievements Tips and Tricks for Modding the Game without Losing Progress.md b/spaces/contluForse/HuggingGPT/assets/Binding of Isaac Mods Achievements Tips and Tricks for Modding the Game without Losing Progress.md deleted file mode 100644 index 8b6d1767d872dc09590a9442ef46c8a59f34f324..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/Binding of Isaac Mods Achievements Tips and Tricks for Modding the Game without Losing Progress.md +++ /dev/null @@ -1,8 +0,0 @@ -
-

In Afterbirth+ and Repentance, having any mod enabled will disable the ability to unlock achievements; this is meant to encourage new players to play the game without mods first. To enable unlocking achievements with mods enabled, you need to defeat Mom at least once with all of your mods disabled. If your mods disappear after installing them (only on cracked versions of AB+), it is strongly recommended to defeat Mom's Heart once.

-

The Binding of Isaac's modding community is a strong one, and already Repentance-specific mods are coming in fast. While some of them aim to 'fix' some of the balance decisions that players feel were detrimental, many mods add new content and great quality of life changes. Players need to note that the game encourages new players to play without mods first for a 'vanilla' experience. It does this by disabling achievements until the player has defeated 'Mom' (the first final boss) without mods. After that milestone, players are free to mod to their hearts' delights. With that said, let's look at some of the better mods to consider downloading.

-

binding of isaac mods achievements


DOWNLOAD ➡ https://ssurll.com/2uzvRJ



-

Thanks to the Steam Workshop (one of the many handy Steam features), it's easy to install and use mods with your Steam games. However, you might find that some of these mods cause achievements not to unlock. This could be because the mod changes the gameplay or your character's performance, to the point where it doesn't fulfill the achievement's unlock requirements. It could also be a bug.

-

Another problem was the mods and console commands. Klei decided to give the players huge freedom by letting us use mods and commands (which I and many other people really like). Now, killing bearger for the achievement would take a minute for every noob who reads the guide "how to get all achievements with console commands", and the achievements would lose their meaning. Okay then, let's say achievements cannot be earned in worlds where the console was used. Achievement hunters will say: okay dokay, we'll make mods that solve this problem. Klei could then say: okay, no mods are allowed if you want achievements. (There would still be the question of whether client-only mods could be allowed, because they don't change the game, but many of them still help a lot.) But then Klei would practically ban people who like to play with mods from getting the achievements, and they would have to choose: mods or achievements. And this is definitely not what Klei wants.

aaccfb2cb3
-
-
\ No newline at end of file diff --git a/spaces/contluForse/HuggingGPT/assets/Download Fifa 13 Cracked Ipa Tips and Tricks to Master the Game.md b/spaces/contluForse/HuggingGPT/assets/Download Fifa 13 Cracked Ipa Tips and Tricks to Master the Game.md deleted file mode 100644 index aef3f708bcc8eb6ce5349b641545d93d93563cc1..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/Download Fifa 13 Cracked Ipa Tips and Tricks to Master the Game.md +++ /dev/null @@ -1,6 +0,0 @@ -

Download Fifa 13 Cracked Ipa


Download File ———>>> https://ssurll.com/2uzyQJ



-
- aaccfb2cb3
-
-
-

diff --git a/spaces/contluForse/HuggingGPT/assets/Ejaculations Fou Transsexuelle Publique.md b/spaces/contluForse/HuggingGPT/assets/Ejaculations Fou Transsexuelle Publique.md deleted file mode 100644 index 9d7c03175e718097bb3f0debd44ea87cc96a46da..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/Ejaculations Fou Transsexuelle Publique.md +++ /dev/null @@ -1,6 +0,0 @@ -

Ejaculations Fou Transsexuelle Publique


Download Zip ✅ https://ssurll.com/2uzx7V



-
- aaccfb2cb3
-
-
-

diff --git a/spaces/cooelf/Multimodal-CoT/timm/optim/adahessian.py b/spaces/cooelf/Multimodal-CoT/timm/optim/adahessian.py deleted file mode 100644 index 985c67ca686a65f61f5c5b1a7db3e5bba815a19b..0000000000000000000000000000000000000000 --- a/spaces/cooelf/Multimodal-CoT/timm/optim/adahessian.py +++ /dev/null @@ -1,156 +0,0 @@ -""" AdaHessian Optimizer - -Lifted from https://github.com/davda54/ada-hessian/blob/master/ada_hessian.py -Originally licensed MIT, Copyright 2020, David Samuel -""" -import torch - - -class Adahessian(torch.optim.Optimizer): - """ - Implements the AdaHessian algorithm from "ADAHESSIAN: An Adaptive Second OrderOptimizer for Machine Learning" - - Arguments: - params (iterable): iterable of parameters to optimize or dicts defining parameter groups - lr (float, optional): learning rate (default: 0.1) - betas ((float, float), optional): coefficients used for computing running averages of gradient and the - squared hessian trace (default: (0.9, 0.999)) - eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8) - weight_decay (float, optional): weight decay (L2 penalty) (default: 0.0) - hessian_power (float, optional): exponent of the hessian trace (default: 1.0) - update_each (int, optional): compute the hessian trace approximation only after *this* number of steps - (to save time) (default: 1) - n_samples (int, optional): how many times to sample `z` for the approximation of the hessian trace (default: 1) - """ - - def __init__(self, params, lr=0.1, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, - hessian_power=1.0, update_each=1, n_samples=1, avg_conv_kernel=False): - if not 0.0 <= lr: - raise ValueError(f"Invalid learning rate: {lr}") - if not 0.0 <= eps: - raise ValueError(f"Invalid epsilon value: {eps}") - if not 0.0 <= betas[0] < 1.0: - raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}") - if not 0.0 <= betas[1] < 1.0: - raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}") - if not 0.0 <= hessian_power <= 1.0: - raise ValueError(f"Invalid Hessian power value: {hessian_power}") - - self.n_samples = n_samples - self.update_each = update_each - self.avg_conv_kernel = avg_conv_kernel - - # use a separate generator that deterministically generates the same `z`s across all GPUs in case of distributed training - self.seed = 2147483647 - self.generator = torch.Generator().manual_seed(self.seed) - - defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, hessian_power=hessian_power) - super(Adahessian, self).__init__(params, defaults) - - for p in self.get_params(): - p.hess = 0.0 - self.state[p]["hessian step"] = 0 - - @property - def is_second_order(self): - return True - - def get_params(self): - """ - Gets all parameters in all param_groups with gradients - """ - - return (p for group in self.param_groups for p in group['params'] if p.requires_grad) - - def zero_hessian(self): - """ - Zeros out the accumalated hessian traces. - """ - - for p in self.get_params(): - if not isinstance(p.hess, float) and self.state[p]["hessian step"] % self.update_each == 0: - p.hess.zero_() - - @torch.no_grad() - def set_hessian(self): - """ - Computes the Hutchinson approximation of the hessian trace and accumulates it for each trainable parameter. 
- """ - - params = [] - for p in filter(lambda p: p.grad is not None, self.get_params()): - if self.state[p]["hessian step"] % self.update_each == 0: # compute the trace only each `update_each` step - params.append(p) - self.state[p]["hessian step"] += 1 - - if len(params) == 0: - return - - if self.generator.device != params[0].device: # hackish way of casting the generator to the right device - self.generator = torch.Generator(params[0].device).manual_seed(self.seed) - - grads = [p.grad for p in params] - - for i in range(self.n_samples): - # Rademacher distribution {-1.0, 1.0} - zs = [torch.randint(0, 2, p.size(), generator=self.generator, device=p.device) * 2.0 - 1.0 for p in params] - h_zs = torch.autograd.grad( - grads, params, grad_outputs=zs, only_inputs=True, retain_graph=i < self.n_samples - 1) - for h_z, z, p in zip(h_zs, zs, params): - p.hess += h_z * z / self.n_samples # approximate the expected values of z*(H@z) - - @torch.no_grad() - def step(self, closure=None): - """ - Performs a single optimization step. - Arguments: - closure (callable, optional) -- a closure that reevaluates the model and returns the loss (default: None) - """ - - loss = None - if closure is not None: - loss = closure() - - self.zero_hessian() - self.set_hessian() - - for group in self.param_groups: - for p in group['params']: - if p.grad is None or p.hess is None: - continue - - if self.avg_conv_kernel and p.dim() == 4: - p.hess = torch.abs(p.hess).mean(dim=[2, 3], keepdim=True).expand_as(p.hess).clone() - - # Perform correct stepweight decay as in AdamW - p.mul_(1 - group['lr'] * group['weight_decay']) - - state = self.state[p] - - # State initialization - if len(state) == 1: - state['step'] = 0 - # Exponential moving average of gradient values - state['exp_avg'] = torch.zeros_like(p) - # Exponential moving average of Hessian diagonal square values - state['exp_hessian_diag_sq'] = torch.zeros_like(p) - - exp_avg, exp_hessian_diag_sq = state['exp_avg'], state['exp_hessian_diag_sq'] - beta1, beta2 = group['betas'] - state['step'] += 1 - - # Decay the first and second moment running average coefficient - exp_avg.mul_(beta1).add_(p.grad, alpha=1 - beta1) - exp_hessian_diag_sq.mul_(beta2).addcmul_(p.hess, p.hess, value=1 - beta2) - - bias_correction1 = 1 - beta1 ** state['step'] - bias_correction2 = 1 - beta2 ** state['step'] - - k = group['hessian_power'] - denom = (exp_hessian_diag_sq / bias_correction2).pow_(k / 2).add_(group['eps']) - - # make update - step_size = group['lr'] / bias_correction1 - p.addcdiv_(exp_avg, denom, value=-step_size) - - return loss diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmcv/visualization/color.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmcv/visualization/color.py deleted file mode 100644 index 48379a283e48570f226426510270de8e15323c8d..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmcv/visualization/color.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from enum import Enum - -import numpy as np - -from annotator.mmpkg.mmcv.utils import is_str - - -class Color(Enum): - """An enum that defines common colors. - - Contains red, green, blue, cyan, yellow, magenta, white and black. 
- """ - red = (0, 0, 255) - green = (0, 255, 0) - blue = (255, 0, 0) - cyan = (255, 255, 0) - yellow = (0, 255, 255) - magenta = (255, 0, 255) - white = (255, 255, 255) - black = (0, 0, 0) - - -def color_val(color): - """Convert various input to color tuples. - - Args: - color (:obj:`Color`/str/tuple/int/ndarray): Color inputs - - Returns: - tuple[int]: A tuple of 3 integers indicating BGR channels. - """ - if is_str(color): - return Color[color].value - elif isinstance(color, Color): - return color.value - elif isinstance(color, tuple): - assert len(color) == 3 - for channel in color: - assert 0 <= channel <= 255 - return color - elif isinstance(color, int): - assert 0 <= color <= 255 - return color, color, color - elif isinstance(color, np.ndarray): - assert color.ndim == 1 and color.size == 3 - assert np.all((color >= 0) & (color <= 255)) - color = color.astype(np.uint8) - return tuple(color) - else: - raise TypeError(f'Invalid type for color: {type(color)}') diff --git a/spaces/cstorm125/foodydudy_for_lesson1/README.md b/spaces/cstorm125/foodydudy_for_lesson1/README.md deleted file mode 100644 index e82d4d7ab659b79286484fbd114632033a957b72..0000000000000000000000000000000000000000 --- a/spaces/cstorm125/foodydudy_for_lesson1/README.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -title: FoodyDudy 48-class Thai Food Classifier -emoji: 📈 -colorFrom: red -colorTo: green -sdk: gradio -app_file: app.py -pinned: false -duplicated_from: tmabraham/fastai_pet_classifier ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/danurahul/pop-music/finetune.py b/spaces/danurahul/pop-music/finetune.py deleted file mode 100644 index ea914b5986caadf9be5f5c1913b7ada942e51461..0000000000000000000000000000000000000000 --- a/spaces/danurahul/pop-music/finetune.py +++ /dev/null @@ -1,45 +0,0 @@ -from model import PopMusicTransformer -from glob import glob -import os -os.environ['CUDA_VISIBLE_DEVICES'] = '0' - -def main(): - # declare model - model = PopMusicTransformer( - checkpoint='REMI-tempo-checkpoint', - is_training=True) - # prepare data - midi_paths = glob('YOUR PERSOANL FOLDER/*.midi') # you need to revise it - training_data = model.prepare_data(midi_paths=midi_paths) - - # check output checkpoint folder - #################################### - # if you use "REMI-tempo-chord-checkpoint" for the pre-trained checkpoint - # please name your output folder as something with "chord" - # for example: my-love-chord, cute-doggy-chord, ... - # if use "REMI-tempo-checkpoint" - # for example: my-love, cute-doggy, ... 
- #################################### - output_checkpoint_folder = 'REMI-finetune' # your decision - if not os.path.exists(output_checkpoint_folder): - os.mkdir(output_checkpoint_folder) - - # finetune - model.finetune( - training_data=training_data, - output_checkpoint_folder=output_checkpoint_folder) - - #################################### - # after finetuning, please choose which checkpoint you want to try - # and change the checkpoint names you choose into "model" - # and copy the "dictionary.pkl" into the your output_checkpoint_folder - # ***** the same as the content format in "REMI-tempo-checkpoint" ***** - # and then, you can use "main.py" to generate your own music! - # (do not forget to revise the checkpoint path to your own in "main.py") - #################################### - - # close - model.close() - -if __name__ == '__main__': - main() diff --git a/spaces/davidtsong/whisper-demo/assets.py b/spaces/davidtsong/whisper-demo/assets.py deleted file mode 100644 index 7f06ee43f4cf468c8841b38d22d41d824451b51a..0000000000000000000000000000000000000000 --- a/spaces/davidtsong/whisper-demo/assets.py +++ /dev/null @@ -1,110 +0,0 @@ -LANGUAGES = { - "en": "english", - "zh": "chinese", - "de": "german", - "es": "spanish", - "ru": "russian", - "ko": "korean", - "fr": "french", - "ja": "japanese", - "pt": "portuguese", - "tr": "turkish", - "pl": "polish", - "ca": "catalan", - "nl": "dutch", - "ar": "arabic", - "sv": "swedish", - "it": "italian", - "id": "indonesian", - "hi": "hindi", - "fi": "finnish", - "vi": "vietnamese", - "iw": "hebrew", - "uk": "ukrainian", - "el": "greek", - "ms": "malay", - "cs": "czech", - "ro": "romanian", - "da": "danish", - "hu": "hungarian", - "ta": "tamil", - "no": "norwegian", - "th": "thai", - "ur": "urdu", - "hr": "croatian", - "bg": "bulgarian", - "lt": "lithuanian", - "la": "latin", - "mi": "maori", - "ml": "malayalam", - "cy": "welsh", - "sk": "slovak", - "te": "telugu", - "fa": "persian", - "lv": "latvian", - "bn": "bengali", - "sr": "serbian", - "az": "azerbaijani", - "sl": "slovenian", - "kn": "kannada", - "et": "estonian", - "mk": "macedonian", - "br": "breton", - "eu": "basque", - "is": "icelandic", - "hy": "armenian", - "ne": "nepali", - "mn": "mongolian", - "bs": "bosnian", - "kk": "kazakh", - "sq": "albanian", - "sw": "swahili", - "gl": "galician", - "mr": "marathi", - "pa": "punjabi", - "si": "sinhala", - "km": "khmer", - "sn": "shona", - "yo": "yoruba", - "so": "somali", - "af": "afrikaans", - "oc": "occitan", - "ka": "georgian", - "be": "belarusian", - "tg": "tajik", - "sd": "sindhi", - "gu": "gujarati", - "am": "amharic", - "yi": "yiddish", - "lo": "lao", - "uz": "uzbek", - "fo": "faroese", - "ht": "haitian creole", - "ps": "pashto", - "tk": "turkmen", - "nn": "nynorsk", - "mt": "maltese", - "sa": "sanskrit", - "lb": "luxembourgish", - "my": "myanmar", - "bo": "tibetan", - "tl": "tagalog", - "mg": "malagasy", - "as": "assamese", - "tt": "tatar", - "haw": "hawaiian", - "ln": "lingala", - "ha": "hausa", - "ba": "bashkir", - "jw": "javanese", - "su": "sundanese", -} - -lang_detect = ['tiny', 'base', 'small', 'medium', 'large'] - -css = """ -#audio_inputs{ - height:100px; - max-height:100px; -} -""" \ No newline at end of file diff --git a/spaces/dawood17/SayBot_Enchancer/CodeFormer/basicsr/data/prefetch_dataloader.py b/spaces/dawood17/SayBot_Enchancer/CodeFormer/basicsr/data/prefetch_dataloader.py deleted file mode 100644 index 5088425050d4cc98114a9b93eb50ea60273f35a0..0000000000000000000000000000000000000000 --- 
a/spaces/dawood17/SayBot_Enchancer/CodeFormer/basicsr/data/prefetch_dataloader.py +++ /dev/null @@ -1,125 +0,0 @@ -import queue as Queue -import threading -import torch -from torch.utils.data import DataLoader - - -class PrefetchGenerator(threading.Thread): - """A general prefetch generator. - - Ref: - https://stackoverflow.com/questions/7323664/python-generator-pre-fetch - - Args: - generator: Python generator. - num_prefetch_queue (int): Number of prefetch queue. - """ - - def __init__(self, generator, num_prefetch_queue): - threading.Thread.__init__(self) - self.queue = Queue.Queue(num_prefetch_queue) - self.generator = generator - self.daemon = True - self.start() - - def run(self): - for item in self.generator: - self.queue.put(item) - self.queue.put(None) - - def __next__(self): - next_item = self.queue.get() - if next_item is None: - raise StopIteration - return next_item - - def __iter__(self): - return self - - -class PrefetchDataLoader(DataLoader): - """Prefetch version of dataloader. - - Ref: - https://github.com/IgorSusmelj/pytorch-styleguide/issues/5# - - TODO: - Need to test on single gpu and ddp (multi-gpu). There is a known issue in - ddp. - - Args: - num_prefetch_queue (int): Number of prefetch queue. - kwargs (dict): Other arguments for dataloader. - """ - - def __init__(self, num_prefetch_queue, **kwargs): - self.num_prefetch_queue = num_prefetch_queue - super(PrefetchDataLoader, self).__init__(**kwargs) - - def __iter__(self): - return PrefetchGenerator(super().__iter__(), self.num_prefetch_queue) - - -class CPUPrefetcher(): - """CPU prefetcher. - - Args: - loader: Dataloader. - """ - - def __init__(self, loader): - self.ori_loader = loader - self.loader = iter(loader) - - def next(self): - try: - return next(self.loader) - except StopIteration: - return None - - def reset(self): - self.loader = iter(self.ori_loader) - - -class CUDAPrefetcher(): - """CUDA prefetcher. - - Ref: - https://github.com/NVIDIA/apex/issues/304# - - It may consums more GPU memory. - - Args: - loader: Dataloader. - opt (dict): Options. 
- """ - - def __init__(self, loader, opt): - self.ori_loader = loader - self.loader = iter(loader) - self.opt = opt - self.stream = torch.cuda.Stream() - self.device = torch.device('cuda' if opt['num_gpu'] != 0 else 'cpu') - self.preload() - - def preload(self): - try: - self.batch = next(self.loader) # self.batch is a dict - except StopIteration: - self.batch = None - return None - # put tensors to gpu - with torch.cuda.stream(self.stream): - for k, v in self.batch.items(): - if torch.is_tensor(v): - self.batch[k] = self.batch[k].to(device=self.device, non_blocking=True) - - def next(self): - torch.cuda.current_stream().wait_stream(self.stream) - batch = self.batch - self.preload() - return batch - - def reset(self): - self.loader = iter(self.ori_loader) - self.preload() diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/aiofiles/threadpool/text.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/aiofiles/threadpool/text.py deleted file mode 100644 index 0e625909b6c960ebed4a0ed99941b28156fbf2d1..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/aiofiles/threadpool/text.py +++ /dev/null @@ -1,64 +0,0 @@ -from ..base import AsyncBase, AsyncIndirectBase -from .utils import delegate_to_executor, proxy_method_directly, proxy_property_directly - - -@delegate_to_executor( - "close", - "flush", - "isatty", - "read", - "readable", - "readline", - "readlines", - "seek", - "seekable", - "tell", - "truncate", - "write", - "writable", - "writelines", -) -@proxy_method_directly("detach", "fileno", "readable") -@proxy_property_directly( - "buffer", - "closed", - "encoding", - "errors", - "line_buffering", - "newlines", - "name", - "mode", -) -class AsyncTextIOWrapper(AsyncBase): - """The asyncio executor version of io.TextIOWrapper.""" - - -@delegate_to_executor( - "close", - "flush", - "isatty", - "read", - "readable", - "readline", - "readlines", - "seek", - "seekable", - "tell", - "truncate", - "write", - "writable", - "writelines", -) -@proxy_method_directly("detach", "fileno", "readable") -@proxy_property_directly( - "buffer", - "closed", - "encoding", - "errors", - "line_buffering", - "newlines", - "name", - "mode", -) -class AsyncTextIndirectIOWrapper(AsyncIndirectBase): - """The indirect asyncio executor version of io.TextIOWrapper.""" diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/markdown_it/parser_block.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/markdown_it/parser_block.py deleted file mode 100644 index 72360f9b31bebca77250168bccae646e9a67dc6e..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/markdown_it/parser_block.py +++ /dev/null @@ -1,111 +0,0 @@ -"""Block-level tokenizer.""" -from __future__ import annotations - -import logging -from typing import TYPE_CHECKING, Callable - -from . import rules_block -from .ruler import Ruler -from .rules_block.state_block import StateBlock -from .token import Token -from .utils import EnvType - -if TYPE_CHECKING: - from markdown_it import MarkdownIt - -LOGGER = logging.getLogger(__name__) - - -RuleFuncBlockType = Callable[[StateBlock, int, int, bool], bool] -"""(state: StateBlock, startLine: int, endLine: int, silent: bool) -> matched: bool) - -`silent` disables token generation, useful for lookahead. 
-""" - -_rules: list[tuple[str, RuleFuncBlockType, list[str]]] = [ - # First 2 params - rule name & source. Secondary array - list of rules, - # which can be terminated by this one. - ("table", rules_block.table, ["paragraph", "reference"]), - ("code", rules_block.code, []), - ("fence", rules_block.fence, ["paragraph", "reference", "blockquote", "list"]), - ( - "blockquote", - rules_block.blockquote, - ["paragraph", "reference", "blockquote", "list"], - ), - ("hr", rules_block.hr, ["paragraph", "reference", "blockquote", "list"]), - ("list", rules_block.list_block, ["paragraph", "reference", "blockquote"]), - ("reference", rules_block.reference, []), - ("html_block", rules_block.html_block, ["paragraph", "reference", "blockquote"]), - ("heading", rules_block.heading, ["paragraph", "reference", "blockquote"]), - ("lheading", rules_block.lheading, []), - ("paragraph", rules_block.paragraph, []), -] - - -class ParserBlock: - """ - ParserBlock#ruler -> Ruler - - [[Ruler]] instance. Keep configuration of block rules. - """ - - def __init__(self) -> None: - self.ruler = Ruler[RuleFuncBlockType]() - for name, rule, alt in _rules: - self.ruler.push(name, rule, {"alt": alt}) - - def tokenize(self, state: StateBlock, startLine: int, endLine: int) -> None: - """Generate tokens for input range.""" - rules = self.ruler.getRules("") - line = startLine - maxNesting = state.md.options.maxNesting - hasEmptyLines = False - - while line < endLine: - state.line = line = state.skipEmptyLines(line) - if line >= endLine: - break - if state.sCount[line] < state.blkIndent: - # Termination condition for nested calls. - # Nested calls currently used for blockquotes & lists - break - if state.level >= maxNesting: - # If nesting level exceeded - skip tail to the end. - # That's not ordinary situation and we should not care about content. - state.line = endLine - break - - # Try all possible rules. - # On success, rule should: - # - update `state.line` - # - update `state.tokens` - # - return True - for rule in rules: - if rule(state, line, endLine, False): - break - - # set state.tight if we had an empty line before current tag - # i.e. latest empty line should not count - state.tight = not hasEmptyLines - - line = state.line - - # paragraph might "eat" one newline after it in nested lists - if (line - 1) < endLine and state.isEmpty(line - 1): - hasEmptyLines = True - - if line < endLine and state.isEmpty(line): - hasEmptyLines = True - line += 1 - state.line = line - - def parse( - self, src: str, md: MarkdownIt, env: EnvType, outTokens: list[Token] - ) -> list[Token] | None: - """Process input string and push block tokens into `outTokens`.""" - if not src: - return None - state = StateBlock(src, md, env, outTokens) - self.tokenize(state, state.line, state.lineMax) - return state.tokens diff --git a/spaces/dcq/freegpt-webui/client/js/theme-toggler.js b/spaces/dcq/freegpt-webui/client/js/theme-toggler.js deleted file mode 100644 index 67e1a9501b70d54ab8a717f34983c012328e74a0..0000000000000000000000000000000000000000 --- a/spaces/dcq/freegpt-webui/client/js/theme-toggler.js +++ /dev/null @@ -1,22 +0,0 @@ -var switch_theme_toggler = document.getElementById("theme-toggler"); - -switch_theme_toggler.addEventListener("change", toggleTheme); - -function setTheme(themeName) { - localStorage.setItem("theme", themeName); - document.documentElement.className = themeName; -} - -function toggleTheme() { - var currentTheme = localStorage.getItem("theme"); - var newTheme = currentTheme === "theme-dark" ? 
"theme-light" : "theme-dark"; - - setTheme(newTheme); - switch_theme_toggler.checked = newTheme === "theme-dark"; -} - -(function () { - var currentTheme = localStorage.getItem("theme") || "theme-dark"; - setTheme(currentTheme); - switch_theme_toggler.checked = currentTheme === "theme-dark"; -})(); diff --git a/spaces/deepdoctection/deepdoctection/app.py b/spaces/deepdoctection/deepdoctection/app.py deleted file mode 100644 index 6bf362abfa4b7fa13f9bfe1bc329a7f844c02d5a..0000000000000000000000000000000000000000 --- a/spaces/deepdoctection/deepdoctection/app.py +++ /dev/null @@ -1,301 +0,0 @@ -import os -import importlib.metadata -from os import getcwd, path, environ -from dotenv import load_dotenv -import json - - -def check_additional_requirements(): - if importlib.util.find_spec("detectron2") is None: - os.system('pip install detectron2@git+https://github.com/facebookresearch/detectron2.git') - if importlib.util.find_spec("gradio") is not None: - if importlib.metadata.version("gradio")!="3.44.3": - os.system("pip uninstall -y gradio") - os.system("pip install gradio==3.44.3") - else: - os.system("pip install gradio==3.44.3") - os.system(os.environ["DD_ADDONS"]) - return - - -load_dotenv() -check_additional_requirements() - -import deepdoctection as dd -from deepdoctection.dataflow.serialize import DataFromList -import time -from dd_addons.extern import PdfTextDetector, PostProcessor, get_xsl_path -from dd_addons.pipe.conn import PostProcessorService -import gradio as gr -from botocore.config import Config - - -# work around: https://discuss.huggingface.co/t/how-to-install-a-specific-version-of-gradio-in-spaces/13552 -_DD_ONE = "conf_dd_one.yaml" -_XSL_PATH = get_xsl_path() - -dd.ModelCatalog.register("xrf_layout/model_final_inf_only.pt",dd.ModelProfile( - name="xrf_layout/model_final_inf_only.pt", - description="layout_detection/morning-dragon-114", - config="xrf_dd/layout/CASCADE_RCNN_R_50_FPN_GN.yaml", - size=[274632215], - tp_model=False, - hf_repo_id=environ.get("HF_REPO_LAYOUT"), - hf_model_name="model_final_inf_only.pt", - hf_config_file=["Base-RCNN-FPN.yaml", "CASCADE_RCNN_R_50_FPN_GN.yaml"], - categories={"1": dd.LayoutType.text, - "2": dd.LayoutType.title, - "3": dd.LayoutType.list, - "4": dd.LayoutType.table, - "5": dd.LayoutType.figure}, - model_wrapper="D2FrcnnDetector", - )) - -dd.ModelCatalog.register("xrf_cell/model_final_inf_only.pt", dd.ModelProfile( - name="xrf_cell/model_final_inf_only.pt", - description="cell_detection/restful-eon-6", - config="xrf_dd/cell/CASCADE_RCNN_R_50_FPN_GN.yaml", - size=[274583063], - tp_model=False, - hf_repo_id=environ.get("HF_REPO_CELL"), - hf_model_name="model_final_inf_only.pt", - hf_config_file=["Base-RCNN-FPN.yaml", "CASCADE_RCNN_R_50_FPN_GN.yaml"], - categories={"1": dd.LayoutType.cell}, - model_wrapper="D2FrcnnDetector", - )) - -dd.ModelCatalog.register("xrf_item/model_final_inf_only.pt", dd.ModelProfile( - name="xrf_item/model_final_inf_only.pt", - description="item_detection/firm_plasma_14", - config="xrf_dd/item/CASCADE_RCNN_R_50_FPN_GN.yaml", - size=[274595351], - tp_model=False, - hf_repo_id=environ.get("HF_REPO_ITEM"), - hf_model_name="model_final_inf_only.pt", - hf_config_file=["Base-RCNN-FPN.yaml", "CASCADE_RCNN_R_50_FPN_GN.yaml"], - categories={"1": dd.LayoutType.row, "2": dd.LayoutType.column}, - model_wrapper="D2FrcnnDetector", - )) - -# Set up of the configuration and logging. 
Models are globally defined, so that they are not re-loaded once the input -# updates -cfg = dd.set_config_by_yaml(path.join(getcwd(),_DD_ONE)) -cfg.freeze(freezed=False) -cfg.DEVICE = "cpu" -cfg.freeze() - -# layout detector -layout_config_path = dd.ModelCatalog.get_full_path_configs(cfg.CONFIG.D2LAYOUT) -layout_weights_path = dd.ModelDownloadManager.maybe_download_weights_and_configs(cfg.WEIGHTS.D2LAYOUT) -categories_layout = dd.ModelCatalog.get_profile(cfg.WEIGHTS.D2LAYOUT).categories -assert categories_layout is not None -assert layout_weights_path is not None -d_layout = dd.D2FrcnnDetector(layout_config_path, layout_weights_path, categories_layout, device=cfg.DEVICE) - -# cell detector -cell_config_path = dd.ModelCatalog.get_full_path_configs(cfg.CONFIG.D2CELL) -cell_weights_path = dd.ModelDownloadManager.maybe_download_weights_and_configs(cfg.WEIGHTS.D2CELL) -categories_cell = dd.ModelCatalog.get_profile(cfg.WEIGHTS.D2CELL).categories -assert categories_cell is not None -d_cell = dd.D2FrcnnDetector(cell_config_path, cell_weights_path, categories_cell, device=cfg.DEVICE) - -# row/column detector -item_config_path = dd.ModelCatalog.get_full_path_configs(cfg.CONFIG.D2ITEM) -item_weights_path = dd.ModelDownloadManager.maybe_download_weights_and_configs(cfg.WEIGHTS.D2ITEM) -categories_item = dd.ModelCatalog.get_profile(cfg.WEIGHTS.D2ITEM).categories -assert categories_item is not None -d_item = dd.D2FrcnnDetector(item_config_path, item_weights_path, categories_item, device=cfg.DEVICE) - -# pdf miner -pdf_text = PdfTextDetector(_XSL_PATH) - -# text detector -credentials_kwargs={"aws_access_key_id": os.environ["ACCESS_KEY"], - "aws_secret_access_key": os.environ["SECRET_KEY"], - "config": Config(region_name=os.environ["REGION"])} -tex_text = dd.TextractOcrDetector(**credentials_kwargs) - - -def build_gradio_analyzer(): - """Building the Detectron2/DocTr analyzer based on the given config""" - - cfg.freeze(freezed=False) - cfg.TAB = True - cfg.TAB_REF = True - cfg.OCR = True - cfg.freeze() - - pipe_component_list = [] - layout = dd.ImageLayoutService(d_layout, to_image=True, crop_image=True) - pipe_component_list.append(layout) - - nms_service = dd.AnnotationNmsService(nms_pairs=cfg.LAYOUT_NMS_PAIRS.COMBINATIONS, - thresholds=cfg.LAYOUT_NMS_PAIRS.THRESHOLDS) - pipe_component_list.append(nms_service) - - if cfg.TAB: - - detect_result_generator = dd.DetectResultGenerator(categories_cell) - cell = dd.SubImageLayoutService(d_cell, dd.LayoutType.table, {1: 6}, detect_result_generator) - pipe_component_list.append(cell) - - detect_result_generator = dd.DetectResultGenerator(categories_item) - item = dd.SubImageLayoutService(d_item, dd.LayoutType.table, {1: 7, 2: 8}, detect_result_generator) - pipe_component_list.append(item) - - table_segmentation = dd.TableSegmentationService( - cfg.SEGMENTATION.ASSIGNMENT_RULE, - cfg.SEGMENTATION.THRESHOLD_ROWS, - cfg.SEGMENTATION.THRESHOLD_COLS, - cfg.SEGMENTATION.FULL_TABLE_TILING, - cfg.SEGMENTATION.REMOVE_IOU_THRESHOLD_ROWS, - cfg.SEGMENTATION.REMOVE_IOU_THRESHOLD_COLS, - cfg.SEGMENTATION.STRETCH_RULE - ) - pipe_component_list.append(table_segmentation) - - if cfg.TAB_REF: - table_segmentation_refinement = dd.TableSegmentationRefinementService() - pipe_component_list.append(table_segmentation_refinement) - - if cfg.OCR: - - d_text = dd.TextExtractionService(pdf_text) - pipe_component_list.append(d_text) - - t_text = dd.TextExtractionService(tex_text,skip_if_text_extracted=True) - pipe_component_list.append(t_text) - - match_words = dd.MatchingService( - 
parent_categories=cfg.WORD_MATCHING.PARENTAL_CATEGORIES, - child_categories=cfg.WORD_MATCHING.CHILD_CATEGORIES, - matching_rule=cfg.WORD_MATCHING.RULE, - threshold=cfg.WORD_MATCHING.THRESHOLD, - max_parent_only=cfg.WORD_MATCHING.MAX_PARENT_ONLY - ) - pipe_component_list.append(match_words) - - order = dd.TextOrderService( - text_container=cfg.TEXT_ORDERING.TEXT_CONTAINER, - floating_text_block_categories=cfg.TEXT_ORDERING.FLOATING_TEXT_BLOCK, - text_block_categories=cfg.TEXT_ORDERING.TEXT_BLOCK, - include_residual_text_container=cfg.TEXT_ORDERING.TEXT_CONTAINER_TO_TEXT_BLOCK) - pipe_component_list.append(order) - - pipe = dd.DoctectionPipe(pipeline_component_list=pipe_component_list) - - post_processor = PostProcessor("deepdoctection", **credentials_kwargs) - post_service = PostProcessorService(post_processor) - pipe_component_list.append(post_service) - - return pipe - - -def analyze_image(img, pdf, max_datapoints): - - # creating an image object and passing to the analyzer by using dataflows - analyzer = build_gradio_analyzer() - - if img is not None: - image = dd.Image(file_name=str(time.time()).replace(".","") + ".png", location="") - image.image = img[:, :, ::-1] - - df = DataFromList(lst=[image]) - df = analyzer.analyze(dataset_dataflow=df) - elif pdf: - df = analyzer.analyze(path=pdf.name, max_datapoints=max_datapoints) - else: - raise ValueError - - df.reset_state() - - layout_items_str = "" - jsonl_out = [] - dpts = [] - html_list = [] - - for dp in df: - dpts.append(dp) - out = dp.as_dict() - jsonl_out.append(out) - out.pop("_image") - layout_items = [layout for layout in dp.layouts if layout.reading_order is not None] - layout_items.sort(key=lambda x: x.reading_order) - layout_items_str += f"\n\n -------- PAGE NUMBER: {dp.page_number+1} ------------- \n" - for item in layout_items: - layout_items_str += f"\n {item.category_name}: {item.text}" - html_list.extend([table.html for table in dp.tables]) - if html_list: - html = ("
").join(html_list) - else: - html = None - json_object = json.dumps(jsonl_out, indent = 4) - return [dp.viz(show_cells=False) for dp in dpts], layout_items_str, html, json_object - - -demo = gr.Blocks(css="scrollbar.css") - - -with demo: - with gr.Box(): - gr.Markdown("

deepdoctection - A Document AI Package

") - gr.Markdown("deepdoctection is a Python library that orchestrates document extraction" - " and document layout analysis tasks using deep learning models. It does not implement models" - " but enables you to build pipelines using highly acknowledged libraries for object detection," - " OCR and selected NLP tasks and provides an integrated frameworks for fine-tuning, evaluating" - " and running models.
" - "This pipeline consists of a stack of models powered by Detectron2" - " for layout analysis and table recognition. OCR will be provided as well. You can process" - "an image or even a PDF-document. Up to nine pages can be processed.
") - gr.Markdown("[https://github.com/deepdoctection/deepdoctection](https://github.com/deepdoctection/deepdoctection)") - with gr.Box(): - gr.Markdown("

Upload a document and choose setting

") - with gr.Row(): - with gr.Column(): - with gr.Tab("Image upload"): - with gr.Column(): - inputs = gr.Image(type='numpy', label="Original Image") - with gr.Tab("PDF upload (only first image will be processed) *"): - with gr.Column(): - inputs_pdf = gr.File(label="PDF") - gr.Markdown("* If an image is cached in tab, remove it first") - with gr.Column(): - gr.Examples( - examples=[path.join(getcwd(), "sample_1.jpg"), path.join(getcwd(), "sample_2.png")], - inputs = inputs) - gr.Examples(examples=[path.join(getcwd(), "sample_3.pdf")], inputs = inputs_pdf) - - with gr.Row(): - max_imgs = gr.Slider(1, 8, value=2, step=1, label="Number of pages in multi page PDF", - info="Will stop after 9 pages") - - with gr.Row(): - btn = gr.Button("Run model", variant="primary") - - with gr.Box(): - gr.Markdown("
Outputs
") - with gr.Row(): - with gr.Column(): - with gr.Box(): - gr.Markdown("
Contiguous text
") - image_text = gr.Textbox() - with gr.Column(): - with gr.Box(): - gr.Markdown("
Layout detection
") - gallery = gr.Gallery( - label="Output images", show_label=False, elem_id="gallery" - ).style(grid=2) - with gr.Row(): - with gr.Box(): - gr.Markdown("
Table
") - html = gr.HTML() - - with gr.Row(): - with gr.Box(): - gr.Markdown("
JSON
") - json_output = gr.JSON() - - btn.click(fn=analyze_image, inputs=[inputs, inputs_pdf, max_imgs], - outputs=[gallery, image_text, html, json_output]) - -demo.launch() diff --git a/spaces/diacanFperku/AutoGPT/HD Online Player (Kariera Nicosia Dyzmy 720p Torrent).md b/spaces/diacanFperku/AutoGPT/HD Online Player (Kariera Nicosia Dyzmy 720p Torrent).md deleted file mode 100644 index 3827c7beee80e2d0b7b337b397558ba21a794404..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/HD Online Player (Kariera Nicosia Dyzmy 720p Torrent).md +++ /dev/null @@ -1,5 +0,0 @@ - -
Probably, you will need a powerful video card with good drivers to enjoy SexVilla 2 Everlust Full Crack Torrent files. The 3D SexVilla 2 Everlust Free Crack torrent has been downloaded from the site of the free software download category. 3D SexVilla 2 Everlust Cracked Torrent files. 3d SexVilla 2 Everlust crack torrent. Additional subtitles in the category selection of the online software distribution "Strong" can be found in the list of programs below. Write the letter to the editor-with. You'll find some of the best deals on mobile phones, mobile accessories, tablets, and much more. Play the best free games on your iPhone and iPad. Download 3d SexVilla 2 Everlust Full Crack Torrent. 3d SexVilla 2 Everlust crack torrent. 3d sex villa 2 everlust game cracked in direct link and torrent.right after. The new 3d sexvilla sexvilla full crack 2 free download pc version.Free download 3d sex villa 2 everlust game cracked in direct link and torrent.right after. The new 3d sexvilla sexvilla full crack 2 free download pc version.Download 3d sex villa 2 everlust game cracked in direct link and torrent.right after. The new 3d sexvilla sexvilla full crack 2 free download pc version.
Free 3d sex villa 2 everlust game download. Version 2.
Download 3d sex villa 2 everlust full version crack. Download 3d sex villa 2 everlust game cracked in direct link and torrent.right after. The new 3d sexvilla sexvilla full crack 2 free download pc version.
Download full version 3d sex villa 2 everlust game cracked in direct link and torrent. Right after download, crack. 3d SexVilla 2 Everlust Full Crack Torrent is now available for free download at.
Once you find the perfect model and. In fact, the most realistic 3D sex simulation video game on the. This may be hard to believe, but it is quite true for.Download 3d sex villa 2 everlust game cracked in direct link and torrent.right after. The new 3d sexvilla sexvilla full crack 2 free download pc version.Download full version 3d sex villa 2 everlust game cracked in direct link and torrent.right after. The new 3d sexvilla sexvilla full crack 2 free download pc version.Download 3d sex villa 2 everlust game cracked in direct link and torrent.right after. The new 3d sexvilla sexvilla full crack 2 free deposit pc version.Download 3d sex villa 2 everlust game cracked in direct link and torrent.right after. The new 3d sexvilla sexvilla full crack 2 free deposit pc version.
If you need a mobile app to play on your. Its possible to play your first casino game on your mobile. You can sign up to play your first casino game, apply.CamIs worth.28, 42, 50, 52, 68, 105, 166, 365,.2020-3,PRKVK2014 158 iMGSRC.RU [url= chobi bangla movie mp3 song download [url= Funny, comical, pretty cool, ridiculous 2014-3, PRKVK2014 168 iMGSRC.RU [url= Download Game Pc Dynasty Warrior [url= sesspaphpag [url= NatttureCemFrawlHem [url=
-[url= itunes-content-protection-software-for-windows-pc-20.
HD Online Player (Kariera Nicosia Dyzmy 720p Torrent)
Download · https://gohhs.com/2uFVEU
899543212b
\ No newline at end of file diff --git a/spaces/diacanFperku/AutoGPT/Hearty Paws Full Movie Free BEST Download.md b/spaces/diacanFperku/AutoGPT/Hearty Paws Full Movie Free BEST Download.md deleted file mode 100644 index ea6f149188ed2ae421d092d73d2d2d8ac6ba6ef0..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Hearty Paws Full Movie Free BEST Download.md +++ /dev/null @@ -1,11 +0,0 @@ -
hearty paws full movie free download
DOWNLOAD ✫ https://gohhs.com/2uFUQb
- -Hearty Paws is a touching story about a family that split into a brother, his little sister, and a dog they adopted for their sister's sixth birthday. One day when the dog was outside, the brother comes home and sees that the dog has disappeared. -He thought she was dead and went to mourn the death of his dog. -When the sister returns home, she sees her crying brother and the dog, who, as usual, sits next to him. -The girl starts crying, thinking her dog is dead and they pass out. -After recovering, they both realize they have a dog they didn't know about. -A man's body is found on the shore of a lake in the woods. 8a78ff9644
diff --git a/spaces/diacanFperku/AutoGPT/Nel Zel Formula Zipl.md b/spaces/diacanFperku/AutoGPT/Nel Zel Formula Zipl.md deleted file mode 100644 index 43bb54afe5954f73f4438a2dec21c8a2e4a83ff2..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Nel Zel Formula Zipl.md +++ /dev/null @@ -1,16 +0,0 @@ -
Nel Zel Formula Zipl
Download File ★★★★★ https://gohhs.com/2uFTY1
- -Watch and download 124 hentai manga and porn comics with nel-zel group formula for free on IMHentai. com. -Porn comics. -Watch online porn photos of Russian girls! -Erotic hentai watch free online in good quality. -Description of hentai manga. -1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30. -Porn comics, hentai manga in Russian. -Sex comics, hentai manga. -Porn comics are a special genre in the field of adult entertainment, Russian porn comics have conquered. -All sex pictures and other hentai videos fucking and hentai in this category! -Porn comics in Russian. 8a78ff9644
diff --git a/spaces/diacanFperku/AutoGPT/Saawan Ko Aane Do Dubbed In Hindi Download Torrent !LINK!.md b/spaces/diacanFperku/AutoGPT/Saawan Ko Aane Do Dubbed In Hindi Download Torrent !LINK!.md deleted file mode 100644 index 50104af139e6fdc36bbe8f6e9283b15c73714302..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Saawan Ko Aane Do Dubbed In Hindi Download Torrent !LINK!.md +++ /dev/null @@ -1,6 +0,0 @@ -
Saawan Ko Aane Do dubbed in hindi download torrent
DOWNLOAD ---> https://gohhs.com/2uFUv7
- -AK 47 full movie hd download in hindi Saawan Ko Aane Do full movie ... Hindi 720p Torrent > DOWNLOAD Show Spoiler. hindi movies/Kabhi ... Search and download wanted full movie salman khan in arabic dubbed videos. 4d29de3e1b
diff --git a/spaces/dolceschokolade/chatbot-mini/components/Spinner/Spinner.tsx b/spaces/dolceschokolade/chatbot-mini/components/Spinner/Spinner.tsx deleted file mode 100644 index f0cf09fca8da7c8479319670d0736db2ce84cad2..0000000000000000000000000000000000000000 --- a/spaces/dolceschokolade/chatbot-mini/components/Spinner/Spinner.tsx +++ /dev/null @@ -1,34 +0,0 @@ -import { FC } from 'react'; - -interface Props { - size?: string; - className?: string; -} - -const Spinner = ({ size = '1em', className = '' }: Props) => { - return ( - - - - - - - - - - - ); -}; - -export default Spinner; diff --git a/spaces/ds520/bingo/src/app/page.tsx b/spaces/ds520/bingo/src/app/page.tsx deleted file mode 100644 index 0dff3431b098ce4fe282cc83fc87a93a28a43090..0000000000000000000000000000000000000000 --- a/spaces/ds520/bingo/src/app/page.tsx +++ /dev/null @@ -1,15 +0,0 @@ -import dynamic from 'next/dynamic' - -const DynamicComponentWithNoSSR = dynamic( - () => import('../components/chat'), - { ssr: false } -) - -export default function IndexPage() { - return ( - <> -
- - - ) -} diff --git a/spaces/ds520/bingo/src/components/ui/separator.tsx b/spaces/ds520/bingo/src/components/ui/separator.tsx deleted file mode 100644 index 6c55e0b2ca8e2436658a06748aadbff7cd700db0..0000000000000000000000000000000000000000 --- a/spaces/ds520/bingo/src/components/ui/separator.tsx +++ /dev/null @@ -1,31 +0,0 @@ -'use client' - -import * as React from 'react' -import * as SeparatorPrimitive from '@radix-ui/react-separator' - -import { cn } from '@/lib/utils' - -const Separator = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->( - ( - { className, orientation = 'horizontal', decorative = true, ...props }, - ref - ) => ( - - ) -) -Separator.displayName = SeparatorPrimitive.Root.displayName - -export { Separator } diff --git a/spaces/efchbd1013/animal_classification/app.py b/spaces/efchbd1013/animal_classification/app.py deleted file mode 100644 index a699bc5b3c2e987102ca93e0ee28d601e0a93d02..0000000000000000000000000000000000000000 --- a/spaces/efchbd1013/animal_classification/app.py +++ /dev/null @@ -1,7 +0,0 @@ -import gradio as gr - -def greet(name): - return "Hello " + name + "!!" - -iface = gr.Interface(fn=greet, inputs="text", outputs="text") -iface.launch() \ No newline at end of file diff --git a/spaces/erbanku/gpt-academic/docs/README_JP.md b/spaces/erbanku/gpt-academic/docs/README_JP.md deleted file mode 100644 index 9fc6dbe595657894c9f6f449c50f6f681d762329..0000000000000000000000000000000000000000 --- a/spaces/erbanku/gpt-academic/docs/README_JP.md +++ /dev/null @@ -1,302 +0,0 @@ -> **Note** -> -> このReadmeファイルは、このプロジェクトのmarkdown翻訳プラグインによって自動的に生成されたもので、100%正確ではない可能性があります。 -> - -# ChatGPT 学術最適化 - -**このプロジェクトが好きだったら、スターをつけてください。もし、より使いやすい学術用のショートカットキーまたはファンクションプラグインを発明した場合は、issueを発行するかpull requestを作成してください。また、このプロジェクト自体によって翻訳されたREADMEは[英語説明書|](docs/README_EN.md)[日本語説明書|](docs/README_JP.md)[ロシア語説明書|](docs/README_RS.md)[フランス語説明書](docs/README_FR.md)もあります。** - -> **注意事項** -> -> 1. **赤色**のラベルが付いているファンクションプラグイン(ボタン)のみファイルを読み込めます。一部のプラグインはプラグインエリアのドロップダウンメニューにあります。新しいプラグインのPRを歓迎いたします! -> -> 2. このプロジェクトの各ファイルの機能は`self_analysis.md`(自己解析レポート)で詳しく説明されています。バージョンが追加されると、関連するファンクションプラグインをクリックして、GPTを呼び出して自己解析レポートを再生成することができます。一般的な質問は`wiki`にまとめられています。(`https://github.com/binary-husky/chatgpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98`) - - -
- -機能 | 説明 ---- | --- -ワンクリック整形 | 論文の文法エラーを一括で正確に修正できます。 -ワンクリック日英翻訳 | 日英翻訳には、ワンクリックで対応できます。 -ワンクリックコード説明 | コードの正しい表示と説明が可能です。 -[カスタムショートカットキー](https://www.bilibili.com/video/BV14s4y1E7jN) | カスタムショートカットキーをサポートします。 -[プロキシサーバーの設定](https://www.bilibili.com/video/BV1rc411W7Dr) | プロキシサーバーの設定をサポートします。 -モジュラーデザイン | カスタム高階関数プラグインと[関数プラグイン]、プラグイン[ホット更新]のサポートが可能です。詳細は[こちら](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97) -[自己プログラム解析](https://www.bilibili.com/video/BV1cj411A7VW) | [関数プラグイン][ワンクリック理解](https://github.com/binary-husky/chatgpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A)このプロジェクトのソースコード -[プログラム解析機能](https://www.bilibili.com/video/BV1cj411A7VW) | [関数プラグイン] ワンクリックで別のPython/C/C++/Java/Lua/...プロジェクトツリーを解析できます。 -論文読解 | [関数プラグイン] LaTeX論文の全文をワンクリックで解読し、要約を生成します。 -LaTeX全文翻訳、整形 | [関数プラグイン] ワンクリックでLaTeX論文を翻訳または整形できます。 -注釈生成 | [関数プラグイン] ワンクリックで関数の注釈を大量に生成できます。 -チャット分析レポート生成 | [関数プラグイン] 実行後、まとめレポートを自動生成します。 -[arxivヘルパー](https://www.bilibili.com/video/BV1LM4y1279X) | [関数プラグイン] 入力したarxivの記事URLで要約をワンクリック翻訳+PDFダウンロードができます。 -[PDF論文全文翻訳機能](https://www.bilibili.com/video/BV1KT411x7Wn) | [関数プラグイン] PDF論文タイトルと要約を抽出し、全文を翻訳します(マルチスレッド)。 -[Google Scholar Integratorヘルパー](https://www.bilibili.com/video/BV19L411U7ia) | [関数プラグイン] 任意のGoogle Scholar検索ページURLを指定すると、gptが興味深い記事を選択します。 -数式/画像/テーブル表示 | 数式のTex形式とレンダリング形式を同時に表示できます。数式、コードのハイライトをサポートしています。 -マルチスレッド関数プラグインサポート | ChatGPTをマルチスレッドで呼び出すことができ、大量のテキストやプログラムを簡単に処理できます。 -ダークグラジオ[テーマ](https://github.com/binary-husky/chatgpt_academic/issues/173)の起動 | 「/?__dark-theme=true」というURLをブラウザに追加することで、ダークテーマに切り替えることができます。 -[多数のLLMモデル](https://www.bilibili.com/video/BV1wT411p7yf)をサポート、[API2D](https://api2d.com/)インターフェースをサポート | GPT3.5、GPT4、[清華ChatGLM](https://github.com/THUDM/ChatGLM-6B)による同時サポートは、とても素晴らしいですね! -huggingface免科学上网[オンライン版](https://huggingface.co/spaces/qingxu98/gpt-academic) | huggingfaceにログイン後、[このスペース](https://huggingface.co/spaces/qingxu98/gpt-academic)をコピーしてください。 -...... | ...... - - -
- - -- 新しいインターフェース(config.pyのLAYOUTオプションを変更するだけで、「左右レイアウト」と「上下レイアウト」を切り替えることができます) -
- - -- すべてのボタンは、functional.pyを読み込んで動的に生成されます。カスタム機能を自由に追加して、クリップボードを解放します -
- -- 色を修正/修正 -
- -- 出力に数式が含まれている場合、TeX形式とレンダリング形式の両方が表示され、コピーと読み取りが容易になります -
- -- プロジェクトのコードを見るのが面倒?chatgptに整備されたプロジェクトを直接与えましょう -
- -- 多数の大規模言語モデルの混合呼び出し(ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4) -
- -多数の大規模言語モデルの混合呼び出し[huggingfaceテスト版](https://huggingface.co/spaces/qingxu98/academic-chatgpt-beta)(huggigface版はchatglmをサポートしていません) - - ---- - -## インストール-方法1:直接運転 (Windows、LinuxまたはMacOS) - -1. プロジェクトをダウンロードします。 -```sh -git clone https://github.com/binary-husky/chatgpt_academic.git -cd chatgpt_academic -``` - -2. API_KEYとプロキシ設定を構成する - -`config.py`で、海外のProxyとOpenAI API KEYを構成して説明します。 -``` -1.あなたが中国にいる場合、OpenAI APIをスムーズに使用するには海外プロキシを設定する必要があります。構成の詳細については、config.py(1.その中のUSE_PROXYをTrueに変更し、2.手順に従ってプロキシを変更する)を詳細に読んでください。 -2. OpenAI API KEYを構成する。OpenAIのウェブサイトでAPI KEYを取得してください。一旦API KEYを手に入れると、config.pyファイルで設定するだけです。 -3.プロキシネットワークに関連する問題(ネットワークタイムアウト、プロキシが動作しない)をhttps://github.com/binary-husky/chatgpt_academic/issues/1にまとめました。 -``` -(P.S. プログラム実行時にconfig.pyの隣にconfig_private.pyという名前のプライバシー設定ファイルを作成し、同じ名前の設定を上書きするconfig_private.pyが存在するかどうかを優先的に確認します。そのため、私たちの構成読み取りロジックを理解できる場合は、config.pyの隣にconfig_private.pyという名前の新しい設定ファイルを作成し、その中のconfig.pyから設定を移動してください。config_private.pyはgitで保守されていないため、プライバシー情報をより安全にすることができます。) - -3. 依存関係をインストールします。 -```sh -# 選択肢があります。 -python -m pip install -r requirements.txt - - -# (選択肢2) もしAnacondaを使用する場合、手順は同様です: -# (選択肢2.1) conda create -n gptac_venv python=3.11 -# (選択肢2.2) conda activate gptac_venv -# (選択肢2.3) python -m pip install -r requirements.txt - -# 注: 公式のpipソースまたはAlibabaのpipソースを使用してください。 別のpipソース(例:一部の大学のpip)は問題が発生する可能性があります。 一時的なソースの切り替え方法: -# python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ -``` - -もしあなたが清華ChatGLMをサポートする必要がある場合、さらに多くの依存関係をインストールする必要があります(Pythonに慣れない方やコンピューターの設定が十分でない方は、試みないことをお勧めします): -```sh -python -m pip install -r request_llm/requirements_chatglm.txt -``` - -4. 実行 -```sh -python main.py -``` - -5. 関数プラグインのテスト -``` -- Pythonプロジェクト分析のテスト - 入力欄に `./crazy_functions/test_project/python/dqn` と入力し、「Pythonプロジェクト全体の解析」をクリックします。 -- 自己コード解読のテスト - 「[マルチスレッドデモ] このプロジェクト自体を解析します(ソースを翻訳して解読します)」をクリックします。 -- 実験的な機能テンプレート関数のテスト(GPTが「今日の歴史」に何が起こったかを回答することが求められます)。この関数をテンプレートとして使用して、より複雑な機能を実装できます。 - 「[関数プラグインテンプレートデモ] 今日の歴史」をクリックします。 -- 関数プラグインエリアのドロップダウンメニューには他にも選択肢があります。 -``` - -## インストール方法2:Dockerを使用する(Linux) - -1. ChatGPTのみ(大多数の人にお勧めです) -``` sh -# プロジェクトのダウンロード -git clone https://github.com/binary-husky/chatgpt_academic.git -cd chatgpt_academic -# 海外プロキシとOpenAI API KEYの設定 -config.pyを任意のテキストエディタで編集する -# インストール -docker build -t gpt-academic . -# 実行 -docker run --rm -it --net=host gpt-academic - -# 関数プラグインのテスト -## 関数プラグインテンプレート関数のテスト(GPTが「今日の歴史」に何が起こったかを回答することが求められます)。この関数をテンプレートとして使用して、より複雑な機能を実装できます。 -「[関数プラグインテンプレートデモ] 今日の歴史」をクリックします。 -## Latexプロジェクトの要約を書くテスト -入力欄に./crazy_functions/test_project/latex/attentionと入力し、「テックス論文を読んで要約を書く」をクリックします。 -## Pythonプロジェクト分析のテスト -入力欄に./crazy_functions/test_project/python/dqnと入力し、[Pythonプロジェクトの全解析]をクリックします。 - -関数プラグインエリアのドロップダウンメニューには他にも選択肢があります。 -``` - -2. ChatGPT + ChatGLM(Dockerに非常に詳しい人+十分なコンピューター設定が必要) - - - -```sh -# Dockerfileの編集 -cd docs && nano Dockerfile+ChatGLM -# ビルド方法 -docker build -t gpt-academic --network=host -f Dockerfile+ChatGLM . -# 実行方法 (1) 直接実行: -docker run --rm -it --net=host --gpus=all gpt-academic -# 実行方法 (2) コンテナに入って調整する: -docker run --rm -it --net=host --gpus=all gpt-academic bash -``` - -## インストール方法3:その他のデプロイ方法 - -1. クラウドサーバーデプロイ -[デプロイwiki-1](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97) - -2. 
WSL2を使用 (Windows Subsystem for Linux) -[デプロイwiki-2](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2) - - -## インストール-プロキシ設定 -1. 通常の方法 -[プロキシを設定する](https://github.com/binary-husky/chatgpt_academic/issues/1) - -2. 初心者向けチュートリアル -[初心者向けチュートリアル](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BB%A3%E7%90%86%E8%BD%AF%E4%BB%B6%E9%97%AE%E9%A2%98%E7%9A%84%E6%96%B0%E6%89%8B%E8%A7%A3%E5%86%B3%E6%96%B9%E6%B3%95%EF%BC%88%E6%96%B9%E6%B3%95%E5%8F%AA%E9%80%82%E7%94%A8%E4%BA%8E%E6%96%B0%E6%89%8B%EF%BC%89) - - ---- - -## カスタムボタンの追加(学術ショートカットキー) - -`core_functional.py`を任意のテキストエディタで開き、以下のエントリーを追加し、プログラムを再起動してください。(ボタンが追加されて表示される場合、前置詞と後置詞はホット編集がサポートされているため、プログラムを再起動せずに即座に有効になります。) - -例: -``` -"超级英译中": { - # 前置詞 - あなたの要求を説明するために使用されます。翻訳、コードの説明、編集など。 - "Prefix": "以下のコンテンツを中国語に翻訳して、マークダウンテーブルを使用して専門用語を説明してください。\n\n", - - # 後置詞 - プレフィックスと共に使用すると、入力内容を引用符で囲むことができます。 - "Suffix": "", -}, -``` - -
- - ---- - -## いくつかの機能の例 - -### 画像表示: - -
- - -### プログラムが自己解析できる場合: - -
- -### 他のPython/Cppプロジェクトの解析: - -
- -### Latex論文の一括読解と要約生成 - -
- -### 自動報告生成 - -
- -### モジュール化された機能デザイン - -
- - -### ソースコードの英語翻訳 - -
- -## Todo およびバージョン計画: -- version 3.2+ (todo): 関数プラグインがより多くのパラメーターインターフェースをサポートするようになります。 -- version 3.1: 複数のgptモデルを同時にクエリし、api2dをサポートし、複数のapikeyの負荷分散をサポートします。 -- version 3.0: chatglmおよび他の小型llmのサポート -- version 2.6: プラグイン構造を再構成し、相互作用性を高め、より多くのプラグインを追加しました。 -- version 2.5: 自己更新。総括的な大規模プロジェクトのソースコードをまとめた場合、テキストが長すぎる、トークンがオーバーフローする問題を解決します。 -- version 2.4: (1)PDF全文翻訳機能を追加。(2)入力エリアの位置を切り替える機能を追加。(3)垂直レイアウトオプションを追加。(4)マルチスレッド関数プラグインの最適化。 -- version 2.3: 多スレッドの相互作用性を向上させました。 -- version 2.2: 関数プラグインでホットリロードをサポート -- version 2.1: 折りたたみ式レイアウト -- version 2.0: モジュール化された関数プラグインを導入 -- version 1.0: 基本機能 - -## 参考および学習 - - -以下は中国語のマークダウンファイルです。日本語に翻訳してください。既存のマークダウンコマンドを変更しないでください: - -``` -多くの優秀なプロジェクトの設計を参考にしています。主なものは以下の通りです: - -# 参考プロジェクト1:ChuanhuChatGPTから多くのテクニックを借用 -https://github.com/GaiZhenbiao/ChuanhuChatGPT - -# 参考プロジェクト2:清華ChatGLM-6B: -https://github.com/THUDM/ChatGLM-6B -``` - diff --git a/spaces/eson/tokenizer-arena/vocab/bloomz_6b4_zh/__init__.py b/spaces/eson/tokenizer-arena/vocab/bloomz_6b4_zh/__init__.py deleted file mode 100644 index b1d3fd476d7855d24a7939055bc9f6c7497f96fb..0000000000000000000000000000000000000000 --- a/spaces/eson/tokenizer-arena/vocab/bloomz_6b4_zh/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ - -import os -from transformers import AutoTokenizer, BloomTokenizerFast - -CURRENT_DIR = os.path.dirname(os.path.abspath(__file__)) -TOKENIZER_DIR = os.path.join(CURRENT_DIR, "tokenizer") - -tokenizer = AutoTokenizer.from_pretrained(TOKENIZER_DIR, trust_remote_code=True) - -# vocab_size = len(tokenizer.get_vocab()) -# vocab_size = tokenizer.vocab_size diff --git a/spaces/evanpierce/3D_Photo_Inpainting2/utils.py b/spaces/evanpierce/3D_Photo_Inpainting2/utils.py deleted file mode 100644 index 808e48b1979d16f32c050f43f1f6c0ca36d8d18b..0000000000000000000000000000000000000000 --- a/spaces/evanpierce/3D_Photo_Inpainting2/utils.py +++ /dev/null @@ -1,1416 +0,0 @@ -import os -import glob -import cv2 -import scipy.misc as misc -from skimage.transform import resize -import numpy as np -from functools import reduce -from operator import mul -import torch -from torch import nn -import matplotlib.pyplot as plt -import re -try: - import cynetworkx as netx -except ImportError: - import networkx as netx -from scipy.ndimage import gaussian_filter -from skimage.feature import canny -import collections -import shutil -import imageio -import copy -from matplotlib import pyplot as plt -from mpl_toolkits.mplot3d import Axes3D -import time -from scipy.interpolate import interp1d -from collections import namedtuple - -def path_planning(num_frames, x, y, z, path_type=''): - if path_type == 'straight-line': - corner_points = np.array([[0, 0, 0], [(0 + x) * 0.5, (0 + y) * 0.5, (0 + z) * 0.5], [x, y, z]]) - corner_t = np.linspace(0, 1, len(corner_points)) - t = np.linspace(0, 1, num_frames) - cs = interp1d(corner_t, corner_points, axis=0, kind='quadratic') - spline = cs(t) - xs, ys, zs = [xx.squeeze() for xx in np.split(spline, 3, 1)] - elif path_type == 'double-straight-line': - corner_points = np.array([[-x, -y, -z], [0, 0, 0], [x, y, z]]) - corner_t = np.linspace(0, 1, len(corner_points)) - t = np.linspace(0, 1, num_frames) - cs = interp1d(corner_t, corner_points, axis=0, kind='quadratic') - spline = cs(t) - xs, ys, zs = [xx.squeeze() for xx in np.split(spline, 3, 1)] - elif path_type == 'circle': - xs, ys, zs = [], [], [] - for frame_id, bs_shift_val in enumerate(np.arange(-2.0, 2.0, (4./num_frames))): - xs += [np.cos(bs_shift_val * np.pi) * 1 * x] - ys += [np.sin(bs_shift_val * np.pi) * 1 * y] - zs += 
[np.cos(bs_shift_val * np.pi/2.) * 1 * z] - xs, ys, zs = np.array(xs), np.array(ys), np.array(zs) - - return xs, ys, zs - -def open_small_mask(mask, context, open_iteration, kernel): - np_mask = mask.cpu().data.numpy().squeeze().astype(np.uint8) - raw_mask = np_mask.copy() - np_context = context.cpu().data.numpy().squeeze().astype(np.uint8) - np_input = np_mask + np_context - for _ in range(open_iteration): - np_input = cv2.erode(cv2.dilate(np_input, np.ones((kernel, kernel)), iterations=1), np.ones((kernel,kernel)), iterations=1) - np_mask[(np_input - np_context) > 0] = 1 - out_mask = torch.FloatTensor(np_mask).to(mask)[None, None, ...] - - return out_mask - -def filter_irrelevant_edge_new(self_edge, comp_edge, other_edges, other_edges_with_id, current_edge_id, context, depth, mesh, context_cc, spdb=False): - other_edges = other_edges.squeeze().astype(np.uint8) - other_edges_with_id = other_edges_with_id.squeeze() - self_edge = self_edge.squeeze() - dilate_bevel_self_edge = cv2.dilate((self_edge + comp_edge).astype(np.uint8), np.array([[1,1,1],[1,1,1],[1,1,1]]), iterations=1) - dilate_cross_self_edge = cv2.dilate((self_edge + comp_edge).astype(np.uint8), np.array([[0,1,0],[1,1,1],[0,1,0]]).astype(np.uint8), iterations=1) - edge_ids = np.unique(other_edges_with_id * context + (-1) * (1 - context)).astype(np.int) - end_depth_maps = np.zeros_like(self_edge) - self_edge_ids = np.sort(np.unique(other_edges_with_id[self_edge > 0]).astype(np.int)) - self_edge_ids = self_edge_ids[1:] if self_edge_ids.shape[0] > 0 and self_edge_ids[0] == -1 else self_edge_ids - self_comp_ids = np.sort(np.unique(other_edges_with_id[comp_edge > 0]).astype(np.int)) - self_comp_ids = self_comp_ids[1:] if self_comp_ids.shape[0] > 0 and self_comp_ids[0] == -1 else self_comp_ids - edge_ids = edge_ids[1:] if edge_ids[0] == -1 else edge_ids - other_edges_info = [] - extend_other_edges = np.zeros_like(other_edges) - if spdb is True: - f, ((ax1, ax2, ax3)) = plt.subplots(1, 3, sharex=True, sharey=True); ax1.imshow(self_edge); ax2.imshow(context); ax3.imshow(other_edges_with_id * context + (-1) * (1 - context)); plt.show() - import pdb; pdb.set_trace() - filter_self_edge = np.zeros_like(self_edge) - for self_edge_id in self_edge_ids: - filter_self_edge[other_edges_with_id == self_edge_id] = 1 - dilate_self_comp_edge = cv2.dilate(comp_edge, kernel=np.ones((3, 3)), iterations=2) - valid_self_comp_edge = np.zeros_like(comp_edge) - for self_comp_id in self_comp_ids: - valid_self_comp_edge[self_comp_id == other_edges_with_id] = 1 - self_comp_edge = dilate_self_comp_edge * valid_self_comp_edge - filter_self_edge = (filter_self_edge + self_comp_edge).clip(0, 1) - for edge_id in edge_ids: - other_edge_locs = (other_edges_with_id == edge_id).astype(np.uint8) - condition = (other_edge_locs * other_edges * context.astype(np.uint8)) - end_cross_point = dilate_cross_self_edge * condition * (1 - filter_self_edge) - end_bevel_point = dilate_bevel_self_edge * condition * (1 - filter_self_edge) - if end_bevel_point.max() != 0: - end_depth_maps[end_bevel_point != 0] = depth[end_bevel_point != 0] - if end_cross_point.max() == 0: - nxs, nys = np.where(end_bevel_point != 0) - for nx, ny in zip(nxs, nys): - bevel_node = [xx for xx in context_cc if xx[0] == nx and xx[1] == ny][0] - for ne in mesh.neighbors(bevel_node): - if other_edges_with_id[ne[0], ne[1]] > -1 and dilate_cross_self_edge[ne[0], ne[1]] > 0: - extend_other_edges[ne[0], ne[1]] = 1 - break - else: - other_edges[other_edges_with_id == edge_id] = 0 - other_edges = (other_edges + 
extend_other_edges).clip(0, 1) * context - - return other_edges, end_depth_maps, other_edges_info - -def clean_far_edge_new(input_edge, end_depth_maps, mask, context, global_mesh, info_on_pix, self_edge, inpaint_id, config): - mesh = netx.Graph() - hxs, hys = np.where(input_edge * mask > 0) - valid_near_edge = (input_edge != 0).astype(np.uint8) * context - valid_map = mask + context - invalid_edge_ids = [] - for hx, hy in zip(hxs, hys): - node = (hx ,hy) - mesh.add_node((hx, hy)) - eight_nes = [ne for ne in [(hx + 1, hy), (hx - 1, hy), (hx, hy + 1), (hx, hy - 1), \ - (hx + 1, hy + 1), (hx - 1, hy - 1), (hx - 1, hy + 1), (hx + 1, hy - 1)]\ - if 0 <= ne[0] < input_edge.shape[0] and 0 <= ne[1] < input_edge.shape[1] and 0 < input_edge[ne[0], ne[1]]] # or end_depth_maps[ne[0], ne[1]] != 0] - for ne in eight_nes: - mesh.add_edge(node, ne, length=np.hypot(ne[0] - hx, ne[1] - hy)) - if end_depth_maps[ne[0], ne[1]] != 0: - mesh.nodes[ne[0], ne[1]]['cnt'] = True - if end_depth_maps[ne[0], ne[1]] == 0: - import pdb; pdb.set_trace() - mesh.nodes[ne[0], ne[1]]['depth'] = end_depth_maps[ne[0], ne[1]] - elif mask[ne[0], ne[1]] != 1: - four_nes = [nne for nne in [(ne[0] + 1, ne[1]), (ne[0] - 1, ne[1]), (ne[0], ne[1] + 1), (ne[0], ne[1] - 1)]\ - if nne[0] < end_depth_maps.shape[0] and nne[0] >= 0 and nne[1] < end_depth_maps.shape[1] and nne[1] >= 0] - for nne in four_nes: - if end_depth_maps[nne[0], nne[1]] != 0: - mesh.add_edge(nne, ne, length=np.hypot(nne[0] - ne[0], nne[1] - ne[1])) - mesh.nodes[nne[0], nne[1]]['cnt'] = True - mesh.nodes[nne[0], nne[1]]['depth'] = end_depth_maps[nne[0], nne[1]] - ccs = [*netx.connected_components(mesh)] - end_pts = [] - for cc in ccs: - end_pts.append(set()) - for node in cc: - if mesh.nodes[node].get('cnt') is not None: - end_pts[-1].add((node[0], node[1], mesh.nodes[node]['depth'])) - predef_npaths = [None for _ in range(len(ccs))] - fpath_map = np.zeros_like(input_edge) - 1 - npath_map = np.zeros_like(input_edge) - 1 - npaths, fpaths = dict(), dict() - break_flag = False - end_idx = 0 - while end_idx < len(end_pts): - end_pt, cc = [*zip(end_pts, ccs)][end_idx] - end_idx += 1 - sorted_end_pt = [] - fpath = [] - iter_fpath = [] - if len(end_pt) > 2 or len(end_pt) == 0: - if len(end_pt) > 2: - continue - continue - if len(end_pt) == 2: - ravel_end = [*end_pt] - tmp_sub_mesh = mesh.subgraph(list(cc)).copy() - tmp_npath = [*netx.shortest_path(tmp_sub_mesh, (ravel_end[0][0], ravel_end[0][1]), (ravel_end[1][0], ravel_end[1][1]), weight='length')] - fpath_map1, npath_map1, disp_diff1 = plan_path(mesh, info_on_pix, cc, ravel_end[0:1], global_mesh, input_edge, mask, valid_map, inpaint_id, npath_map=None, fpath_map=None, npath=tmp_npath) - fpath_map2, npath_map2, disp_diff2 = plan_path(mesh, info_on_pix, cc, ravel_end[1:2], global_mesh, input_edge, mask, valid_map, inpaint_id, npath_map=None, fpath_map=None, npath=tmp_npath) - tmp_disp_diff = [disp_diff1, disp_diff2] - self_end = [] - edge_len = [] - ds_edge = cv2.dilate(self_edge.astype(np.uint8), np.ones((3, 3)), iterations=1) - if ds_edge[ravel_end[0][0], ravel_end[0][1]] > 0: - self_end.append(1) - else: - self_end.append(0) - if ds_edge[ravel_end[1][0], ravel_end[1][1]] > 0: - self_end.append(1) - else: - self_end.append(0) - edge_len = [np.count_nonzero(npath_map1), np.count_nonzero(npath_map2)] - sorted_end_pts = [xx[0] for xx in sorted(zip(ravel_end, self_end, edge_len, [disp_diff1, disp_diff2]), key=lambda x: (x[1], x[2]), reverse=True)] - re_npath_map1, re_fpath_map1 = (npath_map1 != -1).astype(np.uint8), (fpath_map1 
!= -1).astype(np.uint8) - re_npath_map2, re_fpath_map2 = (npath_map2 != -1).astype(np.uint8), (fpath_map2 != -1).astype(np.uint8) - if np.count_nonzero(re_npath_map1 * re_npath_map2 * mask) / \ - (np.count_nonzero((re_npath_map1 + re_npath_map2) * mask) + 1e-6) > 0.5\ - and np.count_nonzero(re_fpath_map1 * re_fpath_map2 * mask) / \ - (np.count_nonzero((re_fpath_map1 + re_fpath_map2) * mask) + 1e-6) > 0.5\ - and tmp_disp_diff[0] != -1 and tmp_disp_diff[1] != -1: - my_fpath_map, my_npath_map, npath, fpath = \ - plan_path_e2e(mesh, cc, sorted_end_pts, global_mesh, input_edge, mask, valid_map, inpaint_id, npath_map=None, fpath_map=None) - npath_map[my_npath_map != -1] = my_npath_map[my_npath_map != -1] - fpath_map[my_fpath_map != -1] = my_fpath_map[my_fpath_map != -1] - if len(fpath) > 0: - edge_id = global_mesh.nodes[[*sorted_end_pts][0]]['edge_id'] - fpaths[edge_id] = fpath - npaths[edge_id] = npath - invalid_edge_ids.append(edge_id) - else: - if tmp_disp_diff[0] != -1: - ratio_a = tmp_disp_diff[0] / (np.sum(tmp_disp_diff) + 1e-8) - else: - ratio_a = 0 - if tmp_disp_diff[1] != -1: - ratio_b = tmp_disp_diff[1] / (np.sum(tmp_disp_diff) + 1e-8) - else: - ratio_b = 0 - npath_len = len(tmp_npath) - if npath_len > config['depth_edge_dilate_2'] * 2: - npath_len = npath_len - (config['depth_edge_dilate_2'] * 1) - tmp_npath_a = tmp_npath[:int(np.floor(npath_len * ratio_a))] - tmp_npath_b = tmp_npath[::-1][:int(np.floor(npath_len * ratio_b))] - tmp_merge = [] - if len(tmp_npath_a) > 0 and sorted_end_pts[0][0] == tmp_npath_a[0][0] and sorted_end_pts[0][1] == tmp_npath_a[0][1]: - if len(tmp_npath_a) > 0 and mask[tmp_npath_a[-1][0], tmp_npath_a[-1][1]] > 0: - tmp_merge.append([sorted_end_pts[:1], tmp_npath_a]) - if len(tmp_npath_b) > 0 and mask[tmp_npath_b[-1][0], tmp_npath_b[-1][1]] > 0: - tmp_merge.append([sorted_end_pts[1:2], tmp_npath_b]) - elif len(tmp_npath_b) > 0 and sorted_end_pts[0][0] == tmp_npath_b[0][0] and sorted_end_pts[0][1] == tmp_npath_b[0][1]: - if len(tmp_npath_b) > 0 and mask[tmp_npath_b[-1][0], tmp_npath_b[-1][1]] > 0: - tmp_merge.append([sorted_end_pts[:1], tmp_npath_b]) - if len(tmp_npath_a) > 0 and mask[tmp_npath_a[-1][0], tmp_npath_a[-1][1]] > 0: - tmp_merge.append([sorted_end_pts[1:2], tmp_npath_a]) - for tmp_idx in range(len(tmp_merge)): - if len(tmp_merge[tmp_idx][1]) == 0: - continue - end_pts.append(tmp_merge[tmp_idx][0]) - ccs.append(set(tmp_merge[tmp_idx][1])) - if len(end_pt) == 1: - sub_mesh = mesh.subgraph(list(cc)).copy() - pnodes = netx.periphery(sub_mesh) - if len(end_pt) == 1: - ends = [*end_pt] - elif len(sorted_end_pt) == 1: - ends = [*sorted_end_pt] - else: - import pdb; pdb.set_trace() - try: - edge_id = global_mesh.nodes[ends[0]]['edge_id'] - except: - import pdb; pdb.set_trace() - pnodes = sorted(pnodes, - key=lambda x: np.hypot((x[0] - ends[0][0]), (x[1] - ends[0][1])), - reverse=True)[0] - npath = [*netx.shortest_path(sub_mesh, (ends[0][0], ends[0][1]), pnodes, weight='length')] - for np_node in npath: - npath_map[np_node[0], np_node[1]] = edge_id - fpath = [] - if global_mesh.nodes[ends[0]].get('far') is None: - print("None far") - else: - fnodes = global_mesh.nodes[ends[0]].get('far') - dmask = mask + 0 - did = 0 - while True: - did += 1 - dmask = cv2.dilate(dmask, np.ones((3, 3)), iterations=1) - if did > 3: - break - ffnode = [fnode for fnode in fnodes if (dmask[fnode[0], fnode[1]] > 0 and mask[fnode[0], fnode[1]] == 0 and\ - global_mesh.nodes[fnode].get('inpaint_id') != inpaint_id + 1)] - if len(ffnode) > 0: - fnode = ffnode[0] - break - if len(ffnode) 
== 0: - continue - fpath.append((fnode[0], fnode[1])) - barrel_dir = np.array([[1, 0], [1, 1], [0, 1], [-1, 1], [-1, 0], [-1, -1], [0, -1], [1, -1]]) - n2f_dir = (int(fnode[0] - npath[0][0]), int(fnode[1] - npath[0][1])) - while True: - if barrel_dir[0, 0] == n2f_dir[0] and barrel_dir[0, 1] == n2f_dir[1]: - n2f_barrel = barrel_dir.copy() - break - barrel_dir = np.roll(barrel_dir, 1, axis=0) - for step in range(0, len(npath)): - if step == 0: - continue - elif step == 1: - next_dir = (npath[step][0] - npath[step - 1][0], npath[step][1] - npath[step - 1][1]) - while True: - if barrel_dir[0, 0] == next_dir[0] and barrel_dir[0, 1] == next_dir[1]: - next_barrel = barrel_dir.copy() - break - barrel_dir = np.roll(barrel_dir, 1, axis=0) - barrel_pair = np.stack((n2f_barrel, next_barrel), axis=0) - n2f_dir = (barrel_pair[0, 0, 0], barrel_pair[0, 0, 1]) - elif step > 1: - next_dir = (npath[step][0] - npath[step - 1][0], npath[step][1] - npath[step - 1][1]) - while True: - if barrel_pair[1, 0, 0] == next_dir[0] and barrel_pair[1, 0, 1] == next_dir[1]: - next_barrel = barrel_pair.copy() - break - barrel_pair = np.roll(barrel_pair, 1, axis=1) - n2f_dir = (barrel_pair[0, 0, 0], barrel_pair[0, 0, 1]) - new_locs = [] - if abs(n2f_dir[0]) == 1: - new_locs.append((npath[step][0] + n2f_dir[0], npath[step][1])) - if abs(n2f_dir[1]) == 1: - new_locs.append((npath[step][0], npath[step][1] + n2f_dir[1])) - if len(new_locs) > 1: - new_locs = sorted(new_locs, key=lambda xx: np.hypot((xx[0] - fpath[-1][0]), (xx[1] - fpath[-1][1]))) - break_flag = False - for new_loc in new_locs: - new_loc_nes = [xx for xx in [(new_loc[0] + 1, new_loc[1]), (new_loc[0] - 1, new_loc[1]), - (new_loc[0], new_loc[1] + 1), (new_loc[0], new_loc[1] - 1)]\ - if xx[0] >= 0 and xx[0] < fpath_map.shape[0] and xx[1] >= 0 and xx[1] < fpath_map.shape[1]] - if np.all([(fpath_map[nlne[0], nlne[1]] == -1) for nlne in new_loc_nes]) != True: - break - if npath_map[new_loc[0], new_loc[1]] != -1: - if npath_map[new_loc[0], new_loc[1]] != edge_id: - break_flag = True - break - else: - continue - if valid_map[new_loc[0], new_loc[1]] == 0: - break_flag = True - break - fpath.append(new_loc) - if break_flag is True: - break - if step != len(npath) - 1: - for xx in npath[step:]: - if npath_map[xx[0], xx[1]] == edge_id: - npath_map[xx[0], xx[1]] = -1 - npath = npath[:step] - if len(fpath) > 0: - for fp_node in fpath: - fpath_map[fp_node[0], fp_node[1]] = edge_id - fpaths[edge_id] = fpath - npaths[edge_id] = npath - fpath_map[valid_near_edge != 0] = -1 - if len(fpath) > 0: - iter_fpath = copy.deepcopy(fpaths[edge_id]) - for node in iter_fpath: - if valid_near_edge[node[0], node[1]] != 0: - fpaths[edge_id].remove(node) - - return fpath_map, npath_map, False, npaths, fpaths, invalid_edge_ids - -def plan_path_e2e(mesh, cc, end_pts, global_mesh, input_edge, mask, valid_map, inpaint_id, npath_map=None, fpath_map=None): - my_npath_map = np.zeros_like(input_edge) - 1 - my_fpath_map = np.zeros_like(input_edge) - 1 - sub_mesh = mesh.subgraph(list(cc)).copy() - ends_1, ends_2 = end_pts[0], end_pts[1] - edge_id = global_mesh.nodes[ends_1]['edge_id'] - npath = [*netx.shortest_path(sub_mesh, (ends_1[0], ends_1[1]), (ends_2[0], ends_2[1]), weight='length')] - for np_node in npath: - my_npath_map[np_node[0], np_node[1]] = edge_id - fpath = [] - if global_mesh.nodes[ends_1].get('far') is None: - print("None far") - else: - fnodes = global_mesh.nodes[ends_1].get('far') - dmask = mask + 0 - while True: - dmask = cv2.dilate(dmask, np.ones((3, 3)), iterations=1) - ffnode = [fnode 
for fnode in fnodes if (dmask[fnode[0], fnode[1]] > 0 and mask[fnode[0], fnode[1]] == 0 and\ - global_mesh.nodes[fnode].get('inpaint_id') != inpaint_id + 1)] - if len(ffnode) > 0: - fnode = ffnode[0] - break - e_fnodes = global_mesh.nodes[ends_2].get('far') - dmask = mask + 0 - while True: - dmask = cv2.dilate(dmask, np.ones((3, 3)), iterations=1) - e_ffnode = [e_fnode for e_fnode in e_fnodes if (dmask[e_fnode[0], e_fnode[1]] > 0 and mask[e_fnode[0], e_fnode[1]] == 0 and\ - global_mesh.nodes[e_fnode].get('inpaint_id') != inpaint_id + 1)] - if len(e_ffnode) > 0: - e_fnode = e_ffnode[0] - break - fpath.append((fnode[0], fnode[1])) - if len(e_ffnode) == 0 or len(ffnode) == 0: - return my_npath_map, my_fpath_map, [], [] - barrel_dir = np.array([[1, 0], [1, 1], [0, 1], [-1, 1], [-1, 0], [-1, -1], [0, -1], [1, -1]]) - n2f_dir = (int(fnode[0] - npath[0][0]), int(fnode[1] - npath[0][1])) - while True: - if barrel_dir[0, 0] == n2f_dir[0] and barrel_dir[0, 1] == n2f_dir[1]: - n2f_barrel = barrel_dir.copy() - break - barrel_dir = np.roll(barrel_dir, 1, axis=0) - for step in range(0, len(npath)): - if step == 0: - continue - elif step == 1: - next_dir = (npath[step][0] - npath[step - 1][0], npath[step][1] - npath[step - 1][1]) - while True: - if barrel_dir[0, 0] == next_dir[0] and barrel_dir[0, 1] == next_dir[1]: - next_barrel = barrel_dir.copy() - break - barrel_dir = np.roll(barrel_dir, 1, axis=0) - barrel_pair = np.stack((n2f_barrel, next_barrel), axis=0) - n2f_dir = (barrel_pair[0, 0, 0], barrel_pair[0, 0, 1]) - elif step > 1: - next_dir = (npath[step][0] - npath[step - 1][0], npath[step][1] - npath[step - 1][1]) - while True: - if barrel_pair[1, 0, 0] == next_dir[0] and barrel_pair[1, 0, 1] == next_dir[1]: - next_barrel = barrel_pair.copy() - break - barrel_pair = np.roll(barrel_pair, 1, axis=1) - n2f_dir = (barrel_pair[0, 0, 0], barrel_pair[0, 0, 1]) - new_locs = [] - if abs(n2f_dir[0]) == 1: - new_locs.append((npath[step][0] + n2f_dir[0], npath[step][1])) - if abs(n2f_dir[1]) == 1: - new_locs.append((npath[step][0], npath[step][1] + n2f_dir[1])) - if len(new_locs) > 1: - new_locs = sorted(new_locs, key=lambda xx: np.hypot((xx[0] - fpath[-1][0]), (xx[1] - fpath[-1][1]))) - break_flag = False - for new_loc in new_locs: - new_loc_nes = [xx for xx in [(new_loc[0] + 1, new_loc[1]), (new_loc[0] - 1, new_loc[1]), - (new_loc[0], new_loc[1] + 1), (new_loc[0], new_loc[1] - 1)]\ - if xx[0] >= 0 and xx[0] < my_fpath_map.shape[0] and xx[1] >= 0 and xx[1] < my_fpath_map.shape[1]] - if fpath_map is not None and np.sum([fpath_map[nlne[0], nlne[1]] for nlne in new_loc_nes]) != 0: - break_flag = True - break - if my_npath_map[new_loc[0], new_loc[1]] != -1: - continue - if npath_map is not None and npath_map[new_loc[0], new_loc[1]] != edge_id: - break_flag = True - break - fpath.append(new_loc) - if break_flag is True: - break - if (e_fnode[0], e_fnode[1]) not in fpath: - fpath.append((e_fnode[0], e_fnode[1])) - if step != len(npath) - 1: - for xx in npath[step:]: - if my_npath_map[xx[0], xx[1]] == edge_id: - my_npath_map[xx[0], xx[1]] = -1 - npath = npath[:step] - if len(fpath) > 0: - for fp_node in fpath: - my_fpath_map[fp_node[0], fp_node[1]] = edge_id - - return my_fpath_map, my_npath_map, npath, fpath - -def plan_path(mesh, info_on_pix, cc, end_pt, global_mesh, input_edge, mask, valid_map, inpaint_id, npath_map=None, fpath_map=None, npath=None): - my_npath_map = np.zeros_like(input_edge) - 1 - my_fpath_map = np.zeros_like(input_edge) - 1 - sub_mesh = mesh.subgraph(list(cc)).copy() - pnodes = 
netx.periphery(sub_mesh) - ends = [*end_pt] - edge_id = global_mesh.nodes[ends[0]]['edge_id'] - pnodes = sorted(pnodes, - key=lambda x: np.hypot((x[0] - ends[0][0]), (x[1] - ends[0][1])), - reverse=True)[0] - if npath is None: - npath = [*netx.shortest_path(sub_mesh, (ends[0][0], ends[0][1]), pnodes, weight='length')] - else: - if (ends[0][0], ends[0][1]) == npath[0]: - npath = npath - elif (ends[0][0], ends[0][1]) == npath[-1]: - npath = npath[::-1] - else: - import pdb; pdb.set_trace() - for np_node in npath: - my_npath_map[np_node[0], np_node[1]] = edge_id - fpath = [] - if global_mesh.nodes[ends[0]].get('far') is None: - print("None far") - else: - fnodes = global_mesh.nodes[ends[0]].get('far') - dmask = mask + 0 - did = 0 - while True: - did += 1 - if did > 3: - return my_fpath_map, my_npath_map, -1 - dmask = cv2.dilate(dmask, np.ones((3, 3)), iterations=1) - ffnode = [fnode for fnode in fnodes if (dmask[fnode[0], fnode[1]] > 0 and mask[fnode[0], fnode[1]] == 0 and\ - global_mesh.nodes[fnode].get('inpaint_id') != inpaint_id + 1)] - if len(ffnode) > 0: - fnode = ffnode[0] - break - - fpath.append((fnode[0], fnode[1])) - disp_diff = 0. - for n_loc in npath: - if mask[n_loc[0], n_loc[1]] != 0: - disp_diff = abs(abs(1. / info_on_pix[(n_loc[0], n_loc[1])][0]['depth']) - abs(1. / ends[0][2])) - break - barrel_dir = np.array([[1, 0], [1, 1], [0, 1], [-1, 1], [-1, 0], [-1, -1], [0, -1], [1, -1]]) - n2f_dir = (int(fnode[0] - npath[0][0]), int(fnode[1] - npath[0][1])) - while True: - if barrel_dir[0, 0] == n2f_dir[0] and barrel_dir[0, 1] == n2f_dir[1]: - n2f_barrel = barrel_dir.copy() - break - barrel_dir = np.roll(barrel_dir, 1, axis=0) - for step in range(0, len(npath)): - if step == 0: - continue - elif step == 1: - next_dir = (npath[step][0] - npath[step - 1][0], npath[step][1] - npath[step - 1][1]) - while True: - if barrel_dir[0, 0] == next_dir[0] and barrel_dir[0, 1] == next_dir[1]: - next_barrel = barrel_dir.copy() - break - barrel_dir = np.roll(barrel_dir, 1, axis=0) - barrel_pair = np.stack((n2f_barrel, next_barrel), axis=0) - n2f_dir = (barrel_pair[0, 0, 0], barrel_pair[0, 0, 1]) - elif step > 1: - next_dir = (npath[step][0] - npath[step - 1][0], npath[step][1] - npath[step - 1][1]) - while True: - if barrel_pair[1, 0, 0] == next_dir[0] and barrel_pair[1, 0, 1] == next_dir[1]: - next_barrel = barrel_pair.copy() - break - barrel_pair = np.roll(barrel_pair, 1, axis=1) - n2f_dir = (barrel_pair[0, 0, 0], barrel_pair[0, 0, 1]) - new_locs = [] - if abs(n2f_dir[0]) == 1: - new_locs.append((npath[step][0] + n2f_dir[0], npath[step][1])) - if abs(n2f_dir[1]) == 1: - new_locs.append((npath[step][0], npath[step][1] + n2f_dir[1])) - if len(new_locs) > 1: - new_locs = sorted(new_locs, key=lambda xx: np.hypot((xx[0] - fpath[-1][0]), (xx[1] - fpath[-1][1]))) - break_flag = False - for new_loc in new_locs: - new_loc_nes = [xx for xx in [(new_loc[0] + 1, new_loc[1]), (new_loc[0] - 1, new_loc[1]), - (new_loc[0], new_loc[1] + 1), (new_loc[0], new_loc[1] - 1)]\ - if xx[0] >= 0 and xx[0] < my_fpath_map.shape[0] and xx[1] >= 0 and xx[1] < my_fpath_map.shape[1]] - if fpath_map is not None and np.all([(fpath_map[nlne[0], nlne[1]] == -1) for nlne in new_loc_nes]) != True: - break_flag = True - break - if np.all([(my_fpath_map[nlne[0], nlne[1]] == -1) for nlne in new_loc_nes]) != True: - break_flag = True - break - if my_npath_map[new_loc[0], new_loc[1]] != -1: - continue - if npath_map is not None and npath_map[new_loc[0], new_loc[1]] != edge_id: - break_flag = True - break - if valid_map[new_loc[0], 
new_loc[1]] == 0: - break_flag = True - break - fpath.append(new_loc) - if break_flag is True: - break - if step != len(npath) - 1: - for xx in npath[step:]: - if my_npath_map[xx[0], xx[1]] == edge_id: - my_npath_map[xx[0], xx[1]] = -1 - npath = npath[:step] - if len(fpath) > 0: - for fp_node in fpath: - my_fpath_map[fp_node[0], fp_node[1]] = edge_id - - return my_fpath_map, my_npath_map, disp_diff - -def refresh_node(old_node, old_feat, new_node, new_feat, mesh, stime=False): - mesh.add_node(new_node) - mesh.nodes[new_node].update(new_feat) - mesh.nodes[new_node].update(old_feat) - for ne in mesh.neighbors(old_node): - mesh.add_edge(new_node, ne) - if mesh.nodes[new_node].get('far') is not None: - tmp_far_nodes = mesh.nodes[new_node]['far'] - for far_node in tmp_far_nodes: - if mesh.has_node(far_node) is False: - mesh.nodes[new_node]['far'].remove(far_node) - continue - if mesh.nodes[far_node].get('near') is not None: - for idx in range(len(mesh.nodes[far_node].get('near'))): - if mesh.nodes[far_node]['near'][idx][0] == new_node[0] and mesh.nodes[far_node]['near'][idx][1] == new_node[1]: - if len(mesh.nodes[far_node]['near'][idx]) == len(old_node): - mesh.nodes[far_node]['near'][idx] = new_node - if mesh.nodes[new_node].get('near') is not None: - tmp_near_nodes = mesh.nodes[new_node]['near'] - for near_node in tmp_near_nodes: - if mesh.has_node(near_node) is False: - mesh.nodes[new_node]['near'].remove(near_node) - continue - if mesh.nodes[near_node].get('far') is not None: - for idx in range(len(mesh.nodes[near_node].get('far'))): - if mesh.nodes[near_node]['far'][idx][0] == new_node[0] and mesh.nodes[near_node]['far'][idx][1] == new_node[1]: - if len(mesh.nodes[near_node]['far'][idx]) == len(old_node): - mesh.nodes[near_node]['far'][idx] = new_node - if new_node != old_node: - mesh.remove_node(old_node) - if stime is False: - return mesh - else: - return mesh, None, None - - -def create_placeholder(context, mask, depth, fpath_map, npath_map, mesh, inpaint_id, edge_ccs, extend_edge_cc, all_edge_maps, self_edge_id): - add_node_time = 0 - add_edge_time = 0 - add_far_near_time = 0 - valid_area = context + mask - H, W = mesh.graph['H'], mesh.graph['W'] - edge_cc = edge_ccs[self_edge_id] - num_com = len(edge_cc) + len(extend_edge_cc) - hxs, hys = np.where(mask > 0) - for hx, hy in zip(hxs, hys): - mesh.add_node((hx, hy), inpaint_id=inpaint_id + 1, num_context=num_com) - for hx, hy in zip(hxs, hys): - four_nes = [(x, y) for x, y in [(hx + 1, hy), (hx - 1, hy), (hx, hy + 1), (hx, hy - 1)] if\ - 0 <= x < mesh.graph['H'] and 0 <= y < mesh.graph['W'] and valid_area[x, y] != 0] - for ne in four_nes: - if mask[ne[0], ne[1]] != 0: - if not mesh.has_edge((hx, hy), ne): - mesh.add_edge((hx, hy), ne) - elif depth[ne[0], ne[1]] != 0: - if mesh.has_node((ne[0], ne[1], depth[ne[0], ne[1]])) and\ - not mesh.has_edge((hx, hy), (ne[0], ne[1], depth[ne[0], ne[1]])): - mesh.add_edge((hx, hy), (ne[0], ne[1], depth[ne[0], ne[1]])) - else: - print("Undefined context node.") - import pdb; pdb.set_trace() - near_ids = np.unique(npath_map) - if near_ids[0] == -1: near_ids = near_ids[1:] - for near_id in near_ids: - hxs, hys = np.where((fpath_map == near_id) & (mask > 0)) - if hxs.shape[0] > 0: - mesh.graph['max_edge_id'] = mesh.graph['max_edge_id'] + 1 - else: - break - for hx, hy in zip(hxs, hys): - mesh.nodes[(hx, hy)]['edge_id'] = int(round(mesh.graph['max_edge_id'])) - four_nes = [(x, y) for x, y in [(hx + 1, hy), (hx - 1, hy), (hx, hy + 1), (hx, hy - 1)] if\ - x < mesh.graph['H'] and x >= 0 and y < 
mesh.graph['W'] and y >= 0 and npath_map[x, y] == near_id] - for xx in four_nes: - xx_n = copy.deepcopy(xx) - if not mesh.has_node(xx_n): - if mesh.has_node((xx_n[0], xx_n[1], depth[xx_n[0], xx_n[1]])): - xx_n = (xx_n[0], xx_n[1], depth[xx_n[0], xx_n[1]]) - if mesh.has_edge((hx, hy), xx_n): - # pass - mesh.remove_edge((hx, hy), xx_n) - if mesh.nodes[(hx, hy)].get('near') is None: - mesh.nodes[(hx, hy)]['near'] = [] - mesh.nodes[(hx, hy)]['near'].append(xx_n) - connect_point_exception = set() - hxs, hys = np.where((npath_map == near_id) & (all_edge_maps > -1)) - for hx, hy in zip(hxs, hys): - unknown_id = int(round(all_edge_maps[hx, hy])) - if unknown_id != near_id and unknown_id != self_edge_id: - unknown_node = set([xx for xx in edge_ccs[unknown_id] if xx[0] == hx and xx[1] == hy]) - connect_point_exception |= unknown_node - hxs, hys = np.where((npath_map == near_id) & (mask > 0)) - if hxs.shape[0] > 0: - mesh.graph['max_edge_id'] = mesh.graph['max_edge_id'] + 1 - else: - break - for hx, hy in zip(hxs, hys): - mesh.nodes[(hx, hy)]['edge_id'] = int(round(mesh.graph['max_edge_id'])) - mesh.nodes[(hx, hy)]['connect_point_id'] = int(round(near_id)) - mesh.nodes[(hx, hy)]['connect_point_exception'] = connect_point_exception - four_nes = [(x, y) for x, y in [(hx + 1, hy), (hx - 1, hy), (hx, hy + 1), (hx, hy - 1)] if\ - x < mesh.graph['H'] and x >= 0 and y < mesh.graph['W'] and y >= 0 and fpath_map[x, y] == near_id] - for xx in four_nes: - xx_n = copy.deepcopy(xx) - if not mesh.has_node(xx_n): - if mesh.has_node((xx_n[0], xx_n[1], depth[xx_n[0], xx_n[1]])): - xx_n = (xx_n[0], xx_n[1], depth[xx_n[0], xx_n[1]]) - if mesh.has_edge((hx, hy), xx_n): - mesh.remove_edge((hx, hy), xx_n) - if mesh.nodes[(hx, hy)].get('far') is None: - mesh.nodes[(hx, hy)]['far'] = [] - mesh.nodes[(hx, hy)]['far'].append(xx_n) - - return mesh, add_node_time, add_edge_time, add_far_near_time - -def clean_far_edge(mask_edge, mask_edge_with_id, context_edge, mask, info_on_pix, global_mesh, anchor): - if isinstance(mask_edge, torch.Tensor): - if mask_edge.is_cuda: - mask_edge = mask_edge.cpu() - mask_edge = mask_edge.data - mask_edge = mask_edge.numpy() - if isinstance(context_edge, torch.Tensor): - if context_edge.is_cuda: - context_edge = context_edge.cpu() - context_edge = context_edge.data - context_edge = context_edge.numpy() - if isinstance(mask, torch.Tensor): - if mask.is_cuda: - mask = mask.cpu() - mask = mask.data - mask = mask.numpy() - mask = mask.squeeze() - mask_edge = mask_edge.squeeze() - context_edge = context_edge.squeeze() - valid_near_edge = np.zeros_like(mask_edge) - far_edge = np.zeros_like(mask_edge) - far_edge_with_id = np.ones_like(mask_edge) * -1 - near_edge_with_id = np.ones_like(mask_edge) * -1 - uncleaned_far_edge = np.zeros_like(mask_edge) - # Detect if there is any valid pixel mask_edge, if not ==> return default value - if mask_edge.sum() == 0: - return far_edge, uncleaned_far_edge, far_edge_with_id, near_edge_with_id - mask_edge_ids = dict(collections.Counter(mask_edge_with_id.flatten())).keys() - for edge_id in mask_edge_ids: - if edge_id < 0: - continue - specific_edge_map = (mask_edge_with_id == edge_id).astype(np.uint8) - _, sub_specific_edge_maps = cv2.connectedComponents(specific_edge_map.astype(np.uint8), connectivity=8) - for sub_edge_id in range(1, sub_specific_edge_maps.max() + 1): - specific_edge_map = (sub_specific_edge_maps == sub_edge_id).astype(np.uint8) - edge_pxs, edge_pys = np.where(specific_edge_map > 0) - edge_mesh = netx.Graph() - for edge_px, edge_py in zip(edge_pxs, 
edge_pys): - edge_mesh.add_node((edge_px, edge_py)) - for ex in [edge_px-1, edge_px, edge_px+1]: - for ey in [edge_py-1, edge_py, edge_py+1]: - if edge_px == ex and edge_py == ey: - continue - if ex < 0 or ex >= specific_edge_map.shape[0] or ey < 0 or ey >= specific_edge_map.shape[1]: - continue - if specific_edge_map[ex, ey] == 1: - if edge_mesh.has_node((ex, ey)): - edge_mesh.add_edge((ex, ey), (edge_px, edge_py)) - periphery_nodes = netx.periphery(edge_mesh) - path_diameter = netx.diameter(edge_mesh) - start_near_node = None - for node_s in periphery_nodes: - for node_e in periphery_nodes: - if node_s != node_e: - if netx.shortest_path_length(edge_mesh, node_s, node_e) == path_diameter: - if np.any(context_edge[node_s[0]-1:node_s[0]+2, node_s[1]-1:node_s[1]+2].flatten()): - start_near_node = (node_s[0], node_s[1]) - end_near_node = (node_e[0], node_e[1]) - break - if np.any(context_edge[node_e[0]-1:node_e[0]+2, node_e[1]-1:node_e[1]+2].flatten()): - start_near_node = (node_e[0], node_e[1]) - end_near_node = (node_s[0], node_s[1]) - break - if start_near_node is not None: - break - if start_near_node is None: - continue - new_specific_edge_map = np.zeros_like(mask) - for path_node in netx.shortest_path(edge_mesh, start_near_node, end_near_node): - new_specific_edge_map[path_node[0], path_node[1]] = 1 - context_near_pxs, context_near_pys = np.where(context_edge[start_near_node[0]-1:start_near_node[0]+2, start_near_node[1]-1:start_near_node[1]+2] > 0) - distance = np.abs((context_near_pxs - 1)) + np.abs((context_near_pys - 1)) - if (np.where(distance == distance.min())[0].shape[0]) > 1: - closest_pxs = context_near_pxs[np.where(distance == distance.min())[0]] - closest_pys = context_near_pys[np.where(distance == distance.min())[0]] - closest_depths = [] - for closest_px, closest_py in zip(closest_pxs, closest_pys): - if info_on_pix.get((closest_px + start_near_node[0] - 1 + anchor[0], closest_py + start_near_node[1] - 1 + anchor[2])) is not None: - for info in info_on_pix.get((closest_px + start_near_node[0] - 1 + anchor[0], closest_py + start_near_node[1] - 1 + anchor[2])): - if info['synthesis'] is False: - closest_depths.append(abs(info['depth'])) - context_near_px, context_near_py = closest_pxs[np.array(closest_depths).argmax()], closest_pys[np.array(closest_depths).argmax()] - else: - context_near_px, context_near_py = context_near_pxs[distance.argmin()], context_near_pys[distance.argmin()] - context_near_node = (start_near_node[0]-1 + context_near_px, start_near_node[1]-1 + context_near_py) - far_node_list = [] - global_context_near_node = (context_near_node[0] + anchor[0], context_near_node[1] + anchor[2]) - if info_on_pix.get(global_context_near_node) is not None: - for info in info_on_pix[global_context_near_node]: - if info['synthesis'] is False: - context_near_node_3d = (global_context_near_node[0], global_context_near_node[1], info['depth']) - if global_mesh.nodes[context_near_node_3d].get('far') is not None: - for far_node in global_mesh.nodes[context_near_node_3d].get('far'): - far_node = (far_node[0] - anchor[0], far_node[1] - anchor[2], far_node[2]) - if mask[far_node[0], far_node[1]] == 0: - far_node_list.append([far_node[0], far_node[1]]) - if len(far_node_list) > 0: - far_nodes_dist = np.sum(np.abs(np.array(far_node_list) - np.array([[edge_px, edge_py]])), axis=1) - context_far_node = tuple(far_node_list[far_nodes_dist.argmin()]) - corresponding_far_edge = np.zeros_like(mask_edge) - corresponding_far_edge[context_far_node[0], context_far_node[1]] = 1 - surround_map = 
cv2.dilate(new_specific_edge_map.astype(np.uint8), - np.array([[1,1,1],[1,1,1],[1,1,1]]).astype(np.uint8), - iterations=1) - specific_edge_map_wo_end_pt = new_specific_edge_map.copy() - specific_edge_map_wo_end_pt[end_near_node[0], end_near_node[1]] = 0 - surround_map_wo_end_pt = cv2.dilate(specific_edge_map_wo_end_pt.astype(np.uint8), - np.array([[1,1,1],[1,1,1],[1,1,1]]).astype(np.uint8), - iterations=1) - surround_map_wo_end_pt[new_specific_edge_map > 0] = 0 - surround_map_wo_end_pt[context_near_node[0], context_near_node[1]] = 0 - surround_map = surround_map_wo_end_pt.copy() - _, far_edge_cc = cv2.connectedComponents(surround_map.astype(np.uint8), connectivity=4) - start_far_node = None - accompany_far_node = None - if surround_map[context_far_node[0], context_far_node[1]] == 1: - start_far_node = context_far_node - else: - four_nes = [(context_far_node[0] - 1, context_far_node[1]), - (context_far_node[0] + 1, context_far_node[1]), - (context_far_node[0], context_far_node[1] - 1), - (context_far_node[0], context_far_node[1] + 1)] - candidate_bevel = [] - for ne in four_nes: - if surround_map[ne[0], ne[1]] == 1: - start_far_node = (ne[0], ne[1]) - break - elif (ne[0] != context_near_node[0] or ne[1] != context_near_node[1]) and \ - (ne[0] != start_near_node[0] or ne[1] != start_near_node[1]): - candidate_bevel.append((ne[0], ne[1])) - if start_far_node is None: - for ne in candidate_bevel: - if ne[0] == context_far_node[0]: - bevel_xys = [[ne[0] + 1, ne[1]], [ne[0] - 1, ne[1]]] - if ne[1] == context_far_node[1]: - bevel_xys = [[ne[0], ne[1] + 1], [ne[0], ne[1] - 1]] - for bevel_x, bevel_y in bevel_xys: - if surround_map[bevel_x, bevel_y] == 1: - start_far_node = (bevel_x, bevel_y) - accompany_far_node = (ne[0], ne[1]) - break - if start_far_node is not None: - break - if start_far_node is not None: - for far_edge_id in range(1, far_edge_cc.max() + 1): - specific_far_edge = (far_edge_cc == far_edge_id).astype(np.uint8) - if specific_far_edge[start_far_node[0], start_far_node[1]] == 1: - if accompany_far_node is not None: - specific_far_edge[accompany_far_node] = 1 - far_edge[specific_far_edge > 0] = 1 - far_edge_with_id[specific_far_edge > 0] = edge_id - end_far_candidates = np.zeros_like(far_edge) - end_far_candidates[end_near_node[0], end_near_node[1]] = 1 - end_far_candidates = cv2.dilate(end_far_candidates.astype(np.uint8), - np.array([[0,1,0],[1,1,1],[0,1,0]]).astype(np.uint8), - iterations=1) - end_far_candidates[end_near_node[0], end_near_node[1]] = 0 - invalid_nodes = (((far_edge_cc != far_edge_id).astype(np.uint8) * \ - (far_edge_cc != 0).astype(np.uint8)).astype(np.uint8) + \ - (new_specific_edge_map).astype(np.uint8) + \ - (mask == 0).astype(np.uint8)).clip(0, 1) - end_far_candidates[invalid_nodes > 0] = 0 - far_edge[end_far_candidates > 0] = 1 - far_edge_with_id[end_far_candidates > 0] = edge_id - - far_edge[context_far_node[0], context_far_node[1]] = 1 - far_edge_with_id[context_far_node[0], context_far_node[1]] = edge_id - near_edge_with_id[(mask_edge_with_id == edge_id) > 0] = edge_id - uncleaned_far_edge = far_edge.copy() - far_edge[mask == 0] = 0 - - return far_edge, uncleaned_far_edge, far_edge_with_id, near_edge_with_id - -def get_MiDaS_samples(image_folder, depth_folder, config, specific=None, aft_certain=None): - lines = [os.path.splitext(os.path.basename(xx))[0] for xx in glob.glob(os.path.join(image_folder, '*' + config['img_format']))] - samples = [] - generic_pose = np.eye(4) - assert len(config['traj_types']) == len(config['x_shift_range']) ==\ - 
len(config['y_shift_range']) == len(config['z_shift_range']) == len(config['video_postfix']), \ - "The number of elements in 'traj_types', 'x_shift_range', 'y_shift_range', 'z_shift_range' and \ - 'video_postfix' should be equal." - tgt_pose = [[generic_pose * 1]] - tgts_poses = [] - for traj_idx in range(len(config['traj_types'])): - tgt_poses = [] - sx, sy, sz = path_planning(config['num_frames'], config['x_shift_range'][traj_idx], config['y_shift_range'][traj_idx], - config['z_shift_range'][traj_idx], path_type=config['traj_types'][traj_idx]) - for xx, yy, zz in zip(sx, sy, sz): - tgt_poses.append(generic_pose * 1.) - tgt_poses[-1][:3, -1] = np.array([xx, yy, zz]) - tgts_poses += [tgt_poses] - tgt_pose = generic_pose * 1 - - aft_flag = True - if aft_certain is not None and len(aft_certain) > 0: - aft_flag = False - for seq_dir in lines: - if specific is not None and len(specific) > 0: - if specific != seq_dir: - continue - if aft_certain is not None and len(aft_certain) > 0: - if aft_certain == seq_dir: - aft_flag = True - if aft_flag is False: - continue - samples.append({}) - sdict = samples[-1] - sdict['depth_fi'] = os.path.join(depth_folder, seq_dir + config['depth_format']) - sdict['ref_img_fi'] = os.path.join(image_folder, seq_dir + config['img_format']) - H, W = imageio.imread(sdict['ref_img_fi']).shape[:2] - sdict['int_mtx'] = np.array([[max(H, W), 0, W//2], [0, max(H, W), H//2], [0, 0, 1]]).astype(np.float32) - if sdict['int_mtx'].max() > 1: - sdict['int_mtx'][0, :] = sdict['int_mtx'][0, :] / float(W) - sdict['int_mtx'][1, :] = sdict['int_mtx'][1, :] / float(H) - sdict['ref_pose'] = np.eye(4) - sdict['tgt_pose'] = tgt_pose - sdict['tgts_poses'] = tgts_poses - sdict['video_postfix'] = config['video_postfix'] - sdict['tgt_name'] = [os.path.splitext(os.path.basename(sdict['depth_fi']))[0]] - sdict['src_pair_name'] = sdict['tgt_name'][0] - - return samples - -def get_valid_size(imap): - x_max = np.where(imap.sum(1).squeeze() > 0)[0].max() + 1 - x_min = np.where(imap.sum(1).squeeze() > 0)[0].min() - y_max = np.where(imap.sum(0).squeeze() > 0)[0].max() + 1 - y_min = np.where(imap.sum(0).squeeze() > 0)[0].min() - size_dict = {'x_max':x_max, 'y_max':y_max, 'x_min':x_min, 'y_min':y_min} - - return size_dict - -def dilate_valid_size(isize_dict, imap, dilate=[0, 0]): - osize_dict = copy.deepcopy(isize_dict) - osize_dict['x_min'] = max(0, osize_dict['x_min'] - dilate[0]) - osize_dict['x_max'] = min(imap.shape[0], osize_dict['x_max'] + dilate[0]) - osize_dict['y_min'] = max(0, osize_dict['y_min'] - dilate[0]) - osize_dict['y_max'] = min(imap.shape[1], osize_dict['y_max'] + dilate[1]) - - return osize_dict - -def crop_maps_by_size(size, *imaps): - omaps = [] - for imap in imaps: - omaps.append(imap[size['x_min']:size['x_max'], size['y_min']:size['y_max']].copy()) - - return omaps - -def smooth_cntsyn_gap(init_depth_map, mask_region, context_region, init_mask_region=None): - if init_mask_region is not None: - curr_mask_region = init_mask_region * 1 - else: - curr_mask_region = mask_region * 0 - depth_map = init_depth_map.copy() - for _ in range(2): - cm_mask = context_region + curr_mask_region - depth_s1 = np.roll(depth_map, 1, 0) - depth_s2 = np.roll(depth_map, -1, 0) - depth_s3 = np.roll(depth_map, 1, 1) - depth_s4 = np.roll(depth_map, -1, 1) - mask_s1 = np.roll(cm_mask, 1, 0) - mask_s2 = np.roll(cm_mask, -1, 0) - mask_s3 = np.roll(cm_mask, 1, 1) - mask_s4 = np.roll(cm_mask, -1, 1) - fluxin_depths = (depth_s1 * mask_s1 + depth_s2 * mask_s2 + depth_s3 * mask_s3 + depth_s4 * mask_s4) / \ - 
((mask_s1 + mask_s2 + mask_s3 + mask_s4) + 1e-6) - fluxin_mask = (fluxin_depths != 0) * mask_region - init_mask = (fluxin_mask * (curr_mask_region >= 0).astype(np.float32) > 0).astype(np.uint8) - depth_map[init_mask > 0] = fluxin_depths[init_mask > 0] - if init_mask.shape[-1] > curr_mask_region.shape[-1]: - curr_mask_region[init_mask.sum(-1, keepdims=True) > 0] = 1 - else: - curr_mask_region[init_mask > 0] = 1 - depth_map[fluxin_mask > 0] = fluxin_depths[fluxin_mask > 0] - - return depth_map - -def read_MiDaS_depth(disp_fi, disp_rescale=10., h=None, w=None): - if 'npy' in os.path.splitext(disp_fi)[-1]: - disp = np.load(disp_fi) - else: - disp = imageio.imread(disp_fi).astype(np.float32) - disp = disp - disp.min() - disp = cv2.blur(disp / disp.max(), ksize=(3, 3)) * disp.max() - disp = (disp / disp.max()) * disp_rescale - if h is not None and w is not None: - disp = resize(disp / disp.max(), (h, w), order=1) * disp.max() - depth = 1. / np.maximum(disp, 0.05) - - return depth - -def follow_image_aspect_ratio(depth, image): - H, W = image.shape[:2] - image_aspect_ratio = H / W - dH, dW = depth.shape[:2] - depth_aspect_ratio = dH / dW - if depth_aspect_ratio > image_aspect_ratio: - resize_H = dH - resize_W = dH / image_aspect_ratio - else: - resize_W = dW - resize_H = dW * image_aspect_ratio - depth = resize(depth / depth.max(), - (int(resize_H), - int(resize_W)), - order=0) * depth.max() - - return depth - -def depth_resize(depth, origin_size, image_size): - if origin_size[0] is not 0: - max_depth = depth.max() - depth = depth / max_depth - depth = resize(depth, origin_size, order=1, mode='edge') - depth = depth * max_depth - else: - max_depth = depth.max() - depth = depth / max_depth - depth = resize(depth, image_size, order=1, mode='edge') - depth = depth * max_depth - - return depth - -def filter_irrelevant_edge(self_edge, other_edges, other_edges_with_id, current_edge_id, context, edge_ccs, mesh, anchor): - other_edges = other_edges.squeeze() - other_edges_with_id = other_edges_with_id.squeeze() - - self_edge = self_edge.squeeze() - dilate_self_edge = cv2.dilate(self_edge.astype(np.uint8), np.array([[1,1,1],[1,1,1],[1,1,1]]).astype(np.uint8), iterations=1) - edge_ids = collections.Counter(other_edges_with_id.flatten()).keys() - other_edges_info = [] - # import ipdb - # ipdb.set_trace() - for edge_id in edge_ids: - edge_id = int(edge_id) - if edge_id >= 0: - condition = ((other_edges_with_id == edge_id) * other_edges * context).astype(np.uint8) - if dilate_self_edge[condition > 0].sum() == 0: - other_edges[other_edges_with_id == edge_id] = 0 - else: - num_condition, condition_labels = cv2.connectedComponents(condition, connectivity=8) - for condition_id in range(1, num_condition): - isolate_condition = ((condition_labels == condition_id) > 0).astype(np.uint8) - num_end_group, end_group = cv2.connectedComponents(((dilate_self_edge * isolate_condition) > 0).astype(np.uint8), connectivity=8) - if num_end_group == 1: - continue - for end_id in range(1, num_end_group): - end_pxs, end_pys = np.where((end_group == end_id)) - end_px, end_py = end_pxs[0], end_pys[0] - other_edges_info.append({}) - other_edges_info[-1]['edge_id'] = edge_id - # other_edges_info[-1]['near_depth'] = None - other_edges_info[-1]['diff'] = None - other_edges_info[-1]['edge_map'] = np.zeros_like(self_edge) - other_edges_info[-1]['end_point_map'] = np.zeros_like(self_edge) - other_edges_info[-1]['end_point_map'][(end_group == end_id)] = 1 - other_edges_info[-1]['forbidden_point_map'] = np.zeros_like(self_edge) - 
other_edges_info[-1]['forbidden_point_map'][(end_group != end_id) * (end_group != 0)] = 1 - other_edges_info[-1]['forbidden_point_map'] = cv2.dilate(other_edges_info[-1]['forbidden_point_map'], kernel=np.array([[1,1,1],[1,1,1],[1,1,1]]), iterations=2) - for x in edge_ccs[edge_id]: - nx = x[0] - anchor[0] - ny = x[1] - anchor[1] - if nx == end_px and ny == end_py: - # other_edges_info[-1]['near_depth'] = abs(nx) - if mesh.nodes[x].get('far') is not None and len(mesh.nodes[x].get('far')) == 1: - other_edges_info[-1]['diff'] = abs(1./abs([*mesh.nodes[x].get('far')][0][2]) - 1./abs(x[2])) - else: - other_edges_info[-1]['diff'] = 0 - # if end_group[nx, ny] != end_id and end_group[nx, ny] > 0: - # continue - try: - if isolate_condition[nx, ny] == 1: - other_edges_info[-1]['edge_map'][nx, ny] = 1 - except: - pass - try: - other_edges_info = sorted(other_edges_info, key=lambda x : x['diff'], reverse=True) - except: - import pdb - pdb.set_trace() - # import pdb - # pdb.set_trace() - # other_edges = other_edges[..., None] - for other_edge in other_edges_info: - if other_edge['end_point_map'] is None: - import pdb - pdb.set_trace() - - other_edges = other_edges * context - - return other_edges, other_edges_info - -def require_depth_edge(context_edge, mask): - dilate_mask = cv2.dilate(mask, np.array([[1,1,1],[1,1,1],[1,1,1]]).astype(np.uint8), iterations=1) - if (dilate_mask * context_edge).max() == 0: - return False - else: - return True - -def refine_color_around_edge(mesh, info_on_pix, edge_ccs, config, spdb=False): - H, W = mesh.graph['H'], mesh.graph['W'] - tmp_edge_ccs = copy.deepcopy(edge_ccs) - for edge_id, edge_cc in enumerate(edge_ccs): - if len(edge_cc) == 0: - continue - near_maps = np.zeros((H, W)).astype(np.bool) - far_maps = np.zeros((H, W)).astype(np.bool) - tmp_far_nodes = set() - far_nodes = set() - near_nodes = set() - end_nodes = set() - for i in range(5): - if i == 0: - for edge_node in edge_cc: - if mesh.nodes[edge_node].get('depth_edge_dilate_2_color_flag') is not True: - break - if mesh.nodes[edge_node].get('inpaint_id') == 1: - near_nodes.add(edge_node) - tmp_node = mesh.nodes[edge_node].get('far') - tmp_node = set(tmp_node) if tmp_node is not None else set() - tmp_far_nodes |= tmp_node - rmv_tmp_far_nodes = set() - for far_node in tmp_far_nodes: - if not(mesh.has_node(far_node) and mesh.nodes[far_node].get('inpaint_id') == 1): - rmv_tmp_far_nodes.add(far_node) - if len(tmp_far_nodes - rmv_tmp_far_nodes) == 0: - break - else: - for near_node in near_nodes: - near_maps[near_node[0], near_node[1]] = True - mesh.nodes[near_node]['refine_rgbd'] = True - mesh.nodes[near_node]['backup_depth'] = near_node[2] \ - if mesh.nodes[near_node].get('real_depth') is None else mesh.nodes[near_node]['real_depth'] - mesh.nodes[near_node]['backup_color'] = mesh.nodes[near_node]['color'] - for far_node in tmp_far_nodes: - if mesh.has_node(far_node) and mesh.nodes[far_node].get('inpaint_id') == 1: - far_nodes.add(far_node) - far_maps[far_node[0], far_node[1]] = True - mesh.nodes[far_node]['refine_rgbd'] = True - mesh.nodes[far_node]['backup_depth'] = far_node[2] \ - if mesh.nodes[far_node].get('real_depth') is None else mesh.nodes[far_node]['real_depth'] - mesh.nodes[far_node]['backup_color'] = mesh.nodes[far_node]['color'] - tmp_far_nodes = far_nodes - tmp_near_nodes = near_nodes - else: - tmp_far_nodes = new_tmp_far_nodes - tmp_near_nodes = new_tmp_near_nodes - new_tmp_far_nodes = None - new_tmp_near_nodes = None - new_tmp_far_nodes = set() - new_tmp_near_nodes = set() - for node in 
tmp_near_nodes: - for ne_node in mesh.neighbors(node): - if far_maps[ne_node[0], ne_node[1]] == False and \ - near_maps[ne_node[0], ne_node[1]] == False: - if mesh.nodes[ne_node].get('inpaint_id') == 1: - new_tmp_near_nodes.add(ne_node) - near_maps[ne_node[0], ne_node[1]] = True - mesh.nodes[ne_node]['refine_rgbd'] = True - mesh.nodes[ne_node]['backup_depth'] = ne_node[2] \ - if mesh.nodes[ne_node].get('real_depth') is None else mesh.nodes[ne_node]['real_depth'] - mesh.nodes[ne_node]['backup_color'] = mesh.nodes[ne_node]['color'] - else: - mesh.nodes[ne_node]['backup_depth'] = ne_node[2] \ - if mesh.nodes[ne_node].get('real_depth') is None else mesh.nodes[ne_node]['real_depth'] - mesh.nodes[ne_node]['backup_color'] = mesh.nodes[ne_node]['color'] - end_nodes.add(node) - near_nodes.update(new_tmp_near_nodes) - for node in tmp_far_nodes: - for ne_node in mesh.neighbors(node): - if far_maps[ne_node[0], ne_node[1]] == False and \ - near_maps[ne_node[0], ne_node[1]] == False: - if mesh.nodes[ne_node].get('inpaint_id') == 1: - new_tmp_far_nodes.add(ne_node) - far_maps[ne_node[0], ne_node[1]] = True - mesh.nodes[ne_node]['refine_rgbd'] = True - mesh.nodes[ne_node]['backup_depth'] = ne_node[2] \ - if mesh.nodes[ne_node].get('real_depth') is None else mesh.nodes[ne_node]['real_depth'] - mesh.nodes[ne_node]['backup_color'] = mesh.nodes[ne_node]['color'] - else: - mesh.nodes[ne_node]['backup_depth'] = ne_node[2] \ - if mesh.nodes[ne_node].get('real_depth') is None else mesh.nodes[ne_node]['real_depth'] - mesh.nodes[ne_node]['backup_color'] = mesh.nodes[ne_node]['color'] - end_nodes.add(node) - far_nodes.update(new_tmp_far_nodes) - if len(far_nodes) == 0: - tmp_edge_ccs[edge_id] = set() - continue - for node in new_tmp_far_nodes | new_tmp_near_nodes: - for ne_node in mesh.neighbors(node): - if far_maps[ne_node[0], ne_node[1]] == False and near_maps[ne_node[0], ne_node[1]] == False: - end_nodes.add(node) - mesh.nodes[ne_node]['backup_depth'] = ne_node[2] \ - if mesh.nodes[ne_node].get('real_depth') is None else mesh.nodes[ne_node]['real_depth'] - mesh.nodes[ne_node]['backup_color'] = mesh.nodes[ne_node]['color'] - tmp_end_nodes = end_nodes - - refine_nodes = near_nodes | far_nodes - remain_refine_nodes = copy.deepcopy(refine_nodes) - accum_idx = 0 - while len(remain_refine_nodes) > 0: - accum_idx += 1 - if accum_idx > 100: - break - new_tmp_end_nodes = None - new_tmp_end_nodes = set() - survive_tmp_end_nodes = set() - for node in tmp_end_nodes: - re_depth, re_color, re_count = 0, np.array([0., 0., 0.]), 0 - for ne_node in mesh.neighbors(node): - if mesh.nodes[ne_node].get('refine_rgbd') is True: - if ne_node not in tmp_end_nodes: - new_tmp_end_nodes.add(ne_node) - else: - try: - re_depth += mesh.nodes[ne_node]['backup_depth'] - re_color += mesh.nodes[ne_node]['backup_color'].astype(np.float32) - re_count += 1. 
- except: - import pdb; pdb.set_trace() - if re_count > 0: - re_depth = re_depth / re_count - re_color = re_color / re_count - mesh.nodes[node]['backup_depth'] = re_depth - mesh.nodes[node]['backup_color'] = re_color - mesh.nodes[node]['refine_rgbd'] = False - else: - survive_tmp_end_nodes.add(node) - for node in tmp_end_nodes - survive_tmp_end_nodes: - if node in remain_refine_nodes: - remain_refine_nodes.remove(node) - tmp_end_nodes = new_tmp_end_nodes - if spdb == True: - bfrd_canvas = np.zeros((H, W)) - bfrc_canvas = np.zeros((H, W, 3)).astype(np.uint8) - aftd_canvas = np.zeros((H, W)) - aftc_canvas = np.zeros((H, W, 3)).astype(np.uint8) - for node in refine_nodes: - bfrd_canvas[node[0], node[1]] = abs(node[2]) - aftd_canvas[node[0], node[1]] = abs(mesh.nodes[node]['backup_depth']) - bfrc_canvas[node[0], node[1]] = mesh.nodes[node]['color'].astype(np.uint8) - aftc_canvas[node[0], node[1]] = mesh.nodes[node]['backup_color'].astype(np.uint8) - f, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4, sharex=True, sharey=True); - ax1.imshow(bfrd_canvas); - ax2.imshow(aftd_canvas); - ax3.imshow(bfrc_canvas); - ax4.imshow(aftc_canvas); - plt.show() - import pdb; pdb.set_trace() - for node in refine_nodes: - if mesh.nodes[node].get('refine_rgbd') is not None: - mesh.nodes[node].pop('refine_rgbd') - mesh.nodes[node]['color'] = mesh.nodes[node]['backup_color'] - for info in info_on_pix[(node[0], node[1])]: - if info['depth'] == node[2]: - info['color'] = mesh.nodes[node]['backup_color'] - - return mesh, info_on_pix - -def refine_depth_around_edge(mask_depth, far_edge, uncleaned_far_edge, near_edge, mask, all_depth, config): - if isinstance(mask_depth, torch.Tensor): - if mask_depth.is_cuda: - mask_depth = mask_depth.cpu() - mask_depth = mask_depth.data - mask_depth = mask_depth.numpy() - if isinstance(far_edge, torch.Tensor): - if far_edge.is_cuda: - far_edge = far_edge.cpu() - far_edge = far_edge.data - far_edge = far_edge.numpy() - if isinstance(uncleaned_far_edge, torch.Tensor): - if uncleaned_far_edge.is_cuda: - uncleaned_far_edge = uncleaned_far_edge.cpu() - uncleaned_far_edge = uncleaned_far_edge.data - uncleaned_far_edge = uncleaned_far_edge.numpy() - if isinstance(near_edge, torch.Tensor): - if near_edge.is_cuda: - near_edge = near_edge.cpu() - near_edge = near_edge.data - near_edge = near_edge.numpy() - if isinstance(mask, torch.Tensor): - if mask.is_cuda: - mask = mask.cpu() - mask = mask.data - mask = mask.numpy() - mask = mask.squeeze() - uncleaned_far_edge = uncleaned_far_edge.squeeze() - far_edge = far_edge.squeeze() - near_edge = near_edge.squeeze() - mask_depth = mask_depth.squeeze() - dilate_far_edge = cv2.dilate(uncleaned_far_edge.astype(np.uint8), kernel=np.array([[0,1,0],[1,1,1],[0,1,0]]).astype(np.uint8), iterations=1) - near_edge[dilate_far_edge == 0] = 0 - dilate_near_edge = cv2.dilate(near_edge.astype(np.uint8), kernel=np.array([[0,1,0],[1,1,1],[0,1,0]]).astype(np.uint8), iterations=1) - far_edge[dilate_near_edge == 0] = 0 - init_far_edge = far_edge.copy() - init_near_edge = near_edge.copy() - for i in range(config['depth_edge_dilate_2']): - init_far_edge = cv2.dilate(init_far_edge, kernel=np.array([[0,1,0],[1,1,1],[0,1,0]]).astype(np.uint8), iterations=1) - init_far_edge[init_near_edge == 1] = 0 - init_near_edge = cv2.dilate(init_near_edge, kernel=np.array([[0,1,0],[1,1,1],[0,1,0]]).astype(np.uint8), iterations=1) - init_near_edge[init_far_edge == 1] = 0 - init_far_edge[mask == 0] = 0 - init_near_edge[mask == 0] = 0 - hole_far_edge = 1 - init_far_edge - hole_near_edge = 1 - 
init_near_edge - change = None - while True: - change = False - hole_far_edge[init_near_edge == 1] = 0 - hole_near_edge[init_far_edge == 1] = 0 - far_pxs, far_pys = np.where((hole_far_edge == 0) * (init_far_edge == 1) > 0) - current_hole_far_edge = hole_far_edge.copy() - for far_px, far_py in zip(far_pxs, far_pys): - min_px = max(far_px - 1, 0) - max_px = min(far_px + 2, mask.shape[0]-1) - min_py = max(far_py - 1, 0) - max_py = min(far_py + 2, mask.shape[1]-1) - hole_far = current_hole_far_edge[min_px: max_px, min_py: max_py] - tmp_mask = mask[min_px: max_px, min_py: max_py] - all_depth_patch = all_depth[min_px: max_px, min_py: max_py] * 0 - all_depth_mask = (all_depth_patch != 0).astype(np.uint8) - cross_element = np.array([[0,1,0],[1,1,1],[0,1,0]])[min_px - (far_px - 1): max_px - (far_px - 1), min_py - (far_py - 1): max_py - (far_py - 1)] - combine_mask = (tmp_mask + all_depth_mask).clip(0, 1) * hole_far * cross_element - tmp_patch = combine_mask * (mask_depth[min_px: max_px, min_py: max_py] + all_depth_patch) - number = np.count_nonzero(tmp_patch) - if number > 0: - mask_depth[far_px, far_py] = np.sum(tmp_patch).astype(np.float32) / max(number, 1e-6) - hole_far_edge[far_px, far_py] = 1 - change = True - near_pxs, near_pys = np.where((hole_near_edge == 0) * (init_near_edge == 1) > 0) - current_hole_near_edge = hole_near_edge.copy() - for near_px, near_py in zip(near_pxs, near_pys): - min_px = max(near_px - 1, 0) - max_px = min(near_px + 2, mask.shape[0]-1) - min_py = max(near_py - 1, 0) - max_py = min(near_py + 2, mask.shape[1]-1) - hole_near = current_hole_near_edge[min_px: max_px, min_py: max_py] - tmp_mask = mask[min_px: max_px, min_py: max_py] - all_depth_patch = all_depth[min_px: max_px, min_py: max_py] * 0 - all_depth_mask = (all_depth_patch != 0).astype(np.uint8) - cross_element = np.array([[0,1,0],[1,1,1],[0,1,0]])[min_px - near_px + 1:max_px - near_px + 1, min_py - near_py + 1:max_py - near_py + 1] - combine_mask = (tmp_mask + all_depth_mask).clip(0, 1) * hole_near * cross_element - tmp_patch = combine_mask * (mask_depth[min_px: max_px, min_py: max_py] + all_depth_patch) - number = np.count_nonzero(tmp_patch) - if number > 0: - mask_depth[near_px, near_py] = np.sum(tmp_patch) / max(number, 1e-6) - hole_near_edge[near_px, near_py] = 1 - change = True - if change is False: - break - - return mask_depth - - - -def vis_depth_edge_connectivity(depth, config): - disp = 1./depth - u_diff = (disp[1:, :] - disp[:-1, :])[:-1, 1:-1] - b_diff = (disp[:-1, :] - disp[1:, :])[1:, 1:-1] - l_diff = (disp[:, 1:] - disp[:, :-1])[1:-1, :-1] - r_diff = (disp[:, :-1] - disp[:, 1:])[1:-1, 1:] - u_over = (np.abs(u_diff) > config['depth_threshold']).astype(np.float32) - b_over = (np.abs(b_diff) > config['depth_threshold']).astype(np.float32) - l_over = (np.abs(l_diff) > config['depth_threshold']).astype(np.float32) - r_over = (np.abs(r_diff) > config['depth_threshold']).astype(np.float32) - concat_diff = np.stack([u_diff, b_diff, r_diff, l_diff], axis=-1) - concat_over = np.stack([u_over, b_over, r_over, l_over], axis=-1) - over_diff = concat_diff * concat_over - pos_over = (over_diff > 0).astype(np.float32).sum(-1).clip(0, 1) - neg_over = (over_diff < 0).astype(np.float32).sum(-1).clip(0, 1) - neg_over[(over_diff > 0).astype(np.float32).sum(-1) > 0] = 0 - _, edge_label = cv2.connectedComponents(pos_over.astype(np.uint8), connectivity=8) - T_junction_maps = np.zeros_like(pos_over) - for edge_id in range(1, edge_label.max() + 1): - edge_map = (edge_label == edge_id).astype(np.uint8) - edge_map = 
np.pad(edge_map, pad_width=((1,1),(1,1)), mode='constant') - four_direc = np.roll(edge_map, 1, 1) + np.roll(edge_map, -1, 1) + np.roll(edge_map, 1, 0) + np.roll(edge_map, -1, 0) - eight_direc = np.roll(np.roll(edge_map, 1, 1), 1, 0) + np.roll(np.roll(edge_map, 1, 1), -1, 0) + \ - np.roll(np.roll(edge_map, -1, 1), 1, 0) + np.roll(np.roll(edge_map, -1, 1), -1, 0) - eight_direc = (eight_direc + four_direc)[1:-1,1:-1] - pos_over[eight_direc > 2] = 0 - T_junction_maps[eight_direc > 2] = 1 - _, edge_label = cv2.connectedComponents(pos_over.astype(np.uint8), connectivity=8) - edge_label = np.pad(edge_label, 1, mode='constant') - - return edge_label - - - -def max_size(mat, value=0): - if not (mat and mat[0]): return (0, 0) - it = iter(mat) - prev = [(el==value) for el in next(it)] - max_size = max_rectangle_size(prev) - for row in it: - hist = [(1+h) if el == value else 0 for h, el in zip(prev, row)] - max_size = max(max_size, max_rectangle_size(hist), key=get_area) - prev = hist - return max_size - -def max_rectangle_size(histogram): - Info = namedtuple('Info', 'start height') - stack = [] - top = lambda: stack[-1] - max_size = (0, 0) # height, width of the largest rectangle - pos = 0 # current position in the histogram - for pos, height in enumerate(histogram): - start = pos # position where rectangle starts - while True: - if not stack or height > top().height: - stack.append(Info(start, height)) # push - if stack and height < top().height: - max_size = max(max_size, (top().height, (pos-top().start)), - key=get_area) - start, _ = stack.pop() - continue - break # height == top().height goes here - - pos += 1 - for start, height in stack: - max_size = max(max_size, (height, (pos-start)), - key=get_area) - - return max_size - -def get_area(size): - return reduce(mul, size) - -def find_anchors(matrix): - matrix = [[*x] for x in matrix] - mh, mw = max_size(matrix) - matrix = np.array(matrix) - # element = np.zeros((mh, mw)) - for i in range(matrix.shape[0] + 1 - mh): - for j in range(matrix.shape[1] + 1 - mw): - if matrix[i:i + mh, j:j + mw].max() == 0: - return i, i + mh, j, j + mw - -def find_largest_rect(dst_img, bg_color=(128, 128, 128)): - valid = np.any(dst_img[..., :3] != bg_color, axis=-1) - dst_h, dst_w = dst_img.shape[:2] - ret, labels = cv2.connectedComponents(np.uint8(valid == False)) - red_mat = np.zeros_like(labels) - # denoise - for i in range(1, np.max(labels)+1, 1): - x, y, w, h = cv2.boundingRect(np.uint8(labels==i)) - if x == 0 or (x+w) == dst_h or y == 0 or (y+h) == dst_w: - red_mat[labels==i] = 1 - # crop - t, b, l, r = find_anchors(red_mat) - - return t, b, l, r diff --git a/spaces/evawade17/Skin_cancer_detecter/app.py b/spaces/evawade17/Skin_cancer_detecter/app.py deleted file mode 100644 index f4837e96d15aee4132c76690e806ad2470efc2b8..0000000000000000000000000000000000000000 --- a/spaces/evawade17/Skin_cancer_detecter/app.py +++ /dev/null @@ -1,20 +0,0 @@ -from fastai.vision.all import * -import gradio as gr - -learn = load_learner("export.pkl") - -categories = ("Benign", "Malignant") - -def classify_image(img): - pred,idx,probs = learn.predict(img) - return dict(zip(categories, map(float,probs))) - -image = gr.inputs.Image(shape=(192,192)) -label = gr.outputs.Label() -examples = ['benign.jpeg','malignant.jpeg'] -title = 'Skin Cancer Predictor' -description = 'This app predicts skin cancer. For reference only.' -article = "Author: Eva Wade. 
" - -intf = gr.Interface(fn=classify_image, inputs=image, outputs=label, examples=examples, title=title, description=description, article=article) -intf.launch(inline=False) diff --git a/spaces/f2api/gpt-academic/toolbox.py b/spaces/f2api/gpt-academic/toolbox.py deleted file mode 100644 index 10e5a8759b710c8e6190d1de6793fe1290a24313..0000000000000000000000000000000000000000 --- a/spaces/f2api/gpt-academic/toolbox.py +++ /dev/null @@ -1,786 +0,0 @@ -import markdown -import importlib -import traceback -import inspect -import re -import os -from latex2mathml.converter import convert as tex2mathml -from functools import wraps, lru_cache - -""" -======================================================================== -第一部分 -函数插件输入输出接驳区 - - ChatBotWithCookies: 带Cookies的Chatbot类,为实现更多强大的功能做基础 - - ArgsGeneralWrapper: 装饰器函数,用于重组输入参数,改变输入参数的顺序与结构 - - update_ui: 刷新界面用 yield from update_ui(chatbot, history) - - CatchException: 将插件中出的所有问题显示在界面上 - - HotReload: 实现插件的热更新 - - trimmed_format_exc: 打印traceback,为了安全而隐藏绝对地址 -======================================================================== -""" - -class ChatBotWithCookies(list): - def __init__(self, cookie): - self._cookies = cookie - - def write_list(self, list): - for t in list: - self.append(t) - - def get_list(self): - return [t for t in self] - - def get_cookies(self): - return self._cookies - - -def ArgsGeneralWrapper(f): - """ - 装饰器函数,用于重组输入参数,改变输入参数的顺序与结构。 - """ - def decorated(cookies, max_length, llm_model, txt, txt2, top_p, temperature, chatbot, history, system_prompt, plugin_advanced_arg, *args): - txt_passon = txt - if txt == "" and txt2 != "": txt_passon = txt2 - # 引入一个有cookie的chatbot - cookies.update({ - 'top_p':top_p, - 'temperature':temperature, - }) - llm_kwargs = { - 'api_key': cookies['api_key'], - 'llm_model': llm_model, - 'top_p':top_p, - 'max_length': max_length, - 'temperature':temperature, - } - plugin_kwargs = { - "advanced_arg": plugin_advanced_arg, - } - chatbot_with_cookie = ChatBotWithCookies(cookies) - chatbot_with_cookie.write_list(chatbot) - yield from f(txt_passon, llm_kwargs, plugin_kwargs, chatbot_with_cookie, history, system_prompt, *args) - return decorated - - -def update_ui(chatbot, history, msg='正常', **kwargs): # 刷新界面 - """ - 刷新用户界面 - """ - assert isinstance(chatbot, ChatBotWithCookies), "在传递chatbot的过程中不要将其丢弃。必要时,可用clear将其清空,然后用for+append循环重新赋值。" - yield chatbot.get_cookies(), chatbot, history, msg - -def trimmed_format_exc(): - import os, traceback - str = traceback.format_exc() - current_path = os.getcwd() - replace_path = "." 
- return str.replace(current_path, replace_path) - -def CatchException(f): - """ - 装饰器函数,捕捉函数f中的异常并封装到一个生成器中返回,并显示到聊天当中。 - """ - - @wraps(f) - def decorated(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT): - try: - yield from f(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT) - except Exception as e: - from check_proxy import check_proxy - from toolbox import get_conf - proxies, = get_conf('proxies') - tb_str = '```\n' + trimmed_format_exc() + '```' - if len(chatbot) == 0: - chatbot.clear() - chatbot.append(["插件调度异常", "异常原因"]) - chatbot[-1] = (chatbot[-1][0], - f"[Local Message] 实验性函数调用出错: \n\n{tb_str} \n\n当前代理可用性: \n\n{check_proxy(proxies)}") - yield from update_ui(chatbot=chatbot, history=history, msg=f'异常 {e}') # 刷新界面 - return decorated - - -def HotReload(f): - """ - HotReload的装饰器函数,用于实现Python函数插件的热更新。 - 函数热更新是指在不停止程序运行的情况下,更新函数代码,从而达到实时更新功能。 - 在装饰器内部,使用wraps(f)来保留函数的元信息,并定义了一个名为decorated的内部函数。 - 内部函数通过使用importlib模块的reload函数和inspect模块的getmodule函数来重新加载并获取函数模块, - 然后通过getattr函数获取函数名,并在新模块中重新加载函数。 - 最后,使用yield from语句返回重新加载过的函数,并在被装饰的函数上执行。 - 最终,装饰器函数返回内部函数。这个内部函数可以将函数的原始定义更新为最新版本,并执行函数的新版本。 - """ - @wraps(f) - def decorated(*args, **kwargs): - fn_name = f.__name__ - f_hot_reload = getattr(importlib.reload(inspect.getmodule(f)), fn_name) - yield from f_hot_reload(*args, **kwargs) - return decorated - - -""" -======================================================================== -第二部分 -其他小工具: - - write_results_to_file: 将结果写入markdown文件中 - - regular_txt_to_markdown: 将普通文本转换为Markdown格式的文本。 - - report_execption: 向chatbot中添加简单的意外错误信息 - - text_divide_paragraph: 将文本按照段落分隔符分割开,生成带有段落标签的HTML代码。 - - markdown_convertion: 用多种方式组合,将markdown转化为好看的html - - format_io: 接管gradio默认的markdown处理方式 - - on_file_uploaded: 处理文件的上传(自动解压) - - on_report_generated: 将生成的报告自动投射到文件上传区 - - clip_history: 当历史上下文过长时,自动截断 - - get_conf: 获取设置 - - select_api_key: 根据当前的模型类别,抽取可用的api-key -======================================================================== -""" - -def get_reduce_token_percent(text): - """ - * 此函数未来将被弃用 - """ - try: - # text = "maximum context length is 4097 tokens. 
However, your messages resulted in 4870 tokens" - pattern = r"(\d+)\s+tokens\b" - match = re.findall(pattern, text) - EXCEED_ALLO = 500 # 稍微留一点余地,否则在回复时会因余量太少出问题 - max_limit = float(match[0]) - EXCEED_ALLO - current_tokens = float(match[1]) - ratio = max_limit/current_tokens - assert ratio > 0 and ratio < 1 - return ratio, str(int(current_tokens-max_limit)) - except: - return 0.5, '不详' - - -def write_results_to_file(history, file_name=None): - """ - 将对话记录history以Markdown格式写入文件中。如果没有指定文件名,则使用当前时间生成文件名。 - """ - import os - import time - if file_name is None: - # file_name = time.strftime("chatGPT分析报告%Y-%m-%d-%H-%M-%S", time.localtime()) + '.md' - file_name = 'chatGPT分析报告' + \ - time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + '.md' - os.makedirs('./gpt_log/', exist_ok=True) - with open(f'./gpt_log/{file_name}', 'w', encoding='utf8') as f: - f.write('# chatGPT 分析报告\n') - for i, content in enumerate(history): - try: - if type(content) != str: content = str(content) - except: - continue - if i % 2 == 0: - f.write('## ') - try: - f.write(content) - except: - # remove everything that cannot be handled by utf8 - f.write(content.encode('utf-8', 'ignore').decode()) - f.write('\n\n') - res = '以上材料已经被写入' + os.path.abspath(f'./gpt_log/{file_name}') - print(res) - return res - - -def regular_txt_to_markdown(text): - """ - 将普通文本转换为Markdown格式的文本。 - """ - text = text.replace('\n', '\n\n') - text = text.replace('\n\n\n', '\n\n') - text = text.replace('\n\n\n', '\n\n') - return text - - - - -def report_execption(chatbot, history, a, b): - """ - 向chatbot中添加错误信息 - """ - chatbot.append((a, b)) - history.append(a) - history.append(b) - - -def text_divide_paragraph(text): - """ - 将文本按照段落分隔符分割开,生成带有段落标签的HTML代码。 - """ - if '```' in text: - # careful input - return text - else: - # wtf input - lines = text.split("\n") - for i, line in enumerate(lines): - lines[i] = lines[i].replace(" ", " ") - text = "
".join(lines) - return text - -@lru_cache(maxsize=128) # 使用 lru缓存 加快转换速度 -def markdown_convertion(txt): - """ - 将Markdown格式的文本转换为HTML格式。如果包含数学公式,则先将公式转换为HTML格式。 - """ - pre = '
' - suf = '
' - if txt.startswith(pre) and txt.endswith(suf): - # print('警告,输入了已经经过转化的字符串,二次转化可能出问题') - return txt # 已经被转化过,不需要再次转化 - - markdown_extension_configs = { - 'mdx_math': { - 'enable_dollar_delimiter': True, - 'use_gitlab_delimiters': False, - }, - } - find_equation_pattern = r'\n', '') - return content - - def no_code(txt): - if '```' not in txt: - return True - else: - if '```reference' in txt: return True # newbing - else: return False - - if ('$' in txt) and no_code(txt): # 有$标识的公式符号,且没有代码段```的标识 - # convert everything to html format - split = markdown.markdown(text='---') - convert_stage_1 = markdown.markdown(text=txt, extensions=['mdx_math', 'fenced_code', 'tables', 'sane_lists'], extension_configs=markdown_extension_configs) - convert_stage_1 = markdown_bug_hunt(convert_stage_1) - # re.DOTALL: Make the '.' special character match any character at all, including a newline; without this flag, '.' will match anything except a newline. Corresponds to the inline flag (?s). - # 1. convert to easy-to-copy tex (do not render math) - convert_stage_2_1, n = re.subn(find_equation_pattern, replace_math_no_render, convert_stage_1, flags=re.DOTALL) - # 2. convert to rendered equation - convert_stage_2_2, n = re.subn(find_equation_pattern, replace_math_render, convert_stage_1, flags=re.DOTALL) - # cat them together - return pre + convert_stage_2_1 + f'{split}' + convert_stage_2_2 + suf - else: - return pre + markdown.markdown(txt, extensions=['fenced_code', 'codehilite', 'tables', 'sane_lists']) + suf - - -def close_up_code_segment_during_stream(gpt_reply): - """ - 在gpt输出代码的中途(输出了前面的```,但还没输出完后面的```),补上后面的``` - - Args: - gpt_reply (str): GPT模型返回的回复字符串。 - - Returns: - str: 返回一个新的字符串,将输出代码片段的“后面的```”补上。 - - """ - if '```' not in gpt_reply: - return gpt_reply - if gpt_reply.endswith('```'): - return gpt_reply - - # 排除了以上两个情况,我们 - segments = gpt_reply.split('```') - n_mark = len(segments) - 1 - if n_mark % 2 == 1: - # print('输出代码片段中!') - return gpt_reply+'\n```' - else: - return gpt_reply - - -def format_io(self, y): - """ - 将输入和输出解析为HTML格式。将y中最后一项的输入部分段落化,并将输出部分的Markdown和数学公式转换为HTML格式。 - """ - if y is None or y == []: - return [] - i_ask, gpt_reply = y[-1] - i_ask = text_divide_paragraph(i_ask) # 输入部分太自由,预处理一波 - gpt_reply = close_up_code_segment_during_stream(gpt_reply) # 当代码输出半截的时候,试着补上后个``` - y[-1] = ( - None if i_ask is None else markdown.markdown(i_ask, extensions=['fenced_code', 'tables']), - None if gpt_reply is None else markdown_convertion(gpt_reply) - ) - return y - - -def find_free_port(): - """ - 返回当前系统中可用的未使用端口。 - """ - import socket - from contextlib import closing - with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s: - s.bind(('', 0)) - s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - return s.getsockname()[1] - - -def extract_archive(file_path, dest_dir): - import zipfile - import tarfile - import os - # Get the file extension of the input file - file_extension = os.path.splitext(file_path)[1] - - # Extract the archive based on its extension - if file_extension == '.zip': - with zipfile.ZipFile(file_path, 'r') as zipobj: - zipobj.extractall(path=dest_dir) - print("Successfully extracted zip archive to {}".format(dest_dir)) - - elif file_extension in ['.tar', '.gz', '.bz2']: - with tarfile.open(file_path, 'r:*') as tarobj: - tarobj.extractall(path=dest_dir) - print("Successfully extracted tar archive to {}".format(dest_dir)) - - # 第三方库,需要预先pip install rarfile - # 此外,Windows上还需要安装winrar软件,配置其Path环境变量,如"C:\Program Files\WinRAR"才可以 - elif file_extension == '.rar': 
- try: - import rarfile - with rarfile.RarFile(file_path) as rf: - rf.extractall(path=dest_dir) - print("Successfully extracted rar archive to {}".format(dest_dir)) - except: - print("Rar format requires additional dependencies to install") - return '\n\n需要安装pip install rarfile来解压rar文件' - - # 第三方库,需要预先pip install py7zr - elif file_extension == '.7z': - try: - import py7zr - with py7zr.SevenZipFile(file_path, mode='r') as f: - f.extractall(path=dest_dir) - print("Successfully extracted 7z archive to {}".format(dest_dir)) - except: - print("7z format requires additional dependencies to install") - return '\n\n需要安装pip install py7zr来解压7z文件' - else: - return '' - return '' - - -def find_recent_files(directory): - """ - me: find files that is created with in one minutes under a directory with python, write a function - gpt: here it is! - """ - import os - import time - current_time = time.time() - one_minute_ago = current_time - 60 - recent_files = [] - - for filename in os.listdir(directory): - file_path = os.path.join(directory, filename) - if file_path.endswith('.log'): - continue - created_time = os.path.getmtime(file_path) - if created_time >= one_minute_ago: - if os.path.isdir(file_path): - continue - recent_files.append(file_path) - - return recent_files - - -def on_file_uploaded(files, chatbot, txt, txt2, checkboxes): - """ - 当文件被上传时的回调函数 - """ - if len(files) == 0: - return chatbot, txt - import shutil - import os - import time - import glob - from toolbox import extract_archive - try: - shutil.rmtree('./private_upload/') - except: - pass - time_tag = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) - os.makedirs(f'private_upload/{time_tag}', exist_ok=True) - err_msg = '' - for file in files: - file_origin_name = os.path.basename(file.orig_name) - shutil.copy(file.name, f'private_upload/{time_tag}/{file_origin_name}') - err_msg += extract_archive(f'private_upload/{time_tag}/{file_origin_name}', - dest_dir=f'private_upload/{time_tag}/{file_origin_name}.extract') - moved_files = [fp for fp in glob.glob('private_upload/**/*', recursive=True)] - if "底部输入区" in checkboxes: - txt = "" - txt2 = f'private_upload/{time_tag}' - else: - txt = f'private_upload/{time_tag}' - txt2 = "" - moved_files_str = '\t\n\n'.join(moved_files) - chatbot.append(['我上传了文件,请查收', - f'[Local Message] 收到以下文件: \n\n{moved_files_str}' + - f'\n\n调用路径参数已自动修正到: \n\n{txt}' + - f'\n\n现在您点击任意“红颜色”标识的函数插件时,以上文件将被作为输入参数'+err_msg]) - return chatbot, txt, txt2 - - -def on_report_generated(files, chatbot): - from toolbox import find_recent_files - report_files = find_recent_files('gpt_log') - if len(report_files) == 0: - return None, chatbot - # files.extend(report_files) - chatbot.append(['报告如何远程获取?', '报告已经添加到右侧“文件上传区”(可能处于折叠状态),请查收。']) - return report_files, chatbot - -def is_openai_api_key(key): - API_MATCH_ORIGINAL = re.match(r"sk-[a-zA-Z0-9]{48}$", key) - API_MATCH_AZURE = re.match(r"[a-zA-Z0-9]{32}$", key) - return bool(API_MATCH_ORIGINAL) or bool(API_MATCH_AZURE) - -def is_api2d_key(key): - if key.startswith('fk') and len(key) == 41: - return True - else: - return False - -def is_any_api_key(key): - if ',' in key: - keys = key.split(',') - for k in keys: - if is_any_api_key(k): return True - return False - else: - return is_openai_api_key(key) or is_api2d_key(key) - -def what_keys(keys): - avail_key_list = {'OpenAI Key':0, "API2D Key":0} - key_list = keys.split(',') - - for k in key_list: - if is_openai_api_key(k): - avail_key_list['OpenAI Key'] += 1 - - for k in key_list: - if is_api2d_key(k): - avail_key_list['API2D 
Key'] += 1 - - return f"检测到: OpenAI Key {avail_key_list['OpenAI Key']} 个,API2D Key {avail_key_list['API2D Key']} 个" - -def select_api_key(keys, llm_model): - import random - avail_key_list = [] - key_list = keys.split(',') - - if llm_model.startswith('gpt-'): - for k in key_list: - if is_openai_api_key(k): avail_key_list.append(k) - - if llm_model.startswith('api2d-'): - for k in key_list: - if is_api2d_key(k): avail_key_list.append(k) - - if len(avail_key_list) == 0: - raise RuntimeError(f"您提供的api-key不满足要求,不包含任何可用于{llm_model}的api-key。您可能选择了错误的模型或请求源。") - - api_key = random.choice(avail_key_list) # 随机负载均衡 - return api_key - -def read_env_variable(arg, default_value): - """ - 环境变量可以是 `GPT_ACADEMIC_CONFIG`(优先),也可以直接是`CONFIG` - 例如在windows cmd中,既可以写: - set USE_PROXY=True - set API_KEY=sk-j7caBpkRoxxxxxxxxxxxxxxxxxxxxxxxxxxxx - set proxies={"http":"http://127.0.0.1:10085", "https":"http://127.0.0.1:10085",} - set AVAIL_LLM_MODELS=["gpt-3.5-turbo", "chatglm"] - set AUTHENTICATION=[("username", "password"), ("username2", "password2")] - 也可以写: - set GPT_ACADEMIC_USE_PROXY=True - set GPT_ACADEMIC_API_KEY=sk-j7caBpkRoxxxxxxxxxxxxxxxxxxxxxxxxxxxx - set GPT_ACADEMIC_proxies={"http":"http://127.0.0.1:10085", "https":"http://127.0.0.1:10085",} - set GPT_ACADEMIC_AVAIL_LLM_MODELS=["gpt-3.5-turbo", "chatglm"] - set GPT_ACADEMIC_AUTHENTICATION=[("username", "password"), ("username2", "password2")] - """ - from colorful import print亮红, print亮绿 - arg_with_prefix = "GPT_ACADEMIC_" + arg - if arg_with_prefix in os.environ: - env_arg = os.environ[arg_with_prefix] - elif arg in os.environ: - env_arg = os.environ[arg] - else: - raise KeyError - print(f"[ENV_VAR] 尝试加载{arg},默认值:{default_value} --> 修正值:{env_arg}") - try: - if isinstance(default_value, bool): - env_arg = env_arg.strip() - if env_arg == 'True': r = True - elif env_arg == 'False': r = False - else: print('enter True or False, but have:', env_arg); r = default_value - elif isinstance(default_value, int): - r = int(env_arg) - elif isinstance(default_value, float): - r = float(env_arg) - elif isinstance(default_value, str): - r = env_arg.strip() - elif isinstance(default_value, dict): - r = eval(env_arg) - elif isinstance(default_value, list): - r = eval(env_arg) - elif default_value is None: - assert arg == "proxies" - r = eval(env_arg) - else: - print亮红(f"[ENV_VAR] 环境变量{arg}不支持通过环境变量设置! ") - raise KeyError - except: - print亮红(f"[ENV_VAR] 环境变量{arg}加载失败! ") - raise KeyError(f"[ENV_VAR] 环境变量{arg}加载失败! ") - - print亮绿(f"[ENV_VAR] 成功读取环境变量{arg}") - return r - -@lru_cache(maxsize=128) -def read_single_conf_with_lru_cache(arg): - from colorful import print亮红, print亮绿, print亮蓝 - try: - # 优先级1. 获取环境变量作为配置 - default_ref = getattr(importlib.import_module('config'), arg) # 读取默认值作为数据类型转换的参考 - r = read_env_variable(arg, default_ref) - except: - try: - # 优先级2. 获取config_private中的配置 - r = getattr(importlib.import_module('config_private'), arg) - except: - # 优先级3. 
获取config中的配置 - r = getattr(importlib.import_module('config'), arg) - - # 在读取API_KEY时,检查一下是不是忘了改config - if arg == 'API_KEY': - print亮蓝(f"[API_KEY] 本项目现已支持OpenAI和API2D的api-key。也支持同时填写多个api-key,如API_KEY=\"openai-key1,openai-key2,api2d-key3\"") - print亮蓝(f"[API_KEY] 您既可以在config.py中修改api-key(s),也可以在问题输入区输入临时的api-key(s),然后回车键提交后即可生效。") - if is_any_api_key(r): - print亮绿(f"[API_KEY] 您的 API_KEY 是: {r[:15]}*** API_KEY 导入成功") - else: - print亮红( "[API_KEY] 正确的 API_KEY 是'sk'开头的51位密钥(OpenAI),或者 'fk'开头的41位密钥,请在config文件中修改API密钥之后再运行。") - if arg == 'proxies': - if r is None: - print亮红('[PROXY] 网络代理状态:未配置。无代理状态下很可能无法访问OpenAI家族的模型。建议:检查USE_PROXY选项是否修改。') - else: - print亮绿('[PROXY] 网络代理状态:已配置。配置信息如下:', r) - assert isinstance(r, dict), 'proxies格式错误,请注意proxies选项的格式,不要遗漏括号。' - return r - - -def get_conf(*args): - # 建议您复制一个config_private.py放自己的秘密, 如API和代理网址, 避免不小心传github被别人看到 - res = [] - for arg in args: - r = read_single_conf_with_lru_cache(arg) - res.append(r) - return res - - -def clear_line_break(txt): - txt = txt.replace('\n', ' ') - txt = txt.replace(' ', ' ') - txt = txt.replace(' ', ' ') - return txt - - -class DummyWith(): - """ - 这段代码定义了一个名为DummyWith的空上下文管理器, - 它的作用是……额……就是不起作用,即在代码结构不变得情况下取代其他的上下文管理器。 - 上下文管理器是一种Python对象,用于与with语句一起使用, - 以确保一些资源在代码块执行期间得到正确的初始化和清理。 - 上下文管理器必须实现两个方法,分别为 __enter__()和 __exit__()。 - 在上下文执行开始的情况下,__enter__()方法会在代码块被执行前被调用, - 而在上下文执行结束时,__exit__()方法则会被调用。 - """ - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, traceback): - return - -def run_gradio_in_subpath(demo, auth, port, custom_path): - """ - 把gradio的运行地址更改到指定的二次路径上 - """ - def is_path_legal(path: str)->bool: - ''' - check path for sub url - path: path to check - return value: do sub url wrap - ''' - if path == "/": return True - if len(path) == 0: - print("ilegal custom path: {}\npath must not be empty\ndeploy on root url".format(path)) - return False - if path[0] == '/': - if path[1] != '/': - print("deploy on sub-path {}".format(path)) - return True - return False - print("ilegal custom path: {}\npath should begin with \'/\'\ndeploy on root url".format(path)) - return False - - if not is_path_legal(custom_path): raise RuntimeError('Ilegal custom path') - import uvicorn - import gradio as gr - from fastapi import FastAPI - app = FastAPI() - if custom_path != "/": - @app.get("/") - def read_main(): - return {"message": f"Gradio is running at: {custom_path}"} - app = gr.mount_gradio_app(app, demo, path=custom_path) - uvicorn.run(app, host="0.0.0.0", port=port) # , auth=auth - - -def clip_history(inputs, history, tokenizer, max_token_limit): - """ - reduce the length of history by clipping. - this function search for the longest entries to clip, little by little, - until the number of token of history is reduced under threshold. - 通过裁剪来缩短历史记录的长度。 - 此函数逐渐地搜索最长的条目进行剪辑, - 直到历史记录的标记数量降低到阈值以下。 - """ - import numpy as np - from request_llm.bridge_all import model_info - def get_token_num(txt): - return len(tokenizer.encode(txt, disallowed_special=())) - input_token_num = get_token_num(inputs) - if input_token_num < max_token_limit * 3 / 4: - # 当输入部分的token占比小于限制的3/4时,裁剪时 - # 1. 把input的余量留出来 - max_token_limit = max_token_limit - input_token_num - # 2. 把输出用的余量留出来 - max_token_limit = max_token_limit - 128 - # 3. 
如果余量太小了,直接清除历史 - if max_token_limit < 128: - history = [] - return history - else: - # 当输入部分的token占比 > 限制的3/4时,直接清除历史 - history = [] - return history - - everything = [''] - everything.extend(history) - n_token = get_token_num('\n'.join(everything)) - everything_token = [get_token_num(e) for e in everything] - - # 截断时的颗粒度 - delta = max(everything_token) // 16 - - while n_token > max_token_limit: - where = np.argmax(everything_token) - encoded = tokenizer.encode(everything[where], disallowed_special=()) - clipped_encoded = encoded[:len(encoded)-delta] - everything[where] = tokenizer.decode(clipped_encoded)[:-1] # -1 to remove the may-be illegal char - everything_token[where] = get_token_num(everything[where]) - n_token = get_token_num('\n'.join(everything)) - - history = everything[1:] - return history - -""" -======================================================================== -第三部分 -其他小工具: - - zip_folder: 把某个路径下所有文件压缩,然后转移到指定的另一个路径中(gpt写的) - - gen_time_str: 生成时间戳 -======================================================================== -""" - -def zip_folder(source_folder, dest_folder, zip_name): - import zipfile - import os - # Make sure the source folder exists - if not os.path.exists(source_folder): - print(f"{source_folder} does not exist") - return - - # Make sure the destination folder exists - if not os.path.exists(dest_folder): - print(f"{dest_folder} does not exist") - return - - # Create the name for the zip file - zip_file = os.path.join(dest_folder, zip_name) - - # Create a ZipFile object - with zipfile.ZipFile(zip_file, 'w', zipfile.ZIP_DEFLATED) as zipf: - # Walk through the source folder and add files to the zip file - for foldername, subfolders, filenames in os.walk(source_folder): - for filename in filenames: - filepath = os.path.join(foldername, filename) - zipf.write(filepath, arcname=os.path.relpath(filepath, source_folder)) - - # Move the zip file to the destination folder (if it wasn't already there) - if os.path.dirname(zip_file) != dest_folder: - os.rename(zip_file, os.path.join(dest_folder, os.path.basename(zip_file))) - zip_file = os.path.join(dest_folder, os.path.basename(zip_file)) - - print(f"Zip file created at {zip_file}") - -def gen_time_str(): - import time - return time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) - - -class ProxyNetworkActivate(): - """ - 这段代码定义了一个名为TempProxy的空上下文管理器, 用于给一小段代码上代理 - """ - def __enter__(self): - from toolbox import get_conf - proxies, = get_conf('proxies') - if 'no_proxy' in os.environ: os.environ.pop('no_proxy') - os.environ['HTTP_PROXY'] = proxies['http'] - os.environ['HTTPS_PROXY'] = proxies['https'] - return self - - def __exit__(self, exc_type, exc_value, traceback): - os.environ['no_proxy'] = '*' - if 'HTTP_PROXY' in os.environ: os.environ.pop('HTTP_PROXY') - if 'HTTPS_PROXY' in os.environ: os.environ.pop('HTTPS_PROXY') - return \ No newline at end of file diff --git a/spaces/falterWliame/Face_Mask_Detection/2011 Arm Realview Rvds 41 BEST Crack.md b/spaces/falterWliame/Face_Mask_Detection/2011 Arm Realview Rvds 41 BEST Crack.md deleted file mode 100644 index bc1ca0f7fe2767e01318e24b5d1b55780b505479..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/2011 Arm Realview Rvds 41 BEST Crack.md +++ /dev/null @@ -1,15 +0,0 @@ -

2011 Arm Realview Rvds 41 Crack


Download ★★★ https://urlca.com/2uDdRx



- -September 30, 2011 . For RVDS, the firmware files are located in: . In RVDS, select: Start → All Programs → ARM → RealView ICE vN.n → RealView ICE. h and click View. . -Download the archive with the firmware and unpack it into the RVDS folder (for example, RVDS1.bin). . -We start WinRAR. . -In it, select "File" → "Add to archive ..." and specify the path to the file. . -We start RVDS. . -Select "File" → "Options" → "General" → "File system" and specify the path to RVDS1.bin. . -Then select "File system" → "Add". -In the window that appears, select "Package files", click OK. . -To the question "Add files to the archive?" press "Yes". . -After that, click "OK". 8a78ff9644
-
-
-

diff --git a/spaces/falterWliame/Face_Mask_Detection/Driver Oprek All Mtk Android !!BETTER!!.md b/spaces/falterWliame/Face_Mask_Detection/Driver Oprek All Mtk Android !!BETTER!!.md deleted file mode 100644 index 64ebdf1a9f0793ae5ab79598097a91e2c9b91c01..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Driver Oprek All Mtk Android !!BETTER!!.md +++ /dev/null @@ -1,6 +0,0 @@ -

Driver Oprek All Mtk Android


Download File ✔ https://urlca.com/2uDdFF



- -firmware stb zte b760h 7z Cara penggunaan: Install MTK VCOM Driver jika belum ... Jual Stb Zte B860h Root All App All Channel Second Kab Bekasi Tezzatezza 28 ... Download Firmware Pulpstone ROM Android STB ZTE B860H V3 Final. ... Kembali lagi bersama saya disini di blog suka oprek,berhubung banyak sobat ... 1fdad05405
-
-
-

diff --git a/spaces/falterWliame/Face_Mask_Detection/Pst Walker License Keygen Crack ((TOP)).md b/spaces/falterWliame/Face_Mask_Detection/Pst Walker License Keygen Crack ((TOP)).md deleted file mode 100644 index 1abe5685cc5b971f27e95588087a27cbaf332322..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Pst Walker License Keygen Crack ((TOP)).md +++ /dev/null @@ -1,87 +0,0 @@ -
-

PST Walker License Keygen Crack: How to Access and Export Outlook Data Without Outlook

-

Outlook is one of the most widely used email clients in the world, but it is not always available or convenient to use. Sometimes you may need to access, view and export your Outlook data without installing Outlook on your computer. For example, you may have a corrupted or damaged PST or OST file that you want to recover, or you may want to migrate to another email client and need to export your mailbox to a different format.

-

In such cases, you can use PST Walker, a tool that can open and process Outlook files and mailboxes, such as PST, OST, DBX, EML and MSG. PST Walker can also recover data from corrupted or damaged PST or OST files, and find orphaned or deleted items inside the input mailboxes. PST Walker can work as a simple mailbox reader in case you lose connection to your mail server.

-

pst walker license keygen crack


Download File ✒ https://urlca.com/2uDcPq



-

However, PST Walker is not a free tool. You need to purchase a license to use it fully and without any limitations. A license costs $49 for a single user and $199 for a site license. If you don't want to spend that much money, you can try to use a license keygen crack instead.

-

A license keygen crack is a program that can generate a valid license code for a software product. By using a license keygen crack, you can bypass the copy protection and unlock all the features of PST Walker. In this article, we will show you how to download, install and update the PST Walker license keygen crack on your computer. We will also show you how to use PST Walker to access and export your Outlook data without Outlook.

-

Step 1: Download PST Walker License Keygen Crack

-

The first step is to download the PST Walker license keygen crack from a reliable source. There are many websites that offer free downloads of license keygen cracks for various software products, but not all of them are safe and trustworthy. Some of them may contain viruses, malware or spyware that can harm your computer or steal your personal information.

-

Therefore, you need to be careful and choose a reputable website that has positive reviews and feedback from other users. You can also use an antivirus program or a firewall to scan the downloaded file before opening it. Here are some of the websites that we recommend for downloading the PST Walker license keygen crack:

-
    -
  • https://crack4windows.com/crack/?s=pst-walker&id=23937
  • -
  • https://www.filefixation.com/pst-walker-4.21-crack-serial-keygen-download.html
  • -
  • https://carebiv2011.wixsite.com/dazamima/post/pst-walker-license-keygen-crack
  • -
-

Once you have downloaded the PST Walker license keygen crack file, you need to extract it using a program such as WinRAR or 7-Zip. You will get a folder that contains the license keygen crack executable file and some instructions on how to use it.

-

Step 2: Install PST Walker License Keygen Crack

-

The next step is to install the PST Walker license keygen crack on your computer. To do this, you need to run the license keygen crack executable file as an administrator. You will see a window that asks you to enter some information, such as your name, email address and phone number.

-

You can enter any fake or random information that you want, as it does not affect the activation process. The only important thing is to enter a valid email address that you can access later. After entering the information, click on the Generate button to create a license code for PST Walker.

-

You will see a message that says "License code generated successfully". You will also receive an email with the license code attached as a text file. You need to copy this license code and paste it into the activation window of PST Walker.

-

-

To open the activation window of PST Walker, you need to launch PST Walker on your computer. You will see a window that says "PST Walker Trial Version". Click on the Register button to open the activation window. You will see a window that asks you to enter your name and license code.

-

You can enter any name that you want, but make sure to enter the license code that you copied from the email or the text file. After entering the name and license code, click on the Activate button to activate PST Walker.

-

You will see a message that says "PST Walker activated successfully". You will also see that the window title changes from "PST Walker Trial Version" to "PST Walker". This means that you have successfully installed and activated PST Walker with the license keygen crack.

-

Step 3: Update PST Walker License Keygen Crack

-

The last step is to update PST Walker with the latest version available. This will ensure that PST Walker works properly and smoothly with any compatible software application. To update PST Walker with the latest version available, you need to follow these steps:

-
    -
  1. Go to https://www.pstwalker.com/download.html and download the latest version of PST Walker.
  2. -
  3. Run the downloaded file as an administrator and follow the installation wizard.
  4. -
  5. When asked to enter your name and license code, enter the same name and license code that you used before.
  6. -
  7. Click on the Update button to update PST Walker with the latest version available.
  8. -
  9. Restart your computer for the changes to take effect.
  10. -
-

That's it! You have successfully updated PST Walker with the latest version available. You can now use PST Walker with any compatible software application without any problems.

-

Step 4: Use PST Walker with Different Software Applications

-

PST Walker is a versatile tool that can work with different software applications for various purposes. Here are some of

-

Step 5: Benefits of Using PST Walker License Keygen Crack

-

By using PST Walker license keygen crack, you can enjoy many benefits that PST Walker offers. Here are some of them:

-
    -
  • You can access and view your Outlook data without Outlook. This is useful if you don't have Outlook installed on your computer, or if you have a different version of Outlook than the one that created the PST or OST file.
  • -
  • You can recover data from corrupted or damaged PST or OST files. This is helpful if you have lost some important emails or attachments due to a virus attack, a power outage, a hard disk failure or any other reason.
  • -
  • You can find orphaned or deleted items inside the input mailboxes. This is handy if you want to restore some emails or attachments that you accidentally deleted or that were removed by a third-party program.
  • -
  • You can export your mailbox to EML or MSG format. This is convenient if you want to migrate to another email client or backup your data to a different location.
  • -
  • You can save money by not buying a license for PST Walker. This is beneficial if you don't want to spend $49 for a single user license or $199 for a site license.
  • -
-

These are just some of the benefits of using PST Walker license keygen crack. You can discover more benefits by trying it yourself.

- -

Step 6: Risks of Using PST Walker License Keygen Crack

-

However, using PST Walker license keygen crack also comes with some risks that you should be aware of. Here are some of them:

-
    -
  • You may violate the copyright law and the terms of service of PST Walker. This is illegal and unethical, and you may face legal consequences or penalties if you get caught.
  • -
  • You may download a fake or malicious license keygen crack that can harm your computer or steal your personal information. This is dangerous and risky, and you may lose your data or compromise your security if you are not careful.
  • -
  • You may not get the latest updates and features of PST Walker. This is disappointing and frustrating, and you may miss out on some important improvements or bug fixes that PST Walker provides.
  • -
  • You may not get any technical support or customer service from PST Walker. This is inconvenient and annoying, and you may not get any help or assistance if you encounter any problems or issues with PST Walker.
  • -
-

These are just some of the risks of using PST Walker license keygen crack. You should weigh these risks against the benefits before deciding whether to use it or not.

- -

Conclusion

-

PST Walker is a tool that can open and process Outlook files and mailboxes, such as PST, OST, DBX, EML and MSG. It can also recover data from corrupted or damaged PST or OST files, and find orphaned or deleted items inside the input mailboxes. PST Walker can work as a simple mailbox reader in case you lose connection to your mail server.

-

To use PST Walker fully and without any limitations, you need to purchase a license for $49 for a single user or $199 for a site license. Alternatively, you can use a license keygen crack to generate a valid license code for PST Walker. By using a license keygen crack, you can bypass the copy protection and unlock all the features of PST Walker.

-

In this article, we showed you how to download, install and update the PST Walker license keygen crack on your computer. We also showed you how to use PST Walker to access and export your Outlook data without Outlook. We also discussed the benefits and risks of using PST Walker license keygen crack.

-

We hope this article was helpful and informative for you. If you have any questions or comments, please feel free to leave them below. Thank you for reading!

-

Step 7: Alternatives to PST Walker License Keygen Crack

-

If you are not comfortable with using PST Walker license keygen crack, or if you want to try some other options, you can consider some alternatives to PST Walker. Here are some of them:

-
    -
  • You can use a free or open source tool that can open and process Outlook files and mailboxes, such as Mail Viewer, Kernel Outlook PST Viewer, MailsDaddy Free PST Viewer, etc. These tools may not have all the features and functions of PST Walker, but they can still help you access and view your Outlook data without Outlook.
  • -
  • You can use a paid or premium tool that can open and process Outlook files and mailboxes, such as Stellar Repair for Outlook, DataNumen Outlook Repair, SysTools Outlook Recovery, etc. These tools may have more features and functions than PST Walker, but they also cost more money. You can compare the prices and features of these tools and choose the one that suits your needs and budget.
  • -
  • You can use an online or cloud-based service that can open and process Outlook files and mailboxes, such as Outlook PST Viewer Online, Online OST to PST Converter, Online Email Viewer, etc. These services may not require any installation or download on your computer, but they may also have some limitations and risks. You may need to upload your Outlook files to their servers, which may compromise your privacy and security. You may also need to pay for some services or features that are not free.
  • -
-

These are just some of the alternatives to PST Walker license keygen crack. You can explore more alternatives by searching online or asking for recommendations from other users.

- -

Step 8: Tips and Tricks for Using PST Walker

-

To make the most out of PST Walker, you can use some tips and tricks that can enhance your experience and performance with PST Walker. Here are some of them:

-
    -
  • You can use the search function of PST Walker to find specific emails or attachments in your mailbox. You can search by keywords, dates, senders, recipients, subjects, etc. You can also use advanced search options to refine your search results.
  • -
  • You can use the preview function of PST Walker to view the content and properties of your emails or attachments before exporting them. You can also print or reply to your emails using the default mail client.
  • -
  • You can use the export function of PST Walker to export your mailbox to EML or MSG format. You can also choose to export only selected folders or items instead of the whole mailbox. You can also specify the destination folder and file name for your exported files.
  • -
  • You can use the recovery function of PST Walker to recover data from corrupted or damaged PST or OST files. You can also configure PST Walker to search for orphaned or deleted items in your mailbox and display them in its main window.
  • -
  • You can use the MAPI properties function of PST Walker to view and edit the MAPI properties of your emails or attachments. MAPI properties are metadata that store information about your emails or attachments, such as sender name, recipient name, subject line, message class, etc. You can modify these properties to change the appearance or behavior of your emails or attachments.
  • -
-

These are just some of the tips and tricks for using PST Walker. You can learn more tips and tricks by reading the user manual or help file of PST Walker.

-

In conclusion, PST Walker is a tool that can open and process Outlook files and mailboxes, such as PST, OST, DBX, EML and MSG. It can also recover data from corrupted or damaged PST or OST files, and find orphaned or deleted items inside the input mailboxes. PST Walker can work as a simple mailbox reader in case you lose connection to your mail server.

-

To use PST Walker fully and without any limitations, you need to purchase a license for $49 for a single user or $199 for a site license. Alternatively, you can use a license keygen crack to generate a valid license code for PST Walker. By using a license keygen crack, you can bypass the copy protection and unlock all the features of PST Walker.

-

In this article, we showed you how to download, install and update the PST Walker license keygen crack on your computer. We also showed you how to use PST Walker to access and export your Outlook data without Outlook. We also discussed the benefits and risks of using PST Walker license keygen crack. We also suggested some alternatives to PST Walker license keygen crack and some tips and tricks for using PST Walker.

-

We hope this article was helpful and informative for you. If you have any questions or comments, please feel free to leave them below. Thank you for reading!

3cee63e6c2
-
-
\ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Dinozavrlar Mezozoy erasnn mhtm mxluqlar.md b/spaces/fatiXbelha/sd/Dinozavrlar Mezozoy erasnn mhtm mxluqlar.md deleted file mode 100644 index 09fd08723af7f9a95f8d46956139ef0ba5f222f9..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Dinozavrlar Mezozoy erasnn mhtm mxluqlar.md +++ /dev/null @@ -1,99 +0,0 @@ -
-

Dinozavrlar haqida ma'lumot

-

Dinozavrlar Yer kurrasida 245 million yildan 66 million yil oldin yashagan xordalilar tipiga mansub hayvonlar kladidir. Ular Trias davridan Təbaşir davrigacha Yerda hakim bo'lgan va quruda yashagan onurg'ali hayvonlardir. Ular o'lchami, shakli va turmush tarzi jihatidan juda turli xil bo'lgan. Ayrimlari katta va o'tyeygan, ayrimlari kichik va go'shtyeygan va ayrimlari parvozli va qushlar bilan aloqador bo'lgan. Dinozavrlar 66 million yil oldin məhv bo'lganlar, ehtimolliki asteroidning Yerga to'qnashidan so'ng iqlim va atrof-muhitda sodir bo'lgan katta o'zgarishlar tufayli.

-

dinozavrlar


DOWNLOAD ✶ https://urllie.com/2uNIpI



-

Dinozavrlar haqida ma'lumot berish uchun biz quyidagi savollarga javob beramiz:

-
    -
  • Dinozavrlarning asosiy turlari va xususiyatlari qanday?
  • -
  • Dinozavrlar uchun fosillar dalili qanday va ular qanday topilgan?
  • -
  • Dinozavrlarning məhv bo'lishining mumkin bo'lgan sabablari va ta'sirlari nima?
  • -
-

Dinozavrlarning asosiy turlari va xususiyatlari

-

Dinozavrlar ilmiy jihatdan Dinosauria deb nomlanadi. Bu nom ingliz paleontoloqi Riçard Ouen tomonidan 1842-yilda taklif etilgan va grek tilida \"qo'rqinchli katta kərtənkələ\" degan ma'noni anglatadi. Dinozavrlar bir necha asosiy guruhga bo'linadi:

- - - - - - - - - -
GuruhXususiyatiMisol
SauropodlarUzoq bo'yinli, uzun quyruqli va semiz tanali o'tyeygan dinozavrlar.Brachiosaurus, Apatosaurus, Diplodocus.
TiranozavrlarKatta, kuchli, go'shtyeygan dinozavrlar.Tyrannosaurus rex, Albertosaurus, Daspletosaurus.
RaptorlarKichik, tezkor, go'shtyeygan dinozavrlar.Velociraptor, Deinonychus, Utahraptor.
CeratopsiyalarBoshida qaychi, qo'ng'iroq va qirrali bo'lgan o'tyeygan dinozavrlar.Triceratops, Styracosaurus, Protoceratops.
AnkylozavrlarTanasi va quyruqi to'siq bo'lgan o'tyeygan dinozavrlar.Ankylosaurus, Euoplocephalus, Nodosaurus.
StegozavrlarOrqa tomonida to'siq va qaychi bo'lgan o'tyeygan dinozavrlar.Stegosaurus, Kentrosaurus, Huayangosaurus.
PterozavrlarParvozli dinozavrlar. Ular aslida Dinosauria kladiga mansub emas, lekin ularning yaqin qarindoshlari hisoblanadi.Pteranodon, Quetzalcoatlus, Pterodactylus.
-

Dinozavrlarning barcha turlari haqida to'liq ma'lumot olish uchun [bu saytni] ko'ring.

-

dinozavrlar haqida malumot
-dinozavrlar haqida faktlar
-dinozavrlar haqida video
-dinozavrlar haqida kitoblar
-dinozavrlar haqida rasmlar
-dinozavrlar haqida qiziqarli ma'lumotlar
-dinozavrlar haqida multfilm
-dinozavrlar haqida kino
-dinozavrlar haqida referat
-dinozavrlar haqida o'yinlar
-dinozavrlar davri qachon boshlangan
-dinozavrlar davri qachon tugagan
-dinozavrlar davri nima uchun tugagan
-dinozavrlar davri nima bo'lgan
-dinozavrlar davri filmi
-dinozavrlar davri multfilmi
-dinozavrlar davri o'yinlari
-dinozavrlar davri rasmlari
-dinozavrlar davri kitobi
-dinozavrlar davri video
-dunyodagi eng katta dinozavr qaysi
-dunyodagi eng kuchli dinozavr qaysi
-dunyodagi eng vaxshiy dinozavr qaysi
-dunyodagi eng kichik dinozavr qaysi
-dunyodagi eng g'alati dinozavr qaysi
-dunyodagi eng tezkor dinozavr qaysi
-dunyodagi eng ajoyib dinozavr qaysi
-dunyodagi eng go'zal dinozavr qaysi
-dunyodagi eng xavfli dinozavr qaysi
-dunyodagi eng maroqli dinozavr qaysi
-otyeyen dinozavrlar qanday bo'lgan
-otyeyen dinozavrlar nima yegan
-otyeyen dinozavrlar nima bilan himoyalangan
-otyeyen dinozavrlar nima uchun yo'qolgan
-otyeyen dinozavrlar nima uchun quvonmagan
-otyeyen dinozavrlar nima uchun yashagan
-otyeyen dinozavrlar nima uchun mohir bo'lgan
-otyeyen dinozavrlar nima uchun katta bo'lgan
-otyeyen dinozavrlar nima uchun rangli bo'lgan
-otyeyen dinozavrlar nima uchun guruhda yashagan
-etyeyen dinozavrlar qanday bo'lgan
-etyeyen dinozavrlar nima yegan
-etyeyen dinozavrlar nima bilan himoyalangan
-etyeyen dinozavrlar nima uchun yo'qolgan
-etyeyen dinozavrlar nima uchun quvonmagan
-etyeyen dinozavrlar nima uchun yashagan
-etyeyen dinozavrlar nima uchun mohir bo'lgan
-etyeyen dinozavrlar nima uchun kichik bo'lgan
-etyeyen dinozavrlar nima uchun rangsiz bo'lgan
-etyeyen dinozavrlar nima uchun yolg'iz yashagan

-

Dinozavrlar uchun fosillar dalili va ular qanday topilgan?

-

Dinozavrlar haqida ma'lumot beradigan asosiy manba fosillardir. Fosillar hayvon yoki o'simlikning jismoniy qoldiqlari yoki izlari bo'lib, ular Yer sirtidagi qatlamlarda saqlanib qolgan. Fosillarni topish uchun paleontologlar (fosillarni o'rganuvchi ilmiy mutaxassislar) turli yerlarda kazib chiqarishlar olib boradilar. Ular fosillarni tahlil qilish orqali dinozavrlarning turi, yoshi, o'lchami, shakli, turmush tarzi va evolyutsiyasi haqida xulosalar chiqaradilar.

-

Dinozavrlarning fosillari dunyo bo'ylab keng tarqalgan. Eng ko'p fosillarni Osiyo, Shimoliy Amerika va Yevropa qit'alarida topilgan. Dinozavrlarning fosillari O'zbekistonda ham topilgan. Masalan, 2017-yilda Navoiy viloyatidagi Zarafshon tumani hududida tiranozavrning quyruq to'sig'i topilgan. Shuningdek, Qashqadaryo viloyatidagi Kitob tumani hududida dinozavrlarning izlari ham topilgan. Bu izlar 150 million yil oldin yashagan dinozavrlarga tegishli bo'lishi mumkin.

-

Dinozavrlarning məhv bo'lishining mumkin bo'lgan sabablari va ta'sirlari

-

Shuningdek, asteroidning to'sig'i atmosferaga kirib, uning haroratini oshirib, Yer sirtini qoplab yurgan toz bulutlarini hosil qilgan. Bu bulutlar quyosh nurlarini bloklab, Yer sirtining sovushib ketishiga olib kelgan. Bu o'zgarishlar natijasida dinozavrlar va boshqa ko'p hayvon va o'simlik turlari məhv bo'lgan. Bu hodisani K-T (Kretase-Tersiyer) chegarasi deb atashadi, chunki bu vaqtda Kretase davri tugaydi va Tersiyer davri boshlanadi.

-

Dinozavrlarning məhv bo'lishining boshqa nazariyalari ham mavjud. Masalan, dinozavrlarning evolyutsiyasi va genetikasi bilan bog'liq nazariyalar, dinozavrlarning tanasining o'sishi va miqdorining ortishi bilan bog'liq nazariyalar, dinozavrlarning ovqatlanishi va kasalliklari bilan bog'liq nazariyalar va dinozavrlarning iqlim o'zgarishlari va tektonik harakatlarga moslashishlari bilan bog'liq nazariyalar. Biroq, bu nazariyalar asteroidning to'qnashidan so'ng sodir bo'lgan iqlim va atrof-muhitdagi katta o'zgarishlarni e'tiborsiz qoldirishadi.

-

Xulosa

-

Dinozavrlar Yer tarixida juda muhim rol o'ynagan hayvonlar kladidir. Ular 245 million yildan 66 million yil oldin Yerda yashagan va ularning turlari juda turli xil bo'lgan. Ularning fosillari dunyo bo'ylab topilgan va ular haqida ko'plab ilmiy tadqiqotlar olib borilgan. Dinozavrlar 66 million yil oldin asteroidning Yerga to'qnashidan so'ng iqlim va atrof-muhitda sodir bo'lgan katta o'zgarishlar tufayli məhv bo'lganlar.

-

Dinozavrlar haqida quyidagi ba'zi qiziqarli faktlarni bilasizmi?

-
    -
  • Dinozavrlarning nomi ingliz tilida \"terrible lizard\" degan ma'noga ega, lekin ular aslida kərtənkələ emas, balki xordalilar tipiga mansub hayvonlardir.
  • -
  • Dinozavrlarning eng kichigi Kolibri qushidan ham kichik bo'lgan Lesothosaurus bo'lib, u 30 sm uzunlikka ega edi. Dinozavrlarning eng kattasi esa Brachiosaurus bo'lib, u 25 metr balandlikka va 80 tonna og'irlikka ega edi.
  • -
  • Dinozavrlarning bir qismi parvoz qila olgan bo'lsa-da, ular aslida parvozli dinozavrlar emas, balki Pterozavr guruhiga mansub hayvonlardir. Parvozli dinozavrlar esa Arkeopteriks kabi turlarga tegishli bo'lib, ular qushlar bilan yaqin aloqada bo'lgan.
  • -
  • Dinozavrlarning bir qismi paxta terisi bilan qoplangan edi. Bu teri dinozavrlarni issiqda saqlab turishga yordam berdi. Shuningdek, bu teri rang-barang bo'lishi mumkin edi va bu dinozavrlarni ovchilaridan yoki ovuldan yashirishga yoki xotin-xotinlarini jalb qilishga yordam berdi.
  • -

    Dinozavrlarning məhv bo'lishidan keyin ham ularning bir qismi tirik qolgan. Ularning eng mashhur namunasi qushlardir. Qushlar dinozavrlarning evolyutsiyaviy bo'lagi hisoblanadi va ularning xususiyatlari dinozavrlarga o'xshaydi. Masalan, qushlarning tana qurilishi, oyoqlari, tirmalari, yumurtalari va paxta terisi dinozavrlarning bilan mos keladi.

    -

    FAQ

    -

    Dinozavrlar haqida ko'p beriladigan savollarga quyidagi javoblar beriladi:

    -
      -
    1. Dinozavrlar nima uchun məhv bo'ldi? Dinozavrlar 66 million yil oldin asteroidning Yerga to'qnashidan so'ng iqlim va atrof-muhitda sodir bo'lgan katta o'zgarishlar tufayli məhv bo'ldilar. Bu o'zgarishlar natijasida dinozavrlar uchun zarur ovqat, suv va joy kamayib ketdi.
    2. -
    3. Dinozavrlar qachon yashagan? Dinozavrlar 245 million yildan 66 million yil oldin Yerda yashagan. Ular Trias, Jura va Kretase davrlarida Yerda hakim bo'lgan.
    4. -
    5. Dinozavrlar qaysi davrda yashagan? Dinozavrlar Trias, Jura va Kretase davrlarida yashagan. Trias davri 252-201 million yil oldin, Jura davri 201-145 million yil oldin va Kretase davri 145-66 million yil oldin bo'lgan.
    6. -
    7. Dinozavrlarning eng katta turi qaysi? Dinozavrlarning eng katta turi Sauropod guruhiga mansub bo'lgan Brachiosaurus bo'lib, u 25 metr balandlikka va 80 tonna og'irlikka ega edi.
    8. -
    9. Dinozavrlarning eng kichik turi qaysi? Dinozavrlarning eng kichik turi Lesothosaurus bo'lib, u 30 sm uzunlikka ega edi. U Raptor guruhiga mansub bo'lgan go'shtyeygan dinozavr edi.
    10. -

    197e85843d
    -
    -
    \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Download 8 Ball Pool on PC and Enjoy the Best Pool Game with BlueStacks.md b/spaces/fatiXbelha/sd/Download 8 Ball Pool on PC and Enjoy the Best Pool Game with BlueStacks.md deleted file mode 100644 index 5fa074d04e3e030f9b3cd5dc025b991cf18a01e1..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Download 8 Ball Pool on PC and Enjoy the Best Pool Game with BlueStacks.md +++ /dev/null @@ -1,131 +0,0 @@ -
    -

    How to Download and Play 8 Ball Pool on PC with BlueStacks

    -

    If you are a fan of billiard games, you might have heard of 8 Ball Pool, one of the most popular and addictive pool games on mobile devices. But did you know that you can also play it on your PC with BlueStacks, a powerful Android emulator that lets you run Android apps and games on your computer or laptop? In this article, we will show you how to download and play 8 Ball Pool on PC with BlueStacks, as well as some of the features and benefits of both the game and the emulator.

    -

    What is 8 Ball Pool?

    -

    8 Ball Pool is a sports game developed by Miniclip.com, a leading online games company. It is one of the most realistic and immersive pool games on mobile devices, with stunning graphics, smooth gameplay, and various game modes. You can play solo or online against millions of players from around the world, as well as challenge your friends and show off your skills. You can also customize your cue and table, participate in tournaments, earn coins and cash, and rank up on the leaderboard. Whether you are a beginner or a pro, you will find something to enjoy in 8 Ball Pool.

    -

    download 8 ball pool bluestacks


    Download File ••• https://urllie.com/2uNIlx



    -

    Features of 8 Ball Pool

    -
      -
    • Play different types of pool games, such as 8 ball, 9 ball, or no guideline.
    • -
    • Compete with real players in 1-on-1 matches or join a club and play with your friends.
    • -
    • Enter tournaments and win exclusive prizes and trophies.
    • -
    • Collect and upgrade over 100 cues with different stats and designs.
    • -
    • Choose from various tables with different themes and settings.
    • -
    • Earn coins and cash by winning matches or spinning the wheel.
    • -
    • Use coins and cash to buy items from the Pool Shop or enter higher stakes matches.
    • -
    • Increase your level and VIP status by playing more games and earning more points.
    • -
    • Track your progress and stats on the profile page.
    • -
    • Chat with other players using emojis, phrases, or custom messages.
    • -
    -

    Benefits of playing 8 Ball Pool on PC

    -
      -
    • You can enjoy a larger screen and better graphics quality.
    • -
    • You can use a mouse or a keyboard for more precise aiming and shooting.
    • -
    • You can avoid battery drain, overheating, or lag issues that might affect your mobile device.
    • -
    • You can save storage space on your mobile device by playing on your PC.
    • -
    • You can access other apps or tools on your PC while playing without interrupting your game.
    • -
    -

    What is BlueStacks?

    -

    BlueStacks is a software that allows you to run Android applications on your computer or laptop. It is one of the most popular and trusted Android emulators on the market, with over 500 million users worldwide. It is compatible with most Android apps and games, and offers a range of features and benefits that enhance your gaming experience.

    -

    Features of BlueStacks

    -
      -
    • It has a fast and smooth performance, with up to 6 times faster than the fastest Android device.
    • -
    • It supports high-definition graphics, with up to 4K resolution and 240 FPS.
    • -
    • It has a user-friendly interface, with easy access to the Game Center, the Play Store, and the settings.
    • -
    • It has a multi-instance feature, which allows you to run multiple apps or games at the same time on different windows or tabs.
    • -
    • It has an advanced keymapping feature, which allows you to customize your controls and settings for each app or game.
    • -
    • It has a macro functionality, which allows you to record and execute complex actions with a single keystroke or button.
    • -
    • It has a gamepad support, which allows you to use your favorite controller for playing games.
    • -
    • It has a streaming mode, which allows you to broadcast your gameplay to platforms like Twitch, YouTube, or Facebook.
    • -
    -

    Benefits of using BlueStacks for 8 Ball Pool

    -
      -
    • You can play 8 Ball Pool on PC with better graphics and performance than on your mobile device.
    • -
    • You can use the mouse or the keyboard for more accurate and comfortable controls than on the touchscreen.
    • -
    • You can use the advanced keymapping feature to assign different keys or buttons for different actions, such as aiming, shooting, spinning, or chatting.
    • -
    • You can use the macro functionality to automate tasks like spinning the wheel, collecting rewards, or entering matches.
    • -
    • You can use the multi-instance feature to play 8 Ball Pool on multiple accounts or devices simultaneously.
    • -
    • You can stream your gameplay to your audience and interact with them in real-time.
    • -
    -

    How to download and install BlueStacks on PC

    -

    Downloading and installing BlueStacks on PC is very easy and fast. Just follow these simple steps:

    -

    Step 1: Visit the official website and click on "Download BlueStacks"

    -

    Go to https://www.bluestacks.com/ and click on the "Download BlueStacks" button. This will start downloading the installer file on your PC.

    -

    Step 2: Launch the installer and click on "Install now"

    -

    Once the download is complete, open the installer file and click on "Install now". This will begin installing BlueStacks on your PC. You can also customize the installation location and preferences if you want.

    -

    Step 3: Wait for the installation to complete and launch BlueStacks

    -

    The installation process may take a few minutes depending on your PC's specifications. Once it is done, BlueStacks will launch automatically. You can also find the BlueStacks icon on your desktop or start menu.

    -

    How to download and install 8 Ball Pool on PC with BlueStacks

    -

    Now that you have BlueStacks on your PC, you can easily download and install 8 Ball Pool on it. Just follow these simple steps:

    -

    Step 1: Search for 8 Ball Pool in the Game Center and click on it

    -

    On the home screen of BlueStacks, you will see the Game Center tab, where you can find various games recommended by BlueStacks. You can also use the search bar to look for any game you want. Type "8 Ball Pool" in the search bar and click on it. This will take you to the game's page in the Game Center.

    -

    Step 2: Click on the "Install" button on the Play Store page

    -

    On the game's page, you will see a button that says "Install". Click on it. This will open the Google Play Store page of 8 Ball Pool. You may need to sign in with your Google account if you haven't done so before. Then, click on the "Install" button again. This will start downloading and installing 8 Ball Pool on your PC.

    -

    How to download 8 ball pool on bluestacks
    -Download 8 ball pool bluestacks for pc
    -Download 8 ball pool bluestacks for mac
    -Download 8 ball pool bluestacks for windows 10
    -Download 8 ball pool bluestacks for android
    -Download 8 ball pool bluestacks offline installer
    -Download 8 ball pool bluestacks latest version
    -Download 8 ball pool bluestacks apk
    -Download 8 ball pool bluestacks mod
    -Download 8 ball pool bluestacks hack
    -Download 8 ball pool bluestacks cheat engine
    -Download 8 ball pool bluestacks without google account
    -Download 8 ball pool bluestacks free coins
    -Download 8 ball pool bluestacks unlimited money
    -Download 8 ball pool bluestacks with facebook login
    -Download 8 ball pool bluestacks online multiplayer
    -Download 8 ball pool bluestacks not working
    -Download 8 ball pool bluestacks error
    -Download 8 ball pool bluestacks fix
    -Download 8 ball pool bluestacks update
    -Download 8 ball pool bluestacks review
    -Download 8 ball pool bluestacks guide
    -Download 8 ball pool bluestacks tips and tricks
    -Download 8 ball pool bluestacks best settings
    -Download 8 ball pool bluestacks system requirements
    -Download 8 ball pool bluestacks alternative
    -Download 8 ball pool nox player
    -Download 8 ball pool ldplayer
    -Download 8 ball pool memu play
    -Download 8 ball pool gameloop
    -Download 8 ball pool smartgaga
    -Download 8 ball pool koplayer
    -Download 8 ball pool droid4x
    -Download 8 ball pool genymotion
    -Download 8 ball pool andy emulator
    -Download 8 ball pool remix os player
    -Download 8 ball pool phoenix os
    -Download 8 ball pool prime os
    -Download 8 ball pool bliss os
    -Download 8 ball pool open thos os

    -

    Step 3: Wait for the download and installation to finish and click on the game icon

    -

    The download and installation process may take a few minutes depending on your internet speed and PC's performance. Once it is done, you will see a game icon on your home screen or in your app drawer. Click on it to launch 8 Ball Pool on your PC.

    -

    How to customize your controls and settings for 8 Ball Pool on PC with BlueStacks

    -

    One of the best things about playing 8 Ball Pool on PC with BlueStacks is that you can customize your controls and settings to suit your preferences and style. Here are some of the features that you can use to enhance your gameplay:

    -

    How to use the Advanced Keymapping feature

    -

    The Advanced Keymapping feature allows you to assign different keys or buttons for different actions in the game. For example, you can use the mouse to aim and shoot, the arrow keys to adjust the spin, or the spacebar to chat. You can also create your own custom keymap or use the default one provided by BlueStacks. To access the Advanced Keymapping feature, click on the keyboard icon on the right side of the screen. Then, you can drag and drop the keys or buttons on the screen, or edit them in the settings. You can also save and switch between different keymaps for different games.

    -

    How to use the Macro functionality

    -

    The Macro functionality allows you to record and execute complex actions with a single keystroke or button. For example, you can create a macro that automatically spins the wheel, collects rewards, or enters matches. You can also edit, delete, or share your macros with other users. To access the Macro functionality, click on the macro icon on the right side of the screen. Then, you can click on the record button to start recording your actions, or click on the play button to execute a macro. You can also manage your macros in the settings.

    -

    How to use the Multiple Instances feature

    -

    The Multiple Instances feature allows you to run multiple apps or games at the same time on different windows or tabs. For example, you can play 8 Ball Pool on multiple accounts or devices simultaneously, or play other games while waiting for your turn. You can also sync your actions across all instances with the Multi-Instance Sync feature. To access the Multiple Instances feature, click on the multi-instance icon on the right side of the screen. Then, you can create a new instance or clone an existing one, or switch between them in the manager.

    -

    Conclusion

    -

    8 Ball Pool is a fun and exciting pool game that you can play on your PC with BlueStacks. You can enjoy better graphics and performance, as well as customize your controls and settings with various features. You can also compete with real players from around the world, or play with your friends and family. If you are looking for a realistic and immersive pool game on PC, you should definitely try 8 Ball Pool with BlueStacks.

    -

    FAQs

    -
      -
    • Q: Is 8 Ball Pool free to play?
    • -
    • A: Yes, 8 Ball Pool is free to download and play on both mobile devices and PC. However, it also offers in-app purchases for coins and cash, which you can use to buy items or enter higher stakes matches.
    • -
    • Q: Is BlueStacks safe to use?
    • -
    • A: Yes, BlueStacks is safe and secure to use. It does not contain any malware or viruses, and it does not access any personal data or files on your PC. It also complies with Google's policies and standards for Android emulators.
    • -
    • Q: How do I update 8 Ball Pool on PC with BlueStacks?
    • -
    • A: To update 8 Ball Pool on PC with BlueStacks, you just need to open the Google Play Store app on BlueStacks and look for 8 Ball Pool. If there is an update available, you will see an "Update" button next to it. Click on it and wait for the update to finish.
    • -
    • Q: How do I uninstall 8 Ball Pool or BlueStacks from my PC?
    • -
    • A: To uninstall 8 Ball Pool from your PC, you just need to right-click on its icon on BlueStacks and select "Uninstall". To uninstall BlueStacks from your PC, you just need to go to your Control Panel and select "Uninstall a program". Then, find BlueStacks and click on "Uninstall".
    • -
    • Q: How do I contact customer support for 8 Ball Pool or BlueStacks?
    • -
    • A: To contact customer support for 8 Ball Pool, you can visit their official website at https://www.miniclip.com/ and click on "Support". There, you can find FAQs, guides, forums, and contact forms for various issues. To contact customer support for BlueStacks, you can visit their official website at https://www.bluestacks.com/ and click on "Support". There, you can find FAQs, guides, blogs, and contact forms for various issues.
    • -
    -

    I hope you found this article helpful and informative. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading and happy gaming!

    401be4b1e0
    -
    -
    \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Euro Truck Simulator 3 MOD APK Tips and Tricks for Beginners.md b/spaces/fatiXbelha/sd/Euro Truck Simulator 3 MOD APK Tips and Tricks for Beginners.md deleted file mode 100644 index ee994e8c1e7477075bf6f4bfc4af5c61402ee6ef..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Euro Truck Simulator 3 MOD APK Tips and Tricks for Beginners.md +++ /dev/null @@ -1,97 +0,0 @@ - -

    Euro Truck Simulator 3 Mod APK: Everything You Need to Know

    -

    Do you love driving trucks across Europe? Do you want to experience the thrill of delivering cargo to different destinations? Do you want to customize your own truck and explore new maps? If you answered yes to any of these questions, then you might be interested in Euro Truck Simulator 3, the latest installment of the popular truck simulation game series. And if you want to make your gaming experience even more exciting, you might want to try Euro Truck Simulator 3 Mod APK, a modified version of the original game that gives you unlimited money, trucks, and maps. In this article, we will tell you everything you need to know about Euro Truck Simulator 3 Mod APK, including what it is, how to download and install it, what features it offers, and what tips and tricks you can use to become the king of the road.

    -

    What is Euro Truck Simulator 3?

    -

    Euro Truck Simulator 3 is a realistic truck driving simulation game developed by SCS Software, the same studio behind the successful Euro Truck Simulator 2. In this game, you can travel across Europe as a trucker who delivers important cargo across impressive distances. You can choose from dozens of cities to explore, such as London, Rome, Berlin, Madrid, Prague, and many more. You can also drive various types of trucks from different European manufacturers, such as Volvo, Scania, Mercedes-Benz, MAN, Renault, DAF, and Iveco. You can customize your truck with different parts, paint jobs, accessories, and decals. You can also upgrade your skills and reputation as a driver by completing different types of jobs, such as urgent deliveries, fragile cargo, long hauls, special transports, etc.

    -

    euro truck simulator 3 mod apk


    Download File →→→ https://urllie.com/2uNI9y



    -

    Euro Truck Simulator 3 is a sequel to the popular Euro Truck Simulator 2, which was released in 2012. The new game features improved graphics, gameplay, and features that make it more realistic and immersive than ever. For example, the game has a dynamic weather system that affects the road conditions and visibility. The game also has a realistic traffic system that includes cars, buses, trucks, motorcycles, bicycles, pedestrians, police cars, ambulances, etc. The game also has a realistic physics system that simulates the weight, inertia, friction, and aerodynamics of the trucks. The game also has a realistic sound system that captures the engine noise, horn sound, brake sound, tire sound, etc. of the trucks.

    -

    What is Euro Truck Simulator 3 Mod APK?

    -

    Euro Truck Simulator 3 Mod APK is a modified version of the original game that gives you access to unlimited money, trucks, and maps. This means that you can buy any truck you want without worrying about the price. You can also upgrade your truck with any part you want without worrying about the cost. You can also unlock all the maps in the game and explore different regions and countries without any restrictions. You can also enjoy the game without any ads or in-app purchases. Euro Truck Simulator 3 Mod APK is a way to enhance your gaming experience and have more fun with the game. You can customize your truck to your liking and show it off to other players online. You can also challenge yourself with more difficult and rewarding jobs and earn more money and reputation. You can also discover new places and landmarks and enjoy the scenery and culture of Europe.

    -

    euro truck simulator 3 mod apk unlimited money
    -euro truck simulator 3 mod apk download for android
    -euro truck simulator 3 mod apk latest version
    -euro truck simulator 3 mod apk with all trucks unlocked
    -euro truck simulator 3 mod apk offline
    -euro truck simulator 3 mod apk free shopping
    -euro truck simulator 3 mod apk realistic physics
    -euro truck simulator 3 mod apk with all trailers
    -euro truck simulator 3 mod apk no ads
    -euro truck simulator 3 mod apk with all maps
    -euro truck simulator 3 mod apk with multiplayer mode
    -euro truck simulator 3 mod apk with custom skins
    -euro truck simulator 3 mod apk with steering wheel support
    -euro truck simulator 3 mod apk with traffic lights
    -euro truck simulator 3 mod apk with weather effects
    -euro truck simulator 3 mod apk with day and night cycle
    -euro truck simulator 3 mod apk with realistic sounds
    -euro truck simulator 3 mod apk with dynamic lights
    -euro truck simulator 3 mod apk with gps navigation
    -euro truck simulator 3 mod apk with voice chat
    -euro truck simulator 3 mod apk with radio stations
    -euro truck simulator 3 mod apk with speed limit
    -euro truck simulator 3 mod apk with fuel consumption
    -euro truck simulator 3 mod apk with cargo damage
    -euro truck simulator 3 mod apk with engine breakdowns
    -euro truck simulator 3 mod apk with police and traffic fines
    -euro truck simulator 3 mod apk with parking challenges
    -euro truck simulator 3 mod apk with toll booths
    -euro truck simulator 3 mod apk with ferry crossings
    -euro truck simulator 3 mod apk with road signs and signals
    -euro truck simulator 3 mod apk with different camera angles
    -euro truck simulator 3 mod apk with interior view and mirrors
    -euro truck simulator 3 mod apk with cruise control and indicators
    -euro truck simulator 3 mod apk with horn and headlights
    -euro truck simulator 3 mod apk with wipers and hazards
    -euro truck simulator 3 mod apk with dashboard and gauges
    -euro truck simulator 3 mod apk with seat adjustment and seat belt
    -euro truck simulator 3 mod apk with manual and automatic transmission
    -euro truck simulator 3 mod apk with clutch and brake pedals
    -euro truck simulator 3 mod apk with tilt and touch controls

    -

    How to Download and Install Euro Truck Simulator 3 Mod APK?

    -

    If you want to try Euro Truck Simulator 3 Mod APK, you need to follow these simple steps:

    -

    Step 1: Find a reliable download link

    -

    There are many websites that claim to offer Euro Truck Simulator 3 Mod APK, but not all of them are safe and trustworthy. Some of them may contain viruses, malware, or spyware that can harm your device or steal your personal information. Therefore, you need to be careful and find a reliable download link from a reputable source. You can use Google or any other search engine to look for reviews, ratings, comments, and feedback from other users who have tried the mod before. You can also check the file size, version, and compatibility of the mod before downloading it.

    -

    Step 2: Enable unknown sources on your device

    -

    Since Euro Truck Simulator 3 Mod APK is not available on the official Google Play Store, you need to enable unknown sources on your device to install it. This is a security feature that prevents you from installing apps from sources other than the Play Store. To enable unknown sources, you need to go to your device settings, then security, then unknown sources, and then toggle it on. You may also see a warning message that tells you about the risks of installing apps from unknown sources, but you can ignore it if you trust the source of the mod.

    -

    Step 3: Download and install the APK file

    -

    Once you have enabled unknown sources on your device, you can proceed to download and install the APK file of Euro Truck Simulator 3 Mod APK. You need to tap on the download link and wait for the file to be downloaded to your device. Then, you need to locate the file in your file manager or downloads folder and tap on it to start the installation process. You may also see a pop-up message that asks for your permission to install the app, but you can grant it if you trust the source of the mod.

    -

    Step 4: Launch the game and enjoy

    -

    After the installation is complete, you can launch the game and enjoy Euro Truck Simulator 3 Mod APK. You will see that you have unlimited money, trucks, and maps in the game. You can also access all the features and options of the game without any limitations. You can also play online with other players who have the mod or the original game.

    -

    What are the Features of Euro Truck Simulator 3 Mod APK?

    -

    Euro Truck Simulator 3 Mod APK offers many features that make it different from the original game. Here are some of them:

    -

    Unlimited Money

    -

    One of the main features of Euro Truck Simulator 3 Mod APK is that it gives you unlimited money in the game. This means that you can buy any truck you want without worrying about the price. You can also upgrade your truck with any part you want without worrying about the cost. You can also spend your money on anything else you want in the game, such as fuel, repairs, tolls, fines, etc.

    -

    All Trucks Unlocked

    -

    Another feature of Euro Truck Simulator 3 Mod APK is that it unlocks all trucks in the game. This means that you can drive any truck you want without having to unlock it first. You can choose from dozens of trucks from different European manufacturers, such as Volvo, Scania, Mercedes-Benz, MAN, Renault, DAF, and Iveco. You can also customize your truck with different parts, paint jobs, accessories, and decals.

    -

    All Maps Unlocked

    -

    A third feature of Euro Truck Simulator 3 Mod APK is that it unlocks all maps in the game. This means that you can explore any region or country you want without having to unlock it first. You can choose from dozens of cities to visit, such as London, Rome, Berlin, Madrid, Prague, and many more. You can also drive on different types of roads, such as highways, country roads, city streets, etc.

    -

    Realistic Physics and Graphics

    -

    A fourth feature of Euro Truck Simulator 3 Mod APK is that it maintains the realistic physics and graphics of the original game. This means that you can enjoy the same level of realism and immersion as the original game. You can feel the weight, inertia, friction, and aerodynamics of the trucks. You can also see the dynamic weather, the realistic traffic, the detailed environments, and the stunning lighting effects of the game.

    -

    What are the Tips and Tricks for Euro Truck Simulator 3?

    -

    If you want to become a better truck driver and enjoy Euro Truck Simulator 3 more, you can use these tips and tricks:

    -

    Master the controls and settings

    -

    One of the first things you should do is to master the controls and settings of the game. You can choose from different control options, such as keyboard, mouse, gamepad, steering wheel, etc. You can also adjust the sensitivity, the camera angle, the sound volume, the graphics quality, etc. You can also enable or disable various features, such as speed limiter, automatic transmission, traffic offenses, fatigue simulation, etc. You should find the best combination of controls and settings that suit your preference and style.

    -

    Choose the right truck and trailer for each job

    -

    Another thing you should do is to choose the right truck and trailer for each job. You should consider factors such as the power, the speed, the fuel efficiency, the maneuverability, the reliability, and the capacity of the truck. You should also consider factors such as the weight, the size, the shape, and the fragility of the trailer. You should choose a truck and a trailer that match the requirements and the challenges of each job.

    -

    Follow the traffic rules and avoid fines

    -

    A third thing you should do is to follow the traffic rules and avoid fines. You should obey the speed limits, the traffic lights, the road signs, and the lane markings. You should also respect other road users, such as cars, buses, trucks, motorcycles, bicycles, pedestrians, police cars, ambulances, etc. You should also avoid accidents, collisions, damages, injuries, or deaths. You should also avoid violations such as speeding, running red lights, driving on wrong lanes or directions, overtaking, blocking, etc. You should also pay attention to the fuel level, the damage level, the fatigue level, and the time limit of each job. You should avoid fines as they can reduce your money and reputation.

    -

    Explore different cities and routes

    -

    A fourth thing you should do is to explore different cities and routes. You should not stick to the same routes or destinations all the time. You should try to visit new places and landmarks and enjoy the scenery and culture of Europe. You should also try to drive on different types of roads, such as highways, country roads, city streets, etc. You should also try to drive in different weather conditions, such as sunny, rainy, foggy, snowy, etc. You should also try to drive in different times of the day, such as morning, afternoon, evening, night, etc. You should explore different cities and routes as they can give you more variety and fun.

    -

    Upgrade your skills and reputation

    -

    A fifth thing you should do is to upgrade your skills and reputation. You should not neglect your skills and reputation as they can affect your performance and income. You should upgrade your skills by completing different types of jobs, such as urgent deliveries, fragile cargo, long hauls, special transports, etc. You should also upgrade your reputation by delivering cargo on time, without damage, without fines, etc. You should also upgrade your skills and reputation by completing achievements and challenges in the game. You should upgrade your skills and reputation as they can give you more opportunities and rewards.

    -

    Conclusion

    -

    Euro Truck Simulator 3 is a realistic truck driving simulation game that lets you travel across Europe as a trucker who delivers important cargo across impressive distances. Euro Truck Simulator 3 Mod APK is a modified version of the original game that gives you unlimited money, trucks, and maps. You can download and install Euro Truck Simulator 3 Mod APK by following some simple steps. You can also enjoy the features of Euro Truck Simulator 3 Mod APK, such as unlimited money, all trucks unlocked, all maps unlocked, and realistic physics and graphics. You can also use some tips and tricks for Euro Truck Simulator 3, such as mastering the controls and settings, choosing the right truck and trailer for each job, following the traffic rules and avoiding fines, exploring different cities and routes, and upgrading your skills and reputation.

    -

    FAQs

    -

    Here are some frequently asked questions about Euro Truck Simulator 3 Mod APK:

    -

    Q: Is Euro Truck Simulator 3 Mod APK safe to use?

    -

    A: Euro Truck Simulator 3 Mod APK is safe to use if you download it from a reliable source. However, you should always be careful when downloading apps from unknown sources as they may contain viruses, malware, or spyware that can harm your device or steal your personal information.

    -

    Q: Is Euro Truck Simulator 3 Mod APK compatible with my device?

    -

    A: Euro Truck Simulator 3 Mod APK is compatible with most Android devices that have Android 4.1 or higher. However, you should check the file size, version, and compatibility of the mod before downloading it.

    -

    Q: Is Euro Truck Simulator 3 Mod APK legal to use?

    -

    A: Euro Truck Simulator 3 Mod APK is not legal to use as it violates the terms and conditions of the original game. Therefore, you should use it at your own risk and discretion. You may also face some issues or problems when playing online with other players who have the original game.

    -

    Q: Can I update Euro Truck Simulator 3 Mod APK?

    -

    A: Euro Truck Simulator 3 Mod APK may not be updated automatically as it is not available on the official Google Play Store. Therefore, you may need to download and install the latest version of the mod manually whenever there is a new update available.

    -

    Q: Can I uninstall Euro Truck Simulator 3 Mod APK?

    -

    A: Yes, you can uninstall Euro Truck Simulator 3 Mod APK anytime you want. You just need to go to your device settings, then apps, then Euro Truck Simulator 3 Mod APK, and then tap on uninstall. You may also need to delete the data and cache of the app from your device storage.

    401be4b1e0
    -
    -
    \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/FIFA Mobile MOD APK Unlock All the Features and Modes for the World Cup 2022.md b/spaces/fatiXbelha/sd/FIFA Mobile MOD APK Unlock All the Features and Modes for the World Cup 2022.md deleted file mode 100644 index 389dc8bb7d9b4c4e6ed76eff8897dc402ca6b732..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/FIFA Mobile MOD APK Unlock All the Features and Modes for the World Cup 2022.md +++ /dev/null @@ -1,112 +0,0 @@ - -

    FIFA Mobile FIFA World Cup Mod APK: Everything You Need to Know

    -

    If you are a fan of soccer games, you must have heard of FIFA Mobile, the official mobile game of the FIFA World Cup 2022. This game lets you build your ultimate team of soccer stars, compete in various modes, and relive the world's greatest soccer tournament. But what if you want to enjoy more features, options, and resources in the game without spending real money or waiting for hours? Well, there is a solution for that: FIFA Mobile FIFA World Cup Mod APK.

    -

    In this article, we will tell you everything you need to know about this modded version of the game, including its features, how to download and install it, its pros and cons, and some frequently asked questions. So, let's get started!

    -

    fifa mobile fifa world cup mod apk


    Download ✅ https://urllie.com/2uNIAt



    -

    What is FIFA Mobile FIFA World Cup Mod APK?

    -

    FIFA Mobile FIFA World Cup Mod APK is a modified version of the original game that gives you access to unlimited money and coins, unlocked all players, teams, and modes, and a menu mod with customization options. With this mod apk, you can enjoy the game without any limitations or restrictions. You can buy any player you want, upgrade your team, play in any mode you like, and customize your game settings according to your preferences.

    -

    This mod apk is not an official product of EA Sports or FIFA. It is created by third-party developers who modify the original game files to add or remove certain features. Therefore, it is not available on the Google Play Store or any other official app store. You have to download it from a trusted source online.

    -

    Features of FIFA Mobile FIFA World Cup Mod APK

    -

    Here are some of the main features of FIFA Mobile FIFA World Cup Mod APK that make it different from the original game:

    -

    Unlimited Money and Coins

    -

    Money and coins are the main currencies in the game that you need to buy players, upgrade your team, unlock modes, and more. However, earning them in the game can be time-consuming and tedious. You have to play matches, complete tasks, watch ads, or spend real money to get them.

    -

    With FIFA Mobile FIFA World Cup Mod APK, you don't have to worry about that. You will get unlimited money and coins in your account as soon as you install the mod apk. You can use them to buy anything you want in the game without any hassle.

    -

    Unlocked All Players, Teams, and Modes

    -

    FIFA Mobile has over 15,000 authentic soccer stars to choose from, including world-class talent like Kylian Mbappé, Christian Pulisic, Vinicius Jr and Son Heung-min. It also has over 600 teams from different leagues and countries, such as Chelsea, Paris SG, Real Madrid, Liverpool and Juventus. Moreover, it has various modes to play in, such as Head-to-Head, VS Attack, Manager Mode, and World Cup Mode.

    -

    fifa mobile world cup 2022 mod apk
    -fifa mobile mod apk unlimited money and coins
    -fifa mobile mod apk download latest version
    -fifa mobile mod apk with manager mode
    -fifa mobile mod apk offline
    -fifa mobile 23 season mod apk
    -fifa mobile soccer stars mod apk
    -fifa mobile ultimate team mod apk
    -fifa mobile champions league mod apk
    -fifa mobile icons and heroes mod apk
    -fifa mobile realistic soccer simulation mod apk
    -fifa mobile 60 fps mod apk
    -fifa mobile commentary mod apk
    -fifa mobile stadium sfx mod apk
    -fifa mobile kylian mbappe mod apk
    -fifa mobile virgil van dijk mod apk
    -fifa mobile son heung-min mod apk
    -fifa mobile kai havertz mod apk
    -fifa mobile christian pulisic mod apk
    -fifa mobile vinicius jr mod apk
    -fifa mobile pedri mod apk
    -fifa mobile joao felix mod apk
    -fifa mobile jude bellingham mod apk
    -fifa mobile alphonso davies mod apk
    -fifa mobile dusan vlahovic mod apk
    -fifa mobile world cup kits and badges mod apk
    -fifa mobile world cup stadiums mod apk
    -fifa mobile world cup match ball mod apk
    -fifa mobile world cup commentary mod apk
    -fifa mobile paolo maldini mod apk
    -fifa mobile ronaldinho mod apk
    -fifa mobile soccer legends mod apk
    -fifa mobile uefa champions league contender mod apk
    -fifa mobile strategy and tactics mod apk
    -fifa mobile auto-play mode mod apk
    -fifa mobile 5play app download link[^1^]
    -fifa mobile v18.1.03 unlocked all features[^1^]
    -fifa mobile ea sports official game[^1^]
    -fifa mobile updated players, kits, clubs and leagues[^1^]
    -fifa mobile 15,000 authentic soccer stars[^1^]

    -

    However, not all of these players, teams, and modes are available from the start. You have to unlock them by playing the game, completing achievements, or spending money and coins. This can be frustrating and boring for some players who want to enjoy the game with their favorite players, teams, and modes. With FIFA Mobile FIFA World Cup Mod APK, you don't have to do that. You will get all the players, teams, and modes unlocked from the start. You can choose any player you want, play with any team you like, and switch between any mode you prefer. You can also create your own custom team with your favorite players and compete with other players online.

    -

    Menu Mod with Customization Options

    -

    FIFA Mobile FIFA World Cup Mod APK also comes with a menu mod that gives you more control and customization options over the game. You can access this menu by tapping on the icon on the top left corner of the screen. From there, you can adjust various settings such as:

    -
      -
    • Game speed: You can increase or decrease the game speed to make it more challenging or easier.
    • -
    • Auto win: You can enable or disable this option to automatically win any match you play.
    • -
    • No ads: You can enable or disable this option to remove any ads from the game.
    • -
    • No root: You can enable or disable this option to play the game without rooting your device.
    • -
    • Anti-ban: You can enable or disable this option to prevent your account from getting banned by EA Sports for using a modded version of the game.
    • -
    -

    You can also change the language, sound, graphics, and other options of the game from this menu.

    -

    How to Download and Install FIFA Mobile FIFA World Cup Mod APK?

    -

    Now that you know what FIFA Mobile FIFA World Cup Mod APK is and what it offers, you might be wondering how to download and install it on your device. Well, it's not very difficult, but you have to follow some steps carefully. Here are the steps you need to follow:

    -

    Step 1: Enable Unknown Sources on Your Device

    -

    Since FIFA Mobile FIFA World Cup Mod APK is not an official app, you have to enable unknown sources on your device to allow it to install apps from outside the Google Play Store. To do that, go to your device settings, then security, then unknown sources, and turn it on. This will allow you to install any apk file on your device.

    -

    Step 2: Download the Mod APK File from a Trusted Source

    -

    The next step is to download the mod apk file from a trusted source online. There are many websites that claim to offer this mod apk, but not all of them are reliable or safe. Some of them may contain malware or viruses that can harm your device or steal your personal information. Therefore, you have to be careful and choose a reputable source that has positive reviews and feedback from other users.
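    If the download page publishes a checksum for the file (not all of them do), it is worth verifying it before you install anything. The short Python sketch below shows one way to do that; the file name and expected hash are placeholders, not values from any real download page.

```python
# Sketch only: verify a downloaded file against a published SHA-256 checksum
# before installing it. Replace the placeholders with the real file name and
# the hash shown on the download page, if it publishes one.
import hashlib

APK_PATH = "fifa_mobile_wc_mod.apk"  # placeholder file name
EXPECTED_SHA256 = "0" * 64           # placeholder, replace with the published hash

digest = hashlib.sha256()
with open(APK_PATH, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

if digest.hexdigest().lower() != EXPECTED_SHA256.lower():
    raise SystemExit("Checksum mismatch: do not install this file.")
print("Checksum OK:", digest.hexdigest())
```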

    -

    One of the sources that we recommend is [FIFA Mobile FIFA World Cup Mod APK Download]. This website has a direct link to download the latest version of the mod apk file without any surveys or pop-ups. It also has a detailed description of the mod apk features and how to install it. You can visit this website and click on the download button to get the mod apk file on your device.

    -

    Step 3: Install the Mod APK File on Your Device

    -

    Once you have downloaded the mod apk file, you have to install it on your device. To do that, locate the file in your device storage and tap on it. You will see a prompt asking you to confirm the installation. Tap on install and wait for a few seconds until the installation is complete.

    -

    Step 4: Launch the Game and Enjoy

    -

    After installing the mod apk file, you are ready to launch the game and enjoy its features. You will see a new icon on your home screen or app drawer with the name FIFA Mobile FIFA World Cup Mod APK. Tap on it and start playing the game with unlimited money and coins, unlocked all players, teams, and modes, and menu mod with customization options.

    -

    Pros and Cons of FIFA Mobile FIFA World Cup Mod APK

    -

    FIFA Mobile FIFA World Cup Mod APK has many advantages over the original game, but it also has some disadvantages that you should be aware of before using it. Here are some of the pros and cons of this mod apk:

    -

    Pros

    -
      -
    • Enhanced Gameplay Experience with More Features and Options: With this mod apk, you can enjoy the game without any limitations or restrictions. You can buy any player you want, upgrade your team, play in any mode you like, and customize your game settings according to your preferences.
    • -
    • No Need to Spend Real Money or Wait for Progression: With this mod apk, you don't have to spend any real money or wait for hours to get money and coins in the game. You will get unlimited money and coins in your account as soon as you install the mod apk. You can use them to buy anything you want in the game without any hassle.
    • -
    • Compatible with Most Android Devices and Versions: This mod apk is compatible with most Android devices and versions. You don't need a high-end device or a specific Android version to play this mod apk. You can play it on any device that supports the original game.
    • -
    -

    Cons

    -
      -
    • Risk of Getting Banned by EA Sports for Using a Modified Version of the Game: This mod apk is not an official product of EA Sports or FIFA. It is created by third-party developers who modify the original game files to add or remove certain features. Therefore, it is not approved or endorsed by EA Sports or FIFA. If you use this mod apk, you may violate the terms and conditions of the game and risk getting banned by EA Sports for using a modified version of the game. This may result in losing your account, progress, and data in the game.
    • -
    • Potential Malware or Virus Infection from Unreliable Sources: This mod apk is not available on the Google Play Store or any other official app store. You have to download it from a trusted source online. However, not all sources are reliable or safe. Some of them may contain malware or viruses that can harm your device or steal your personal information. Therefore, you have to be careful and choose a reputable source that has positive reviews and feedback from other users.
    • -
    • Possible Bugs or Glitches that May Affect the Game Performance or Stability: This mod apk is not a perfect product. It may have some bugs or glitches that may affect the game performance or stability. For example, some players may experience crashes, freezes, lags, errors, or other issues while playing the game. These issues may be caused by the mod apk itself or by the compatibility issues with your device or Android version.
    • -
    -

    Conclusion

    -

    FIFA Mobile FIFA World Cup Mod APK is a modified version of the original game that gives you access to unlimited money and coins, unlocked all players, teams, and modes, and a menu mod with customization options. With this mod apk, you can enjoy the game without any limitations or restrictions. You can buy any player you want, upgrade your team, play in any mode you like, and customize your game settings according to your preferences.

    -

    However, this mod apk also has some disadvantages that you should be aware of before using it. You may risk getting banned by EA Sports for using a modified version of the game, get infected by malware or viruses from unreliable sources, or encounter bugs or glitches that may affect the game performance or stability.

    -

    Therefore, you have to weigh the pros and cons of this mod apk and decide whether you want to use it or not. If you do decide to use it, make sure you download it from a trusted source online and follow the steps we provided above to install it on your device.

    -

    We hope this article was helpful and informative for you. If you have any questions or feedback about FIFA Mobile FIFA World Cup Mod APK, feel free to leave a comment below. We would love to hear from you!

    -

    FAQs

    -

    Here are some of the frequently asked questions about FIFA Mobile FIFA World Cup Mod APK:

    -
      -
    1. Is FIFA Mobile FIFA World Cup Mod APK safe to use?

       It depends on where you download it from. If you download it from a trusted source online that has positive reviews and feedback from other users, then it should be safe to use. However, if you download it from an unreliable source that may contain malware or viruses, then it may not be safe to use.

    2. Can I play online with other players using FIFA Mobile FIFA World Cup Mod APK?

       Yes, you can play online with other players using this mod apk. However, you may face some issues such as lagging, disconnecting, or mismatching with other players who are using the original game or a different version of the mod apk.

    3. Will I lose my progress if I uninstall FIFA Mobile FIFA World Cup Mod APK?

       If you uninstall this mod apk, you will lose all your progress and data in the game. You will also lose all the money and coins that you got from this mod apk. Therefore, if you want to keep your progress and data in the game, make sure you back up your data before uninstalling this mod apk.

    4. Can I update FIFA Mobile FIFA World Cup Mod APK?

       No, you cannot update this mod apk from the Google Play Store or any other official app store. You have to download the latest version of the mod apk file from the same source where you downloaded the previous version. You also have to uninstall the previous version before installing the new one.

    5. How can I contact the developers of FIFA Mobile FIFA World Cup Mod APK?

       If you have any questions, feedback, or suggestions for the developers of this mod apk, you can contact them through their website or social media accounts. You can find the links to their website and social media accounts on the download page of this mod apk.

    \ No newline at end of file diff --git a/spaces/feng2022/Time-TravelRephotography/Time_TravelRephotography/utils/ffhq_dataset/landmarks_detector.py b/spaces/feng2022/Time-TravelRephotography/Time_TravelRephotography/utils/ffhq_dataset/landmarks_detector.py deleted file mode 100644 index 824dae9314d41eabe7091bce095bca1c0ce61ad0..0000000000000000000000000000000000000000 --- a/spaces/feng2022/Time-TravelRephotography/Time_TravelRephotography/utils/ffhq_dataset/landmarks_detector.py +++ /dev/null @@ -1,71 +0,0 @@ -import dlib -import cv2 - - -class LandmarksDetector: - def __init__(self, predictor_model_path): - """ - :param predictor_model_path: path to shape_predictor_68_face_landmarks.dat file - """ - self.detector = dlib.get_frontal_face_detector() # cnn_face_detection_model_v1 also can be used - self.shape_predictor = dlib.shape_predictor(predictor_model_path) - - def get_landmarks(self, image): - img = dlib.load_rgb_image(image) - dets = self.detector(img, 1) - #print('face bounding boxes', dets) - - for detection in dets: - face_landmarks = [(item.x, item.y) for item in self.shape_predictor(img, detection).parts()] - #print('face landmarks', face_landmarks) - yield face_landmarks - - def draw(img, landmarks): - for (x, y) in landmarks: - cv2.circle(img, (x, y), 1, (0, 0, 255), -1) - return img - - -class DNNLandmarksDetector: - def __init__(self, predictor_model_path, DNN='TF'): - """ - :param - DNN: "TF" or "CAFFE" - predictor_model_path: path to shape_predictor_68_face_landmarks.dat file - """ - if DNN == "CAFFE": - modelFile = "res10_300x300_ssd_iter_140000_fp16.caffemodel" - configFile = "deploy.prototxt" - net = cv2.dnn.readNetFromCaffe(configFile, modelFile) - else: - modelFile = "opencv_face_detector_uint8.pb" - configFile = "opencv_face_detector.pbtxt" - net = cv2.dnn.readNetFromTensorflow(modelFile, configFile) - - self.shape_predictor = dlib.shape_predictor(predictor_model_path) - - def detect_faces(self, image, conf_threshold=0): - H, W = image.shape[:2] - blob = cv2.dnn.blobFromImage(image, 1.0, (300, 300), [104, 117, 123], False, False) - net.setInput(blob) - detections = net.forward() - bboxes = [] - for i in range(detections.shape[2]): - confidence = detections[0, 0, i, 2] - if confidence > conf_threshold: - x1 = int(detections[0, 0, i, 3] * W) - y1 = int(detections[0, 0, i, 4] * H) - x2 = int(detections[0, 0, i, 5] * W) - y2 = int(detections[0, 0, i, 6] * H) - bboxes.append(dlib.rectangle(x1, y1, x2, y2)) - return bboxes - - def get_landmarks(self, image): - img = cv2.imread(image) - dets = self.detect_faces(img, 0) - print('face bounding boxes', dets) - - for detection in dets: - face_landmarks = [(item.x, item.y) for item in self.shape_predictor(img, detection).parts()] - print('face landmarks', face_landmarks) - yield face_landmarks diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Chess for Windows A Fun and Challenging Chess Game for All Levels.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Chess for Windows A Fun and Challenging Chess Game for All Levels.md deleted file mode 100644 index 4932263adf3692d66a9b8f602dabed0969ee5574..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Chess for Windows A Fun and Challenging Chess Game for All Levels.md +++ /dev/null @@ -1,100 +0,0 @@ -
    -

    Chess Download for Windows 10: How to Play the Classic Game on Your PC

    -

    Introduction

    -

    Chess is one of the oldest and most popular board games in the world. It is a game of strategy, logic, and skill that can be enjoyed by people of all ages and backgrounds. Chess has been proven to have many benefits for the brain, such as enhancing concentration, creativity, problem-solving, and memory.

    -

    If you love chess and want to play it on your Windows 10 PC, you might be wondering how to download it and what features it offers. In this article, we will show you how to download chess for Windows 10, what are the features of Chess for Windows, and what are the benefits of playing chess on your PC. Let's get started!

    -

    chess download for windows 10


    Download ✦ https://gohhs.com/2uPnco



    -

    How to Download Chess for Windows 10

    -

    Downloading chess for Windows 10 is very easy and fast. You just need to follow these simple steps:

    -
      -
    1. Go to the Microsoft Store app on your PC or click here to open it in your browser.
    2. Search for "Chess for Windows" or click here to go directly to the game page.
    3. Click on the "Get" button and wait for the game to download and install on your PC.
    4. Launch the game from the Start menu or the Microsoft Store app.
    5. Enjoy playing chess on your PC!

    You can also download other chess games from the Microsoft Store, such as Chess Tactics Pro, Chess By Post, or Chess Riddles Deluxe. You can find them by searching for "chess" in the Microsoft Store app or clicking here.

    -

    Features of Chess for Windows

    -

    Chess for Windows is a free chess game that offers a lot of features for chess lovers. Here are some of them:

    -

    Different Game Modes and Difficulty Levels

    -

    You can choose from different game modes depending on your preference and skill level. You can play against the computer, against another player on the same PC, or online against other players from around the world. You can also adjust the difficulty level of the computer opponent from easy to expert.

    -

    Customizable Board and Pieces

    -

    You can customize the appearance of the board and pieces according to your taste. You can choose from different colors, themes, styles, and sounds. You can also zoom in and out, rotate, and move the board as you like.

    -

    Online Multiplayer and Chat

    -

    You can play online with other players from around the world and chat with them during the game. You can also create your own profile, add friends, send messages, and join tournaments. You can also see your online ranking and statistics.

    -

    Statistics and Achievements

    -

    You can track your progress and performance by viewing your statistics and achievements. You can see how many games you have played, won, lost, or drawn, as well as your rating, elo, accuracy, best moves, blunders, etc. You can also unlock various achievements by completing different challenges and tasks.

    -

    chess game for windows 10 free download
    -chess for windows 10 microsoft store
    -chess for windows 10 pc
    -chess for windows 10 surface hub
    -chess for windows 10 cgt
    -chess for windows 10 softonic
    -chess for windows 10 offline
    -chess for windows 10 online
    -chess for windows 10 3d
    -chess for windows 10 pro
    -chess for windows 10 app
    -chess for windows 10 best
    -chess for windows 10 review
    -chess for windows 10 latest version
    -chess for windows 10 update
    -chess for windows 10 download size
    -chess for windows 10 system requirements
    -chess for windows 10 age rating
    -chess for windows 10 free trial
    -chess for windows 10 full version
    -chess for windows 10 multiplayer
    -chess for windows 10 single player
    -chess for windows 10 tutorial
    -chess for windows 10 tips and tricks
    -chess for windows 10 cheats and hacks
    -chess for windows 10 features and benefits
    -chess for windows 10 screenshots and videos
    -chess for windows 10 ratings and reviews
    -chess for windows 10 customer support
    -chess for windows 10 privacy policy
    -chess for windows 10 terms of service
    -chess for windows 10 refund policy
    -chess for windows 10 installation guide
    -chess for windows 10 uninstallation guide
    -chess for windows 10 troubleshooting guide
    -chess for windows 10 feedback and suggestions
    -chess for windows 10 alternatives and competitors
    -chess for windows 10 comparison and contrast
    -chess for windows 10 pros and cons
    -chess for windows 10 advantages and disadvantages
    -chess for windows 10 strengths and weaknesses
    -chess for windows 10 recommendations and testimonials
    -chess for windows 10 coupons and discounts
    -chess for windows 10 deals and offers
    -chess for windows 10 promotions and giveaways
    -chess for windows 10 awards and achievements
    -chess for windows 10 news and updates
    -chess for windows 10 blogs and forums
    -chess for windows 10 social media and community

    -

    Benefits of Playing Chess for Windows

    -

    Playing chess for Windows is not only fun but also beneficial for your brain and well-being. Here are some of the benefits of playing chess on your PC:

    -

    Improve Your Cognitive Skills and Memory

    -

    Chess is a game that requires a lot of mental skills, such as concentration, logic, analysis, planning, calculation, visualization, creativity, etc

    Chess is a game that requires a lot of mental skills, such as concentration, logic, analysis, planning, calculation, visualization, creativity, etc. By playing chess regularly, you can improve these skills and enhance your cognitive abilities. Chess can also help you improve your memory by stimulating the growth of new brain cells and strengthening the connections between them.

    -

    Have Fun and Relax

    -

    Playing chess for Windows is a great way to have fun and relax. You can enjoy the game at your own pace and time, without any pressure or stress. You can also play with your friends or family, or meet new people online. Chess can help you reduce your anxiety and depression by providing you with a positive and rewarding activity.

    -

    Challenge Yourself and Others

    -

    Playing chess for Windows is also a great way to challenge yourself and others. You can test your skills and knowledge against different opponents and levels. You can also learn new strategies and tactics by watching other players or reading tutorials. Chess can help you boost your confidence and self-esteem by improving your performance and achieving your goals.

    -

    Conclusion

    -

    Chess is a classic game that has many benefits for the brain and well-being. If you want to play chess on your Windows 10 PC, you can easily download it from the Microsoft Store for free. You can enjoy various features of Chess for Windows, such as different game modes, customizable board and pieces, online multiplayer and chat, statistics and achievements, etc. You can also improve your cognitive skills and memory, have fun and relax, and challenge yourself and others by playing chess on your PC.

    -

    So what are you waiting for? Download Chess for Windows today and start playing the game of kings!

    -

    FAQs

    -

    Here are some frequently asked questions about Chess for Windows:

    -

    Q: How do I play chess online with other players?

    -

    A: To play chess online with other players, you need to create an account or sign in with your Microsoft account. Then, you can choose the online mode from the main menu and select a game type, such as quick match, ranked match, custom match, or tournament. You can also invite your friends to play with you by clicking on the invite button.

    -

    Q: How do I change the difficulty level of the computer opponent?

    -

    A: To change the difficulty level of the computer opponent, you need to choose the computer mode from the main menu and select a level from 1 to 10. The higher the level, the harder the computer opponent will be.

    -

    Q: How do I customize the board and pieces?

    -

    A: To customize the board and pieces, you need to go to the settings menu from the main menu and select the appearance option. There, you can choose from different colors, themes, styles, and sounds for the board and pieces. You can also zoom in and out, rotate, and move the board as you like.

    -

    Q: How do I view my statistics and achievements?

    -

    A: To view your statistics and achievements, you need to go to the profile menu from the main menu and select the statistics or achievements option. There, you can see how many games you have played, won, lost, or drawn, as well as your rating, elo, accuracy, best moves, blunders, etc. You can also see what achievements you have unlocked and what challenges you need to complete.

    -

    Q: How do I learn more about chess rules and strategies?

    -

    A: To learn more about chess rules and strategies, you can go to the help menu from the main menu and select the tutorial or tips option. There, you can watch videos or read articles that explain the basic rules and moves of chess, as well as some advanced strategies and tactics.

    \ No newline at end of file diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Forza Horizon 4 for Android - APK OBB 2018 - Amazing Gameplay and Features.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Forza Horizon 4 for Android - APK OBB 2018 - Amazing Gameplay and Features.md deleted file mode 100644 index a8c3d9483c786e85d7ec275921fbe21a33c46e2c..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Forza Horizon 4 for Android - APK OBB 2018 - Amazing Gameplay and Features.md +++ /dev/null @@ -1,89 +0,0 @@ - -
    - Benefits of playing Forza Horizon 4 on Android: portability, compatibility, performance, etc.
    H2: Forza Horizon 4 Android Gameplay: What to Expect from the Mobile Version
    - Features of Forza Horizon 4 Android: graphics, controls, modes, customization, etc.
    - Comparison of Forza Horizon 4 Android with other mobile racing games: advantages and disadvantages
    H2: Forza Horizon 4 Android Download: How to Get the Game on Your Smartphone or Tablet
    - Requirements for Forza Horizon 4 Android: device specifications, storage space, internet connection, etc.
    - Steps for Forza Horizon 4 Android download: where to find the apk and obb files, how to install them, how to launch the game, etc.
    H2: Forza Horizon 4 Android Tips and Tricks: How to Improve Your Gaming Experience
    - Tips for Forza Horizon 4 Android gameplay: how to optimize the settings, how to master the controls, how to earn credits and influence, etc.
    - Tricks for Forza Horizon 4 Android download: how to avoid scams, viruses, and errors, how to update the game, how to backup your data, etc.
    H2: Conclusion: Why You Should Try Forza Horizon 4 Android Today
    - Summary of the main points of the article
    - Call to action: encourage the readers to download and play Forza Horizon 4 Android
    Table 2: Article with HTML formatting

    Forza Horizon 4 Android: How to Play the Ultimate Racing Game on Your Mobile Device

    -

    If you are a fan of racing games, you have probably heard of Forza Horizon 4, the latest installment in the acclaimed open-world racing series. Forza Horizon 4 is set in a stunning recreation of Great Britain, where you can explore, race, and customize hundreds of cars in various seasons and weather conditions. The game has received rave reviews from critics and players alike for its amazing graphics, realistic physics, diverse gameplay, and online multiplayer features.

    -

    forza horizon 4 android gameplay + download apk obb 2018


    Download Zip ✏ https://gohhs.com/2uPqRU



    -

    But what if you want to enjoy Forza Horizon 4 on your mobile device? Is it possible to play this awesome game on your Android smartphone or tablet? The answer is yes! Thanks to some dedicated developers and modders, you can now download and install Forza Horizon 4 Android on your device and experience the thrill of racing anytime and anywhere. In this article, we will tell you everything you need to know about Forza Horizon 4 Android gameplay and download. Read on and find out why you should try this game today!

    -

    Forza Horizon 4 Android Gameplay: What to Expect from the Mobile Version

    -

    Forza Horizon 4 Android is a modified version of the original game that has been optimized for mobile devices. It has all the features and content of the PC and console versions, but with some adjustments and improvements to make it run smoothly and look great on smaller screens. Here are some of the things you can expect from Forza Horizon 4 Android gameplay:

    -
      -
    • Graphics: Forza Horizon 4 Android has stunning graphics that rival those of the original game. The game uses Unreal Engine 4 technology to render realistic lighting, shadows, reflections, and textures. The game also supports dynamic weather and seasons, which change the appearance and feel of the environment. You can admire the beautiful scenery of Great Britain as you drive through countryside roads, urban streets, mountain trails, and coastal highways.
    • -
    • Controls: Forza Horizon 4 Android has intuitive and responsive controls that let you steer your car with ease. You can choose between different control options, such as tilt, touch, or virtual buttons. You can also customize the sensitivity and layout of the controls according to your preference. The game also supports external controllers and gamepads for a more immersive experience.
    • -
    • Modes: Forza Horizon 4 Android has various modes that cater to different tastes and moods. You can play solo or with friends in online or offline mode. You can join or create your own club and compete with other players in team events. You can participate in races, stunts, challenges, showcases, and festivals that offer different rewards and objectives. You can also explore the map freely and discover hidden secrets and collectibles
    • Customization: Forza Horizon 4 Android has a rich and varied customization system that lets you personalize your cars and your character. You can choose from over 450 licensed cars from different manufacturers, such as Ferrari, Lamborghini, Aston Martin, and more. You can also upgrade and tune your cars to improve their performance and appearance. You can change the color, paint, wheels, decals, and accessories of your cars. You can also customize your character's clothing, hairstyle, accessories, and emotes.
    • -
    -

    As you can see, Forza Horizon 4 Android gameplay is very similar to the original game, but with some enhancements and modifications to suit the mobile platform. You will not miss out on anything if you play this game on your Android device. In fact, you might even enjoy it more than the PC or console versions, because you can play it anytime and anywhere you want.

    -

    But how do you get Forza Horizon 4 Android on your device? How do you download and install this game without any hassle or risk? That's what we will explain in the next section.

    -

    Forza Horizon 4 Android Download: How to Get the Game on Your Smartphone or Tablet

    -

    Forza Horizon 4 Android download is not as complicated as you might think. You don't need to root your device or use any complicated software or tools. You just need to follow some simple steps and you will be able to enjoy this game in no time. Here are the steps for Forza Horizon 4 Android download:

    -
      -
    1. Check the requirements: Before you download Forza Horizon 4 Android, you need to make sure that your device meets the minimum requirements for the game. Your device should have at least 4 GB of RAM, 64 GB of storage space, and Android 8.0 or higher. You also need a stable internet connection to download and play the game.
    2. Find the apk and obb files: The next step is to find the apk and obb files for Forza Horizon 4 Android. These are the files that contain the game data and installation package. You can find these files on various websites and platforms that offer Forza Horizon 4 Android download links. However, you need to be careful and avoid any fake or malicious links that might harm your device or steal your data. We recommend that you use our trusted and verified link below to get the apk and obb files safely and securely.
    3. Install the apk and obb files: Once you have downloaded the apk and obb files, you need to install them on your device. To do this, you need to enable the installation of unknown sources on your device settings. Then, you need to locate the apk file on your device storage and tap on it to start the installation process. After that, you need to copy the obb file to the Android/obb folder on your device storage. This will ensure that the game data is properly stored and accessed by the game. (A minimal adb sketch of this step follows this list.)
    4. Launch the game: The final step is to launch the game and enjoy Forza Horizon 4 Android gameplay. You can find the game icon on your device home screen or app drawer. Tap on it to open the game and wait for it to load. You might need to verify your identity or complete a captcha before you can play the game. This is a security measure to prevent bots and hackers from accessing the game. Once you have verified yourself, you can start playing Forza Horizon 4 Android and have fun!
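    For readers who prefer to sideload from a computer, step 3 can also be done over adb. The Python sketch below is only an illustration: it assumes adb is installed with USB debugging enabled, and the file names and package id in it are placeholders rather than the mod's real identifiers.

```python
# Minimal sideload sketch over adb, mirroring step 3 above.
# All names below are placeholders; substitute the files you actually downloaded.
import subprocess

APK = "forza_horizon_4_android.apk"        # placeholder APK file name
OBB = "main.1.com.example.fh4mod.obb"      # placeholder OBB file name
PACKAGE = "com.example.fh4mod"             # placeholder package id
OBB_DIR = f"/sdcard/Android/obb/{PACKAGE}"

subprocess.run(["adb", "install", "-r", APK], check=True)            # install the APK
subprocess.run(["adb", "shell", "mkdir", "-p", OBB_DIR], check=True) # create the obb folder
subprocess.run(["adb", "push", OBB, f"{OBB_DIR}/"], check=True)      # copy the game data
```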

    That's it! You have successfully downloaded and installed Forza Horizon 4 Android on your device. Now you can experience the ultimate racing game on your mobile device whenever you want.

    -

    forza horizon 4 mobile apk + obb free download 2018
    -how to play forza horizon 4 on android with apk and obb files
    -forza horizon 4 android mod apk + data full version 2018
    -download forza horizon 4 for android offline apk + obb
    -forza horizon 4 android gameplay video + download link apk obb
    -forza horizon 4 mobile game apk + obb download for android
    -forza horizon 4 android apk + obb highly compressed 2018
    -forza horizon 4 on android - download & gameplay protech
    -forza horizon 4 android edition apk + obb latest update 2018
    -forza horizon 4 mobile mod apk + obb full download sportsextral
    -forza horizon 4 obb apk - download (android) - apkcombo
    -forza horizon 4 android gameplay hd + apk obb download free
    -forza horizon 4 apk + obb file download for android device
    -forza horizon 4 android game review + download apk and obb
    -forza horizon 4 mobile - how to download and install apk + obb on android
    -best racing games for android like forza horizon 4 apk + obb
    -forza horizon 4 android gameplay trailer + download link apk obb
    -forza horizon 4 mobile apk + obb no verification required 2018
    -forza horizon 4 android game features + download apk and obb data
    -forza horizon 4 mobile - official android gameplay + apk obb download
    -forza horizon 4 android mod menu apk + obb unlimited money 2018
    -forza horizon 4 android gameplay walkthrough part 1 + apk obb download
    -forza horizon 4 mobile - realistic graphics mod apk + obb for android
    -forza horizon 4 android game size and requirements + download apk obb
    -forza horizon 4 mobile - new update apk + obb download for android
    -how to fix forza horizon 4 android apk + obb not working or crashing
    -forza horizon 4 mobile - best cars and customization apk + obb for android
    -forza horizon 4 android gameplay comparison with pc and xbox one
    -forza horizon 4 mobile - open world racing game apk + obb for android
    -how to get forza horizon 4 on android without human verification or survey
    -forza horizon 4 mobile - online multiplayer mode apk + obb for android
    -how to update forza horizon 4 android game to latest version apk + obb
    -forza horizon 4 mobile - tips and tricks to improve your gameplay on android
    -how to transfer your progress from pc or xbox one to forza horizon 4 android game
    -forza horizon 4 mobile - season change and weather effects apk + obb for android
    -how to unlock all cars and tracks in forza horizon 4 android game mod apk + obb
    -forza horizon 4 mobile - new events and challenges apk + obb update for android
    -how to install and run forza horizon 4 on any android device with apk and obb files
    -forza horizon 4 mobile - best settings and performance optimization on android
    -how to play forza horizon 4 on android with controller or keyboard and mouse support

    -

    But wait, there's more! We have some tips and tricks for you that will help you improve your gaming experience and avoid any problems or issues with Forza Horizon 4 Android gameplay and download. Read on and learn more!

    -

    Forza Horizon 4 Android Tips and Tricks: How to Improve Your Gaming Experience

    -

    Forza Horizon 4 Android is a great game that will keep you entertained for hours. However, there are some things that you can do to make it even better and more enjoyable. Here are some tips and tricks for Forza Horizon 4 Android gameplay and download that will help you optimize your settings, master your controls, earn more credits and influence, avoid scams, viruses, and errors, update your game, backup your data, and more.

    -
      -
    • Optimize your settings: Forza Horizon 4 Android has various settings that let you adjust the graphics quality, sound volume, language, etc. You can access these settings from the main menu or pause menu of the game. You should optimize your settings according to your device's capabilities and your personal preference. You can lower the graphics quality if your device is lagging or overheating, or you can increase it if you want to enjoy the game's visuals more. You can also turn off the sound or music if you want to save battery or play in a quiet environment, or you can turn it on if you want to hear the game's soundtrack and sound effects. You can also change the language of the game if you want to play in a different language than the default one.
    • -
    • Master your controls: Forza Horizon 4 Android has different control options that let you steer your car with ease. You can choose between tilt, touch, or virtual buttons, depending on what suits you best. You can also customize the sensitivity and layout of the controls according to your preference. You should practice your controls and get familiar with them before you start racing. You should also learn how to use the different buttons and functions of the game, such as the brake, the handbrake, the nitro, the camera, the map, etc. You should also experiment with different camera angles and views to find the one that gives you the best visibility and perspective.
    • -
    • Earn more credits and influence: Credits and influence are the two main currencies of Forza Horizon 4 Android. Credits are used to buy and upgrade cars, while influence is used to unlock new events and features. You can earn credits and influence by completing races, stunts, challenges, showcases, festivals, and other activities in the game. You can also earn them by exploring the map and finding hidden secrets and collectibles. You can also earn them by joining or creating clubs and competing with other players in team events. You should try to earn as many credits and influence as possible, as they will help you progress faster and access more content in the game.
    • -
    • Avoid scams, viruses, and errors: Forza Horizon 4 Android download is not available on the official Google Play Store or any other official app store. Therefore, you need to be careful and avoid any fake or malicious links that might harm your device or steal your data. You should only use our trusted and verified link below to get the apk and obb files safely and securely. You should also scan the files with a reliable antivirus software before installing them on your device. You should also avoid any websites or platforms that ask you to pay money, fill surveys, provide personal information, or download additional apps or software to get Forza Horizon 4 Android download. These are scams that will not give you the game but will only waste your time and money.
    • -
    • Update your game: Forza Horizon 4 Android is constantly updated by the developers and modders to fix bugs, improve performance, add new features, and enhance gameplay. Therefore, you should always check for updates and install them as soon as they are available. You can check for updates from the game's main menu or pause menu. You can also check for updates from our website or platform where you downloaded the game. Updating your game will ensure that you have the latest version of Forza Horizon 4 Android and that you can enjoy it without any problems or issues.
    • -
    • Backup your data: Forza Horizon 4 Android is a large and complex game that requires a lot of storage space and data on your device. Therefore, you should backup your data regularly to avoid losing it in case of any accident or error. You can backup your data by using a cloud service, such as Google Drive or Dropbox, or by using an external storage device, such as a USB flash drive or a memory card. Backing up your data will ensure that you can restore it if you need to reinstall the game or switch devices.
    • -
    -

    These tips and tricks will help you improve your gaming experience with Forza Horizon 4 Android and avoid problems with both gameplay and download. If you follow them, you will have a smooth and enjoyable time playing this game on your mobile device.

    -

    Conclusion: Why You Should Try Forza Horizon 4 Android Today

    -

    Forza Horizon 4 Android is a fantastic game that lets you play the ultimate racing game on your mobile device. It has all the features and content of the original game, but with some adjustments and improvements to make it run smoothly and look great on smaller screens. It has stunning graphics, realistic physics, diverse gameplay, online multiplayer features, rich customization system, intuitive controls, various modes, etc. It is easy to download and install on your device without any hassle or risk. It is also updated regularly by the developers and modders to fix bugs, improve performance, add new features, and enhance gameplay.

    -

    If you are a fan of racing games, you should not miss out on Forza Horizon 4 Android. It is one of the best mobile racing games ever made and it will give you hours of fun and excitement. You can download and play Forza Horizon 4 Android today by using our trusted and verified link below. Don't wait any longer and join the millions of players who are enjoying this game on their mobile devices. You will not regret it!

    -

    FAQs

    -

    Here are some of the frequently asked questions about Forza Horizon 4 Android gameplay and download:

    -
      -
    1. Is Forza Horizon 4 Android free? Yes, Forza Horizon 4 Android is free to download and play. You don't need to pay any money to get this game on your device. However, you might need to watch some ads or complete some offers to verify yourself before you can play the game. This is a security measure to prevent bots and hackers from accessing the game.
    2. Is Forza Horizon 4 Android safe? Yes, Forza Horizon 4 Android is safe to download and play. The game does not contain any viruses, malware, or spyware that might harm your device or steal your data. The game also does not require any root access or permissions that might compromise your device's security. However, you should always scan the files with reliable antivirus software before installing them on your device.
    3. Is Forza Horizon 4 Android compatible with my device? Forza Horizon 4 Android is compatible with most Android devices that have at least 4 GB of RAM, 64 GB of storage space, and Android 8.0 or higher. However, some devices might not be able to run the game smoothly or at all due to their hardware limitations or software issues. You should check the requirements for Forza Horizon 4 Android before you download and install the game on your device.
    4. How can I update Forza Horizon 4 Android? You can update Forza Horizon 4 Android by checking for updates from the game's main menu or pause menu. You can also check for updates from our website or platform where you downloaded the game. Updating your game will ensure that you have the latest version of Forza Horizon 4 Android and that you can enjoy it without any problems or issues.
    5. How can I contact the developers or modders of Forza Horizon 4 Android? You can contact the developers or modders of Forza Horizon 4 Android by visiting their official website or social media pages. You can also leave a comment or feedback on our website or platform where you downloaded the game. The developers or modders of Forza Horizon 4 Android are always happy to hear from their fans and users, and they will try to answer your questions and solve your problems as soon as possible.

    \ No newline at end of file diff --git a/spaces/fffiloni/controlnet-animation-doodle/node_modules/@types/node/ts4.8/vm.d.ts b/spaces/fffiloni/controlnet-animation-doodle/node_modules/@types/node/ts4.8/vm.d.ts deleted file mode 100644 index c96513a50555debf6fd50aa0e414a18d1d342efb..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/controlnet-animation-doodle/node_modules/@types/node/ts4.8/vm.d.ts +++ /dev/null @@ -1,509 +0,0 @@ -/** - * The `vm` module enables compiling and running code within V8 Virtual - * Machine contexts. - * - * **The `vm` module is not a security** - * **mechanism. Do not use it to run untrusted code.** - * - * JavaScript code can be compiled and run immediately or - * compiled, saved, and run later. - * - * A common use case is to run the code in a different V8 Context. This means - * invoked code has a different global object than the invoking code. - * - * One can provide the context by `contextifying` an - * object. The invoked code treats any property in the context like a - * global variable. Any changes to global variables caused by the invoked - * code are reflected in the context object. - * - * ```js - * const vm = require('vm'); - * - * const x = 1; - * - * const context = { x: 2 }; - * vm.createContext(context); // Contextify the object. - * - * const code = 'x += 40; var y = 17;'; - * // `x` and `y` are global variables in the context. - * // Initially, x has the value 2 because that is the value of context.x. - * vm.runInContext(code, context); - * - * console.log(context.x); // 42 - * console.log(context.y); // 17 - * - * console.log(x); // 1; y is not defined. - * ``` - * @see [source](https://github.com/nodejs/node/blob/v18.0.0/lib/vm.js) - */ -declare module 'vm' { - interface Context extends NodeJS.Dict {} - interface BaseOptions { - /** - * Specifies the filename used in stack traces produced by this script. - * Default: `''`. - */ - filename?: string | undefined; - /** - * Specifies the line number offset that is displayed in stack traces produced by this script. - * Default: `0`. - */ - lineOffset?: number | undefined; - /** - * Specifies the column number offset that is displayed in stack traces produced by this script. - * @default 0 - */ - columnOffset?: number | undefined; - } - interface ScriptOptions extends BaseOptions { - displayErrors?: boolean | undefined; - timeout?: number | undefined; - cachedData?: Buffer | undefined; - /** @deprecated in favor of `script.createCachedData()` */ - produceCachedData?: boolean | undefined; - } - interface RunningScriptOptions extends BaseOptions { - /** - * When `true`, if an `Error` occurs while compiling the `code`, the line of code causing the error is attached to the stack trace. - * Default: `true`. - */ - displayErrors?: boolean | undefined; - /** - * Specifies the number of milliseconds to execute code before terminating execution. - * If execution is terminated, an `Error` will be thrown. This value must be a strictly positive integer. - */ - timeout?: number | undefined; - /** - * If `true`, the execution will be terminated when `SIGINT` (Ctrl+C) is received. - * Existing handlers for the event that have been attached via `process.on('SIGINT')` will be disabled during script execution, but will continue to work after that. - * If execution is terminated, an `Error` will be thrown. - * Default: `false`. - */ - breakOnSigint?: boolean | undefined; - /** - * If set to `afterEvaluate`, microtasks will be run immediately after the script has run. 
- */ - microtaskMode?: 'afterEvaluate' | undefined; - } - interface CompileFunctionOptions extends BaseOptions { - /** - * Provides an optional data with V8's code cache data for the supplied source. - */ - cachedData?: Buffer | undefined; - /** - * Specifies whether to produce new cache data. - * Default: `false`, - */ - produceCachedData?: boolean | undefined; - /** - * The sandbox/context in which the said function should be compiled in. - */ - parsingContext?: Context | undefined; - /** - * An array containing a collection of context extensions (objects wrapping the current scope) to be applied while compiling - */ - contextExtensions?: Object[] | undefined; - } - interface CreateContextOptions { - /** - * Human-readable name of the newly created context. - * @default 'VM Context i' Where i is an ascending numerical index of the created context. - */ - name?: string | undefined; - /** - * Corresponds to the newly created context for display purposes. - * The origin should be formatted like a `URL`, but with only the scheme, host, and port (if necessary), - * like the value of the `url.origin` property of a URL object. - * Most notably, this string should omit the trailing slash, as that denotes a path. - * @default '' - */ - origin?: string | undefined; - codeGeneration?: - | { - /** - * If set to false any calls to eval or function constructors (Function, GeneratorFunction, etc) - * will throw an EvalError. - * @default true - */ - strings?: boolean | undefined; - /** - * If set to false any attempt to compile a WebAssembly module will throw a WebAssembly.CompileError. - * @default true - */ - wasm?: boolean | undefined; - } - | undefined; - /** - * If set to `afterEvaluate`, microtasks will be run immediately after the script has run. - */ - microtaskMode?: 'afterEvaluate' | undefined; - } - type MeasureMemoryMode = 'summary' | 'detailed'; - interface MeasureMemoryOptions { - /** - * @default 'summary' - */ - mode?: MeasureMemoryMode | undefined; - context?: Context | undefined; - } - interface MemoryMeasurement { - total: { - jsMemoryEstimate: number; - jsMemoryRange: [number, number]; - }; - } - /** - * Instances of the `vm.Script` class contain precompiled scripts that can be - * executed in specific contexts. - * @since v0.3.1 - */ - class Script { - constructor(code: string, options?: ScriptOptions); - /** - * Runs the compiled code contained by the `vm.Script` object within the given`contextifiedObject` and returns the result. Running code does not have access - * to local scope. - * - * The following example compiles code that increments a global variable, sets - * the value of another global variable, then execute the code multiple times. - * The globals are contained in the `context` object. - * - * ```js - * const vm = require('vm'); - * - * const context = { - * animal: 'cat', - * count: 2 - * }; - * - * const script = new vm.Script('count += 1; name = "kitty";'); - * - * vm.createContext(context); - * for (let i = 0; i < 10; ++i) { - * script.runInContext(context); - * } - * - * console.log(context); - * // Prints: { animal: 'cat', count: 12, name: 'kitty' } - * ``` - * - * Using the `timeout` or `breakOnSigint` options will result in new event loops - * and corresponding threads being started, which have a non-zero performance - * overhead. - * @since v0.3.1 - * @param contextifiedObject A `contextified` object as returned by the `vm.createContext()` method. - * @return the result of the very last statement executed in the script. 
- */ - runInContext(contextifiedObject: Context, options?: RunningScriptOptions): any; - /** - * First contextifies the given `contextObject`, runs the compiled code contained - * by the `vm.Script` object within the created context, and returns the result. - * Running code does not have access to local scope. - * - * The following example compiles code that sets a global variable, then executes - * the code multiple times in different contexts. The globals are set on and - * contained within each individual `context`. - * - * ```js - * const vm = require('vm'); - * - * const script = new vm.Script('globalVar = "set"'); - * - * const contexts = [{}, {}, {}]; - * contexts.forEach((context) => { - * script.runInNewContext(context); - * }); - * - * console.log(contexts); - * // Prints: [{ globalVar: 'set' }, { globalVar: 'set' }, { globalVar: 'set' }] - * ``` - * @since v0.3.1 - * @param contextObject An object that will be `contextified`. If `undefined`, a new object will be created. - * @return the result of the very last statement executed in the script. - */ - runInNewContext(contextObject?: Context, options?: RunningScriptOptions): any; - /** - * Runs the compiled code contained by the `vm.Script` within the context of the - * current `global` object. Running code does not have access to local scope, but _does_ have access to the current `global` object. - * - * The following example compiles code that increments a `global` variable then - * executes that code multiple times: - * - * ```js - * const vm = require('vm'); - * - * global.globalVar = 0; - * - * const script = new vm.Script('globalVar += 1', { filename: 'myfile.vm' }); - * - * for (let i = 0; i < 1000; ++i) { - * script.runInThisContext(); - * } - * - * console.log(globalVar); - * - * // 1000 - * ``` - * @since v0.3.1 - * @return the result of the very last statement executed in the script. - */ - runInThisContext(options?: RunningScriptOptions): any; - /** - * Creates a code cache that can be used with the `Script` constructor's`cachedData` option. Returns a `Buffer`. This method may be called at any - * time and any number of times. - * - * ```js - * const script = new vm.Script(` - * function add(a, b) { - * return a + b; - * } - * - * const x = add(1, 2); - * `); - * - * const cacheWithoutX = script.createCachedData(); - * - * script.runInThisContext(); - * - * const cacheWithX = script.createCachedData(); - * ``` - * @since v10.6.0 - */ - createCachedData(): Buffer; - /** @deprecated in favor of `script.createCachedData()` */ - cachedDataProduced?: boolean | undefined; - cachedDataRejected?: boolean | undefined; - cachedData?: Buffer | undefined; - } - /** - * If given a `contextObject`, the `vm.createContext()` method will `prepare - * that object` so that it can be used in calls to {@link runInContext} or `script.runInContext()`. Inside such scripts, - * the `contextObject` will be the global object, retaining all of its existing - * properties but also having the built-in objects and functions any standard [global object](https://es5.github.io/#x15.1) has. Outside of scripts run by the vm module, global variables - * will remain unchanged. 
- * - * ```js - * const vm = require('vm'); - * - * global.globalVar = 3; - * - * const context = { globalVar: 1 }; - * vm.createContext(context); - * - * vm.runInContext('globalVar *= 2;', context); - * - * console.log(context); - * // Prints: { globalVar: 2 } - * - * console.log(global.globalVar); - * // Prints: 3 - * ``` - * - * If `contextObject` is omitted (or passed explicitly as `undefined`), a new, - * empty `contextified` object will be returned. - * - * The `vm.createContext()` method is primarily useful for creating a single - * context that can be used to run multiple scripts. For instance, if emulating a - * web browser, the method can be used to create a single context representing a - * window's global object, then run all ` - - - - - - - - - -

    Contents:

    • Introduction
    • How to use
    • 使用说明
    • Examples
    • radiobee package

    radiobee

    • radiobee package
      • Submodules
      • radiobee.align_sents module
      • radiobee.align_texts module
      • radiobee.amend_avec module
      • radiobee.app module
      • radiobee.cmat2tset module
      • radiobee.docterm_scores module
      • radiobee.en2zh module
      • radiobee.en2zh_tokens module
      • radiobee.file2text module
      • radiobee.files2df module
      • radiobee.gen_aset module
      • radiobee.gen_eps_minsamples module
      • radiobee.gen_model module
      • radiobee.gen_pset module
      • radiobee.gen_row_alignment module
      • radiobee.insert_spaces module
      • radiobee.interpolate_pset module
      • radiobee.lists2cmat module
      • radiobee.loadtext module
      • radiobee.mdx_e2c module
      • radiobee.plot_cmat module
      • radiobee.plot_df module
      • radiobee.process_upload module
      • radiobee.seg_text module
      • radiobee.shuffle_sents module
      • radiobee.smatrix module
      • radiobee.trim_df module
      • Module contents

    © Copyright 2022, mu.

    - - - - \ No newline at end of file diff --git a/spaces/mikeee/radiobee-dev/radiobee/__init__.py b/spaces/mikeee/radiobee-dev/radiobee/__init__.py deleted file mode 100644 index d11e89b4b801128603b93b3a56df287548498327..0000000000000000000000000000000000000000 --- a/spaces/mikeee/radiobee-dev/radiobee/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -"""Init.""" -__version__ = "0.1.1a0" diff --git a/spaces/mingyuan/MotionDiffuse/options/base_options.py b/spaces/mingyuan/MotionDiffuse/options/base_options.py deleted file mode 100644 index 8fbf3fa21e33200f2d917ecd999a6992d5ee827c..0000000000000000000000000000000000000000 --- a/spaces/mingyuan/MotionDiffuse/options/base_options.py +++ /dev/null @@ -1,86 +0,0 @@ -import argparse -import os -import torch -from mmcv.runner import get_dist_info -import torch.distributed as dist - - -class BaseOptions(): - def __init__(self): - self.parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) - self.initialized = False - - def initialize(self): - self.parser.add_argument('--name', type=str, default="test", help='Name of this trial') - self.parser.add_argument('--decomp_name', type=str, default="Decomp_SP001_SM001_H512", help='Name of autoencoder model') - - self.parser.add_argument("--gpu_id", type=int, default=-1, help='GPU id') - self.parser.add_argument("--distributed", action="store_true", help='Weather to use DDP training') - - self.parser.add_argument('--dataset_name', type=str, default='t2m', help='Dataset Name') - self.parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here') - - self.parser.add_argument("--unit_length", type=int, default=4, help="Motions are cropped to the maximum times of unit_length") - self.parser.add_argument("--max_text_len", type=int, default=20, help="Maximum length of text description") - - self.parser.add_argument('--text_enc_mod', type=str, default='bigru') - self.parser.add_argument('--estimator_mod', type=str, default='bigru') - - self.parser.add_argument('--dim_text_hidden', type=int, default=512, help='Dimension of hidden unit in text encoder') - self.parser.add_argument('--dim_att_vec', type=int, default=512, help='Dimension of attention vector') - self.parser.add_argument('--dim_z', type=int, default=128, help='Dimension of latent Gaussian vector') - - self.parser.add_argument('--n_layers_pri', type=int, default=1, help='Number of layers in prior network') - self.parser.add_argument('--n_layers_pos', type=int, default=1, help='Number of layers in posterior network') - self.parser.add_argument('--n_layers_dec', type=int, default=1, help='Number of layers in generator') - - self.parser.add_argument('--dim_pri_hidden', type=int, default=1024, help='Dimension of hidden unit in prior network') - self.parser.add_argument('--dim_pos_hidden', type=int, default=1024, help='Dimension of hidden unit in posterior network') - self.parser.add_argument('--dim_dec_hidden', type=int, default=1024, help='Dimension of hidden unit in generator') - - self.parser.add_argument('--dim_movement_enc_hidden', type=int, default=512, - help='Dimension of hidden in AutoEncoder(encoder)') - self.parser.add_argument('--dim_movement_dec_hidden', type=int, default=512, - help='Dimension of hidden in AutoEncoder(decoder)') - self.parser.add_argument('--dim_movement_latent', type=int, default=512, help='Dimension of motion snippet') - - self.initialized = True - - - - def parse(self): - if not self.initialized: - self.initialize() - - self.opt = self.parser.parse_args() 
- - self.opt.is_train = self.is_train - - if self.opt.gpu_id != -1: - # self.opt.gpu_id = int(self.opt.gpu_id) - torch.cuda.set_device(self.opt.gpu_id) - - args = vars(self.opt) - - if args["distributed"]: - init_dist('slurm') - rank, world_size = get_dist_info() - if rank == 0: - print('------------ Options -------------') - for k, v in sorted(args.items()): - print('%s: %s' % (str(k), str(v))) - print('-------------- End ----------------') - if self.is_train: - # save to the disk - expr_dir = os.path.join(self.opt.checkpoints_dir, self.opt.dataset_name, self.opt.name) - if not os.path.exists(expr_dir): - os.makedirs(expr_dir) - file_name = os.path.join(expr_dir, 'opt.txt') - with open(file_name, 'wt') as opt_file: - opt_file.write('------------ Options -------------\n') - for k, v in sorted(args.items()): - opt_file.write('%s: %s\n' % (str(k), str(v))) - opt_file.write('-------------- End ----------------\n') - if world_size > 1: - dist.barrier() - return self.opt diff --git a/spaces/mishig/smarter_npc/README.md b/spaces/mishig/smarter_npc/README.md deleted file mode 100644 index 99a5b81c416bcbd8ccbb6edd29319493cd2d03cc..0000000000000000000000000000000000000000 --- a/spaces/mishig/smarter_npc/README.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: Smarter NPC -emoji: 🤖 -colorFrom: pink -colorTo: purple -sdk: static -pinned: true ---- - -

    Smarter NPC (non-player character)

    -

    Given a user input, typeform/distilbert-base-uncased-mnli (a zero-shot classification model) is used to find the closest action from the available set of actions ("dance", "text", "fight", "run").

    -

    This WebGL demo demonstrates how you can create a smart NPC easily by using the 🤗 Inference API; a minimal sketch of the idea follows below.

    -
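
    The snippet below is only an illustrative sketch of this approach, not the Space's actual source: it calls the hosted Inference API's zero-shot classification endpoint to pick the closest action. The `HF_TOKEN` environment variable and the `closest_action` helper are assumptions introduced here for the example.

    ```python
    # Minimal sketch (not this Space's actual code): map a user utterance to the
    # closest NPC action via the hosted Inference API's zero-shot classification
    # endpoint. HF_TOKEN is a placeholder environment variable for an API token.
    import os
    import requests

    API_URL = "https://api-inference.huggingface.co/models/typeform/distilbert-base-uncased-mnli"
    HEADERS = {"Authorization": f"Bearer {os.environ['HF_TOKEN']}"}
    ACTIONS = ["dance", "text", "fight", "run"]


    def closest_action(user_input: str) -> str:
        payload = {"inputs": user_input, "parameters": {"candidate_labels": ACTIONS}}
        resp = requests.post(API_URL, headers=HEADERS, json=payload, timeout=30)
        resp.raise_for_status()
        result = resp.json()
        # The endpoint returns candidate labels sorted by score, highest first.
        return result["labels"][0]


    if __name__ == "__main__":
        print(closest_action("Let's put on some music and move!"))  # likely "dance"
    ```

    The same request can be issued directly from the WebGL client; the Python version above is just the shortest way to show the payload shape the zero-shot endpoint expects.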

    Find the source code in Files and Versions of this Space.

    -

    Find the PlayCanvas project here

    -
    - -
    - \ No newline at end of file diff --git a/spaces/mithril-security/blind_chat/src/routes/conversation/[id]/stop-generating/+server.ts b/spaces/mithril-security/blind_chat/src/routes/conversation/[id]/stop-generating/+server.ts deleted file mode 100644 index 2c749156f82ada11bcea3b14d4a60cddb72d5825..0000000000000000000000000000000000000000 --- a/spaces/mithril-security/blind_chat/src/routes/conversation/[id]/stop-generating/+server.ts +++ /dev/null @@ -1,23 +0,0 @@ -import { authCondition } from "$lib/server/auth"; -import { collections } from "$lib/server/database"; -import { error } from "@sveltejs/kit"; - -/** - * Ideally, we'd be able to detect the client-side abort, see https://github.com/huggingface/chat-ui/pull/88#issuecomment-1523173850 - */ -export async function POST({ params, locals }) { - /*const conversationId = new ObjectId(params.id); - - const conversation = await collections.conversations.findOne({ - _id: conversationId, - ...authCondition(locals), - }); - - await collections.abortedGenerations.updateOne( - { conversationId }, - { $set: { updatedAt: new Date() }, $setOnInsert: { createdAt: new Date() } }, - { upsert: true } - );*/ - - return new Response(); -} diff --git a/spaces/mmlab-ntu/Segment-Any-RGBD/datasets/scannet_preprocess/meta_data/scannet200_splits.py b/spaces/mmlab-ntu/Segment-Any-RGBD/datasets/scannet_preprocess/meta_data/scannet200_splits.py deleted file mode 100644 index 9e66fc81f2c48e7df5ce328dc89f11ad3f4eb98a..0000000000000000000000000000000000000000 --- a/spaces/mmlab-ntu/Segment-Any-RGBD/datasets/scannet_preprocess/meta_data/scannet200_splits.py +++ /dev/null @@ -1,18 +0,0 @@ -# This file contains the HEAD - COMMON - TAIL split category ids for ScanNet 200 - -HEAD_CATS_SCANNET_200 = ['tv stand', 'curtain', 'blinds', 'shower curtain', 'bookshelf', 'tv', 'kitchen cabinet', 'pillow', 'lamp', 'dresser', 'monitor', 'object', 'ceiling', 'board', 'stove', 'closet wall', 'couch', 'office chair', 'kitchen counter', 'shower', 'closet', 'doorframe', 'sofa chair', 'mailbox', 'nightstand', 'washing machine', 'picture', 'book', 'sink', 'recycling bin', 'table', 'backpack', 'shower wall', 'toilet', 'copier', 'counter', 'stool', 'refrigerator', 'window', 'file cabinet', 'chair', 'wall', 'plant', 'coffee table', 'stairs', 'armchair', 'cabinet', 'bathroom vanity', 'bathroom stall', 'mirror', 'blackboard', 'trash can', 'stair rail', 'box', 'towel', 'door', 'clothes', 'whiteboard', 'bed', 'floor', 'bathtub', 'desk', 'wardrobe', 'clothes dryer', 'radiator', 'shelf'] -COMMON_CATS_SCANNET_200 = ["cushion", "end table", "dining table", "keyboard", "bag", "toilet paper", "printer", "blanket", "microwave", "shoe", "computer tower", "bottle", "bin", "ottoman", "bench", "basket", "fan", "laptop", "person", "paper towel dispenser", "oven", "rack", "piano", "suitcase", "rail", "container", "telephone", "stand", "light", "laundry basket", "pipe", "seat", "column", "bicycle", "ladder", "jacket", "storage bin", "coffee maker", "dishwasher", "machine", "mat", "windowsill", "bulletin board", "fireplace", "mini fridge", "water cooler", "shower door", "pillar", "ledge", "furniture", "cart", "decoration", "closet door", "vacuum cleaner", "dish rack", "range hood", "projector screen", "divider", "bathroom counter", "laundry hamper", "bathroom stall door", "ceiling light", "trash bin", "bathroom cabinet", "structure", "storage organizer", "potted plant", "mattress"] -TAIL_CATS_SCANNET_200 = ["paper", "plate", "soap dispenser", "bucket", "clock", "guitar", "toilet paper holder", "speaker", 
"cup", "paper towel roll", "bar", "toaster", "ironing board", "soap dish", "toilet paper dispenser", "fire extinguisher", "ball", "hat", "shower curtain rod", "paper cutter", "tray", "toaster oven", "mouse", "toilet seat cover dispenser", "storage container", "scale", "tissue box", "light switch", "crate", "power outlet", "sign", "projector", "candle", "plunger", "stuffed animal", "headphones", "broom", "guitar case", "dustpan", "hair dryer", "water bottle", "handicap bar", "purse", "vent", "shower floor", "water pitcher", "bowl", "paper bag", "alarm clock", "music stand", "laundry detergent", "dumbbell", "tube", "cd case", "closet rod", "coffee kettle", "shower head", "keyboard piano", "case of water bottles", "coat rack", "folded chair", "fire alarm", "power strip", "calendar", "poster", "luggage"] - - -# Given the different size of the official train and val sets, not all ScanNet200 categories are present in the validation set. -# Here we list of categories with labels and IDs present in both train and validation set, and the remaining categories those are present in train, but not in val -# We dont evaluate on unseen validation categories in this benchmark - -VALID_CLASS_IDS_200_VALIDATION = ('wall', 'chair', 'floor', 'table', 'door', 'couch', 'cabinet', 'shelf', 'desk', 'office chair', 'bed', 'pillow', 'sink', 'picture', 'window', 'toilet', 'bookshelf', 'monitor', 'curtain', 'book', 'armchair', 'coffee table', 'box', 'refrigerator', 'lamp', 'kitchen cabinet', 'towel', 'clothes', 'tv', 'nightstand', 'counter', 'dresser', 'stool', 'cushion', 'plant', 'ceiling', 'bathtub', 'end table', 'dining table', 'keyboard', 'bag', 'backpack', 'toilet paper', 'printer', 'tv stand', 'whiteboard', 'blanket', 'shower curtain', 'trash can', 'closet', 'stairs', 'microwave', 'stove', 'shoe', 'computer tower', 'bottle', 'bin', 'ottoman', 'bench', 'board', 'washing machine', 'mirror', 'copier', 'basket', 'sofa chair', 'file cabinet', 'fan', 'laptop', 'shower', 'paper', 'person', 'paper towel dispenser', 'oven', 'blinds', 'rack', 'plate', 'blackboard', 'piano', 'suitcase', 'rail', 'radiator', 'recycling bin', 'container', 'wardrobe', 'soap dispenser', 'telephone', 'bucket', 'clock', 'stand', 'light', 'laundry basket', 'pipe', 'clothes dryer', 'guitar', 'toilet paper holder', 'seat', 'speaker', 'column', 'ladder', 'bathroom stall', 'shower wall', 'cup', 'jacket', 'storage bin', 'coffee maker', 'dishwasher', 'paper towel roll', 'machine', 'mat', 'windowsill', 'bar', 'toaster', 'bulletin board', 'ironing board', 'fireplace', 'soap dish', 'kitchen counter', 'doorframe', 'toilet paper dispenser', 'mini fridge', 'fire extinguisher', 'ball', 'hat', 'shower curtain rod', 'water cooler', 'paper cutter', 'tray', 'shower door', 'pillar', 'ledge', 'toaster oven', 'mouse', 'toilet seat cover dispenser', 'furniture', 'cart', 'scale', 'tissue box', 'light switch', 'crate', 'power outlet', 'decoration', 'sign', 'projector', 'closet door', 'vacuum cleaner', 'plunger', 'stuffed animal', 'headphones', 'dish rack', 'broom', 'range hood', 'dustpan', 'hair dryer', 'water bottle', 'handicap bar', 'vent', 'shower floor', 'water pitcher', 'mailbox', 'bowl', 'paper bag', 'projector screen', 'divider', 'laundry detergent', 'bathroom counter', 'object', 'bathroom vanity', 'closet wall', 'laundry hamper', 'bathroom stall door', 'ceiling light', 'trash bin', 'dumbbell', 'stair rail', 'tube', 'bathroom cabinet', 'closet rod', 'coffee kettle', 'shower head', 'keyboard piano', 'case of water bottles', 'coat rack', 'folded chair', 'fire 
alarm', 'power strip', 'calendar', 'poster', 'potted plant', 'mattress') - -CLASS_LABELS_200_VALIDATION = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 24, 26, 27, 28, 29, 31, 32, 33, 34, 35, 36, 38, 39, 40, 41, 42, 44, 45, 46, 47, 48, 49, 50, 51, 52, 54, 55, 56, 57, 58, 59, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 82, 84, 86, 87, 88, 89, 90, 93, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 110, 112, 115, 116, 118, 120, 122, 125, 128, 130, 131, 132, 134, 136, 138, 139, 140, 141, 145, 148, 154, 155, 156, 157, 159, 161, 163, 165, 166, 168, 169, 170, 177, 180, 185, 188, 191, 193, 195, 202, 208, 213, 214, 229, 230, 232, 233, 242, 250, 261, 264, 276, 283, 300, 304, 312, 323, 325, 342, 356, 370, 392, 395, 408, 417, 488, 540, 562, 570, 609, 748, 776, 1156, 1163, 1164, 1165, 1166, 1167, 1168, 1169, 1170, 1171, 1172, 1173, 1175, 1176, 1179, 1180, 1181, 1182, 1184, 1185, 1186, 1187, 1188, 1189, 1191) - -VALID_CLASS_IDS_200_TRAIN_ONLY = ('bicycle', 'storage container', 'candle', 'guitar case', 'purse', 'alarm clock', 'music stand', 'cd case', 'structure', 'storage organizer', 'luggage') - -CLASS_LABELS_200_TRAIN_ONLY = (121, 221, 286, 331, 399, 572, 581, 1174, 1178, 1183, 1190) \ No newline at end of file diff --git a/spaces/mmlab-ntu/Segment-Any-RGBD/third_party/CLIP/clip/clip.py b/spaces/mmlab-ntu/Segment-Any-RGBD/third_party/CLIP/clip/clip.py deleted file mode 100644 index 6d733edfac02d81ba3e402eb7e702764728bdaa2..0000000000000000000000000000000000000000 --- a/spaces/mmlab-ntu/Segment-Any-RGBD/third_party/CLIP/clip/clip.py +++ /dev/null @@ -1,285 +0,0 @@ -import hashlib -import os -import urllib -import warnings -from collections import OrderedDict -from typing import Union, List - -import torch -from PIL import Image -from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize -from tqdm import tqdm - -from .model import build_model -from .simple_tokenizer import SimpleTokenizer as _Tokenizer - -try: - from torchvision.transforms import InterpolationMode - - BICUBIC = InterpolationMode.BICUBIC -except ImportError: - BICUBIC = Image.BICUBIC - - -if torch.__version__.split(".") < ["1", "7", "1"]: - warnings.warn("PyTorch version 1.7.1 or higher is recommended") - - -__all__ = ["available_models", "load", "tokenize"] -_tokenizer = _Tokenizer() - -_MODELS = { - "RN50": "https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt", - "RN101": "https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt", - "RN50x4": "https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt", - "RN50x16": "https://openaipublic.azureedge.net/clip/models/52378b407f34354e150460fe41077663dd5b39c54cd0bfd2b27167a4a06ec9aa/RN50x16.pt", - "ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt", - "ViT-B/16": "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt", - "ViT-L/14": "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt", - "ViT-L/14@336px": "https://openaipublic.azureedge.net/clip/models/3035c92b350959924f9f00213499208652fc7ea050643e8b385c2dac08641f02/ViT-L-14-336px.pt", -} - - -def _download(url: str, root: str 
= os.path.expanduser("~/.cache/clip")): - os.makedirs(root, exist_ok=True) - filename = os.path.basename(url) - - expected_sha256 = url.split("/")[-2] - download_target = os.path.join(root, filename) - - if os.path.exists(download_target) and not os.path.isfile(download_target): - raise RuntimeError(f"{download_target} exists and is not a regular file") - - if os.path.isfile(download_target): - if ( - hashlib.sha256(open(download_target, "rb").read()).hexdigest() - == expected_sha256 - ): - return download_target - else: - warnings.warn( - f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file" - ) - - with urllib.request.urlopen(url) as source, open(download_target, "wb") as output: - with tqdm( - total=int(source.info().get("Content-Length")), - ncols=80, - unit="iB", - unit_scale=True, - ) as loop: - while True: - buffer = source.read(8192) - if not buffer: - break - - output.write(buffer) - loop.update(len(buffer)) - - if ( - hashlib.sha256(open(download_target, "rb").read()).hexdigest() - != expected_sha256 - ): - raise RuntimeError( - f"Model has been downloaded but the SHA256 checksum does not not match" - ) - - return download_target - - -def _transform(n_px): - return Compose( - [ - Resize(n_px, interpolation=BICUBIC), - CenterCrop(n_px), - lambda image: image.convert("RGB"), - ToTensor(), - Normalize( - (0.48145466, 0.4578275, 0.40821073), - (0.26862954, 0.26130258, 0.27577711), - ), - ] - ) - - -def available_models() -> List[str]: - """Returns the names of available CLIP models""" - return list(_MODELS.keys()) - - -def load( - name: str, - mask_prompt_depth: int = 0, - device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu", - jit=False, -): - """Load a CLIP model - - Parameters - ---------- - name : str - A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict - - device : Union[str, torch.device] - The device to put the loaded model - - jit : bool - Whether to load the optimized JIT model or more hackable non-JIT model (default). - - Returns - ------- - model : torch.nn.Module - The CLIP model - - preprocess : Callable[[PIL.Image], torch.Tensor] - A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input - """ - if name in _MODELS: - model_path = _download(_MODELS[name]) - elif os.path.isfile(name): - model_path = name - else: - raise RuntimeError( - f"Model {name} not found; available models = {available_models()}" - ) - - try: - # loading JIT archive - model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval() - state_dict = None - except RuntimeError: - # loading saved state dict - if jit: - warnings.warn( - f"File {model_path} is not a JIT archive. 
Loading as a state dict instead" - ) - jit = False - state_dict = torch.load(model_path, map_location="cpu") - if 'state_dict' in state_dict: - new_state_dict = OrderedDict() - for k, v in state_dict['state_dict'].items(): - if k.startswith('module.'): - name = k[7:] # remove `module.` - new_state_dict[name] = v - state_dict = new_state_dict - - if not jit: - model = build_model(state_dict or model.state_dict(), mask_prompt_depth).to(device) - if str(device) == "cpu": - model.float() - return model, _transform(model.visual.input_resolution) - - # patch the device names - device_holder = torch.jit.trace( - lambda: torch.ones([]).to(torch.device(device)), example_inputs=[] - ) - device_node = [ - n - for n in device_holder.graph.findAllNodes("prim::Constant") - if "Device" in repr(n) - ][-1] - - def patch_device(module): - try: - graphs = [module.graph] if hasattr(module, "graph") else [] - except RuntimeError: - graphs = [] - - if hasattr(module, "forward1"): - graphs.append(module.forward1.graph) - - for graph in graphs: - for node in graph.findAllNodes("prim::Constant"): - if "value" in node.attributeNames() and str(node["value"]).startswith( - "cuda" - ): - node.copyAttributes(device_node) - - model.apply(patch_device) - patch_device(model.encode_image) - patch_device(model.encode_text) - - # patch dtype to float32 on CPU - if str(device) == "cpu": - float_holder = torch.jit.trace( - lambda: torch.ones([]).float(), example_inputs=[] - ) - float_input = list(float_holder.graph.findNode("aten::to").inputs())[1] - float_node = float_input.node() - - def patch_float(module): - try: - graphs = [module.graph] if hasattr(module, "graph") else [] - except RuntimeError: - graphs = [] - - if hasattr(module, "forward1"): - graphs.append(module.forward1.graph) - - for graph in graphs: - for node in graph.findAllNodes("aten::to"): - inputs = list(node.inputs()) - for i in [ - 1, - 2, - ]: # dtype can be the second or third argument to aten::to() - if inputs[i].node()["value"] == 5: - inputs[i].node().copyAttributes(float_node) - - model.apply(patch_float) - patch_float(model.encode_image) - patch_float(model.encode_text) - - model.float() - - return model, _transform(model.input_resolution.item()) - - -def tokenize( - texts: Union[str, List[str]], - context_length: int = 77, - truncate: bool = False, - return_length: bool = False, -) -> torch.LongTensor: - """ - Returns the tokenized representation of given input string(s) - - Parameters - ---------- - texts : Union[str, List[str]] - An input string or a list of input strings to tokenize - - context_length : int - The context length to use; all CLIP models use 77 as the context length - - truncate: bool - Whether to truncate the text in case its encoding is longer than the context length - - Returns - ------- - A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length] - """ - if isinstance(texts, str): - texts = [texts] - - sot_token = _tokenizer.encoder["<|startoftext|>"] - eot_token = _tokenizer.encoder["<|endoftext|>"] - all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts] - result = torch.zeros(len(all_tokens), context_length, dtype=torch.long) - length = [] - for i, tokens in enumerate(all_tokens): - if len(tokens) > context_length: - if truncate: - tokens = tokens[:context_length] - tokens[-1] = eot_token - length.append(context_length) - else: - raise RuntimeError( - f"Input {texts[i]} is too long for context length {context_length}" - ) - else: - 
length.append(len(tokens)) - result[i, : len(tokens)] = torch.tensor(tokens) - if return_length: - return result, length - return result diff --git a/spaces/mshkdm/VToonify/vtoonify/model/raft/core/__init__.py b/spaces/mshkdm/VToonify/vtoonify/model/raft/core/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/mshukor/UnIVAL/fairseq/examples/scaling_nmt/README.md b/spaces/mshukor/UnIVAL/fairseq/examples/scaling_nmt/README.md deleted file mode 100644 index 0cc3360c3bbd58fe35a51591db8f081fc8576877..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/examples/scaling_nmt/README.md +++ /dev/null @@ -1,114 +0,0 @@ -# Scaling Neural Machine Translation (Ott et al., 2018) - -This page includes instructions for reproducing results from the paper [Scaling Neural Machine Translation (Ott et al., 2018)](https://arxiv.org/abs/1806.00187). - -## Pre-trained models - -Model | Description | Dataset | Download ----|---|---|--- -`transformer.wmt14.en-fr` | Transformer
    ([Ott et al., 2018](https://arxiv.org/abs/1806.00187)) | [WMT14 English-French](http://statmt.org/wmt14/translation-task.html#Download) | model:
    [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/models/wmt14.en-fr.joined-dict.transformer.tar.bz2)
    newstest2014:
    [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt14.en-fr.joined-dict.newstest2014.tar.bz2) -`transformer.wmt16.en-de` | Transformer
    ([Ott et al., 2018](https://arxiv.org/abs/1806.00187)) | [WMT16 English-German](https://drive.google.com/uc?export=download&id=0B_bZck-ksdkpM25jRUN2X2UxMm8) | model:
    [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/models/wmt16.en-de.joined-dict.transformer.tar.bz2)
    newstest2014:
    [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt16.en-de.joined-dict.newstest2014.tar.bz2) - -## Training a new model on WMT'16 En-De - -First download the [preprocessed WMT'16 En-De data provided by Google](https://drive.google.com/uc?export=download&id=0B_bZck-ksdkpM25jRUN2X2UxMm8). - -Then: - -##### 1. Extract the WMT'16 En-De data -```bash -TEXT=wmt16_en_de_bpe32k -mkdir -p $TEXT -tar -xzvf wmt16_en_de.tar.gz -C $TEXT -``` - -##### 2. Preprocess the dataset with a joined dictionary -```bash -fairseq-preprocess \ - --source-lang en --target-lang de \ - --trainpref $TEXT/train.tok.clean.bpe.32000 \ - --validpref $TEXT/newstest2013.tok.bpe.32000 \ - --testpref $TEXT/newstest2014.tok.bpe.32000 \ - --destdir data-bin/wmt16_en_de_bpe32k \ - --nwordssrc 32768 --nwordstgt 32768 \ - --joined-dictionary \ - --workers 20 -``` - -##### 3. Train a model -```bash -fairseq-train \ - data-bin/wmt16_en_de_bpe32k \ - --arch transformer_vaswani_wmt_en_de_big --share-all-embeddings \ - --optimizer adam --adam-betas '(0.9, 0.98)' --clip-norm 0.0 \ - --lr 0.0005 --lr-scheduler inverse_sqrt --warmup-updates 4000 --warmup-init-lr 1e-07 \ - --dropout 0.3 --weight-decay 0.0 \ - --criterion label_smoothed_cross_entropy --label-smoothing 0.1 \ - --max-tokens 3584 \ - --fp16 -``` - -Note that the `--fp16` flag requires you have CUDA 9.1 or greater and a Volta GPU or newer. - -***IMPORTANT:*** You will get better performance by training with big batches and -increasing the learning rate. If you want to train the above model with big batches -(assuming your machine has 8 GPUs): -- add `--update-freq 16` to simulate training on 8x16=128 GPUs -- increase the learning rate; 0.001 works well for big batches - -##### 4. Evaluate - -Now we can evaluate our trained model. - -Note that the original [Attention Is All You Need](https://arxiv.org/abs/1706.03762) -paper used a couple tricks to achieve better BLEU scores. We use these same tricks in -the Scaling NMT paper, so it's important to apply them when reproducing our results. - -First, use the [average_checkpoints.py](/scripts/average_checkpoints.py) script to -average the last few checkpoints. Averaging the last 5-10 checkpoints is usually -good, but you may need to adjust this depending on how long you've trained: -```bash -python scripts/average_checkpoints \ - --inputs /path/to/checkpoints \ - --num-epoch-checkpoints 10 \ - --output checkpoint.avg10.pt -``` - -Next, generate translations using a beam width of 4 and length penalty of 0.6: -```bash -fairseq-generate \ - data-bin/wmt16_en_de_bpe32k \ - --path checkpoint.avg10.pt \ - --beam 4 --lenpen 0.6 --remove-bpe > gen.out -``` - -Finally, we apply the ["compound splitting" script](/scripts/compound_split_bleu.sh) to -add spaces around dashes. For example "Café-Liebhaber" would become three tokens: -"Café - Liebhaber". This typically results in larger BLEU scores, but it is not -appropriate to compare these inflated scores to work which does not include this trick. -This trick was used in the [original AIAYN code](https://github.com/tensorflow/tensor2tensor/blob/fc9335c0203685cbbfe2b30c92db4352d8f60779/tensor2tensor/utils/get_ende_bleu.sh), -so we used it in the Scaling NMT paper as well. That said, it's strongly advised to -report [sacrebleu](https://github.com/mjpost/sacrebleu) scores instead. 
- -To compute "compound split" tokenized BLEU (not recommended!): -```bash -bash scripts/compound_split_bleu.sh gen.out -# BLEU4 = 29.29, 60.3/35.0/22.8/15.3 (BP=1.000, ratio=1.004, syslen=64763, reflen=64496) -``` - -To compute detokenized BLEU with sacrebleu (preferred): -```bash -bash scripts/sacrebleu.sh wmt14/full en de gen.out -# BLEU+case.mixed+lang.en-de+numrefs.1+smooth.exp+test.wmt14/full+tok.13a+version.1.4.3 = 28.6 59.3/34.3/22.1/14.9 (BP = 1.000 ratio = 1.016 hyp_len = 63666 ref_len = 62688) -``` - -## Citation - -```bibtex -@inproceedings{ott2018scaling, - title = {Scaling Neural Machine Translation}, - author = {Ott, Myle and Edunov, Sergey and Grangier, David and Auli, Michael}, - booktitle = {Proceedings of the Third Conference on Machine Translation (WMT)}, - year = 2018, -} -``` diff --git a/spaces/mshukor/UnIVAL/fairseq/fairseq/data/transform_eos_dataset.py b/spaces/mshukor/UnIVAL/fairseq/fairseq/data/transform_eos_dataset.py deleted file mode 100644 index fb14ff018edf13b20f5d0e486692dfb0a37ec6d1..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/fairseq/data/transform_eos_dataset.py +++ /dev/null @@ -1,120 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import torch - -from . import FairseqDataset - - -class TransformEosDataset(FairseqDataset): - """A :class:`~fairseq.data.FairseqDataset` wrapper that appends/prepends/strips EOS. - - Note that the transformation is applied in :func:`collater`. - - Args: - dataset (~fairseq.data.FairseqDataset): dataset to wrap - eos (int): index of the end-of-sentence symbol - append_eos_to_src (bool, optional): append EOS to the end of src - remove_eos_from_src (bool, optional): remove EOS from the end of src - append_eos_to_tgt (bool, optional): append EOS to the end of tgt - remove_eos_from_tgt (bool, optional): remove EOS from the end of tgt - """ - - def __init__( - self, - dataset, - eos, - append_eos_to_src=False, - remove_eos_from_src=False, - append_eos_to_tgt=False, - remove_eos_from_tgt=False, - has_target=True, - ): - if not isinstance(dataset, FairseqDataset): - raise ValueError("dataset must be an instance of FairseqDataset") - if append_eos_to_src and remove_eos_from_src: - raise ValueError("cannot combine append_eos_to_src and remove_eos_from_src") - if append_eos_to_tgt and remove_eos_from_tgt: - raise ValueError("cannot combine append_eos_to_tgt and remove_eos_from_tgt") - - self.dataset = dataset - self.eos = torch.LongTensor([eos]) - self.append_eos_to_src = append_eos_to_src - self.remove_eos_from_src = remove_eos_from_src - self.append_eos_to_tgt = append_eos_to_tgt - self.remove_eos_from_tgt = remove_eos_from_tgt - self.has_target = has_target - - # precompute how we should adjust the reported sizes - self._src_delta = 0 - self._src_delta += 1 if append_eos_to_src else 0 - self._src_delta -= 1 if remove_eos_from_src else 0 - self._tgt_delta = 0 - self._tgt_delta += 1 if append_eos_to_tgt else 0 - self._tgt_delta -= 1 if remove_eos_from_tgt else 0 - - self._checked_src = False - self._checked_tgt = False - - def _check_src(self, src, expect_eos): - if not self._checked_src: - assert (src[-1] == self.eos[0]) == expect_eos - self._checked_src = True - - def _check_tgt(self, tgt, expect_eos): - if self.has_target and not self._checked_tgt: - assert (tgt[-1] == self.eos[0]) == expect_eos - self._checked_tgt = True - - def __getitem__(self, index): - 
return self.dataset[index] - - def __len__(self): - return len(self.dataset) - - def collater(self, samples): - def transform(item): - if self.append_eos_to_src: - self.eos = self.eos.to(device=item["source"].device) - self._check_src(item["source"], expect_eos=False) - item["source"] = torch.cat([item["source"], self.eos]) - if self.remove_eos_from_src: - self.eos = self.eos.to(device=item["source"].device) - self._check_src(item["source"], expect_eos=True) - item["source"] = item["source"][:-1] - if self.append_eos_to_tgt: - self.eos = self.eos.to(device=item["target"].device) - self._check_tgt(item["target"], expect_eos=False) - item["target"] = torch.cat([item["target"], self.eos]) - if self.remove_eos_from_tgt: - self.eos = self.eos.to(device=item["target"].device) - self._check_tgt(item["target"], expect_eos=True) - item["target"] = item["target"][:-1] - return item - - samples = list(map(transform, samples)) - return self.dataset.collater(samples) - - def num_tokens(self, index): - return self.dataset.num_tokens(index) - - def size(self, index): - if self.has_target: - src_len, tgt_len = self.dataset.size(index) - return (src_len + self._src_delta, tgt_len + self._tgt_delta) - else: - return self.dataset.size(index) - - def ordered_indices(self): - # NOTE: we assume that the ordering does not change based on the - # addition or removal of eos - return self.dataset.ordered_indices() - - @property - def supports_prefetch(self): - return getattr(self.dataset, "supports_prefetch", False) - - def prefetch(self, indices): - return self.dataset.prefetch(indices) diff --git a/spaces/mshukor/UnIVAL/run_scripts/averaging/ratatouille/scaling_best/vqa/unival_vqa_initsnlive.sh b/spaces/mshukor/UnIVAL/run_scripts/averaging/ratatouille/scaling_best/vqa/unival_vqa_initsnlive.sh deleted file mode 100644 index 0f7620363459f5aaa36c05555ca3c53d8837575e..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/run_scripts/averaging/ratatouille/scaling_best/vqa/unival_vqa_initsnlive.sh +++ /dev/null @@ -1,232 +0,0 @@ - -# Number of GPUs per GPU worker -export GPUS_PER_NODE=8 -# Number of GPU workers, for single-worker training, please set to 1 -export NUM_NODES=$SLURM_NNODES -# The ip address of the rank-0 worker, for single-worker training, please set to localhost -master_addr=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1) -export MASTER_ADDR=$master_addr - -# The port for communication -export MASTER_PORT=12350 -# The rank of this worker, should be in {0, ..., WORKER_CNT-1}, for single-worker training, please set to 0 -export RANK=$SLURM_NODEID - -echo "MASTER_ADDR: $MASTER_ADDR" -echo "RANK :$RANK" -echo "NUM_NODES :$NUM_NODES" -echo "GPUS_PER_NODE :$GPUS_PER_NODE" - -export MIOPEN_USER_DB_PATH=/lus/home/NAT/gda2204/mshukor/.config/miopen_${MASTER_ADDR}_${SLURM_PROCID}/ - -echo "MIOPEN_USER_DB_PATH :$MIOPEN_USER_DB_PATH" - -num_workers=0 - - -ofa_dir=/lus/home/NAT/gda2204/mshukor/code/unival -base_data_dir=/lus/scratch/NAT/gda2204/SHARED/data -base_log_dir=/work/NAT/gda2204/mshukor/logs - - -exp_name=unival_vqa_initsnlive - - -image_dir=${base_data_dir} -data_dir=${base_data_dir}/ofa/vqa_data -# data=${data_dir}/vqa_train.tsv,${data_dir}/vqa_val.tsv -# Note: If you have shuffled the data in advance, please uncomment the line below. 
-data=${data_dir}/vqa_train_1.tsv,${data_dir}/vqa_train_2.tsv,${data_dir}/vqa_train_3.tsv,${data_dir}/vqa_train_4.tsv,${data_dir}/vqa_train_5.tsv,${data_dir}/vqa_train_6.tsv,${data_dir}/vqa_train_7.tsv,${data_dir}/vqa_train_8.tsv,${data_dir}/vqa_train_9.tsv,${data_dir}/vqa_train_10.tsv,${data_dir}/vqa_val.tsv -ans2label_file=${base_data_dir}/ofa/vqa_data/trainval_ans2label.pkl - - -selected_cols=0,5,2,3,4 - - - -save_base_log_dir=/lus/scratch/NAT/gda2204/SHARED/logs -save_dir=${save_base_log_dir}/ofa/checkpoints/vqa/${exp_name} - -# save_dir=${base_log_dir}/ofa/checkpoints/vqa/${exp_name} -log_dir=${save_dir} - -mkdir -p $log_dir $save_dir - -restore_file=/lus/scratch/NAT/gda2204/SHARED/logs/ofa/checkpoints/snli_ve/unival_snli_ve/10_5e-5/checkpoint_best.pt - -lr=1e-4 - - -bpe_dir=${ofa_dir}/utils/BPE -user_dir=${ofa_dir}/ofa_module - - - -task=vqa_gen -arch=unival_base - - -criterion=adjust_label_smoothed_cross_entropy -label_smoothing=0.1 -batch_size=16 -update_freq=1 -resnet_drop_path_rate=0.0 -encoder_drop_path_rate=0.1 -decoder_drop_path_rate=0.1 -dropout=0.1 -attention_dropout=0.0 -max_src_length=80 -max_object_length=30 -max_tgt_length=30 -num_bins=1000 -# patch_image_size=480 - -uses_ema="--uses-ema" -store_ema="--store-ema" -ema_fp32="--ema-fp32" -ema_decay=0.9999 -ema_start_update=0 - -# Specify the inference type in validation after each fine-tuning epoch -# As mentioned in the readme, you can choose from allcand or beamsearch evaluation, default to allcand -val_inference_type=beamsearch - -# Specify whether to activate unconstrained VQA finetuning, which does not use a pre-defined candidate answer set -# If --unconstrained-training is acitvated, --ans2label-file will **not be used even if it is specified** -# Meanwhile, --val-inference-type must be set to **beamsearch** -# By default, we follow the constrained finetuning as we mentioned in OFA paper, the candidate answer set shall be specified by --ans2label-file -# For more details about this option, please refer to issue #123 and PR #124 -unconstrained_training_flag="" -# unconstrained_training_flag="--unconstrained-training" - - - - - -save_interval_updates=0 - -### -image_encoder_name=timm_resnet #vit_base_patch16_224 -patch_image_size=480 -resnet_type=resnet101 - -resnet_model_path=${base_log_dir}/pretrained_models/resnet101-5d3b4d8f.pth - -# video -video_encoder_name=all_resnext101 -patch_frame_size=384 -video_model_path=${base_log_dir}/pretrained_models/3dcnn/resnext-101-kinetics.pth #${base_log_dir}/pretrained_models/TimeSformer_divST_8x32_224_K600.pyth -num_frames=4 - - -sample_patch_num='--sample-patch-num=784' # '' - -eval_args='--eval-args={"beam":5,"unnormalized":true,"temperature":1.0,"stop_on_max_len":true}' - -validate_interval_updates=2000 -save_interval_updates=0 - - -for max_epoch in {20,}; do - echo "max_epoch "${max_epoch} - for warmup_ratio in {0.04,}; do - echo "warmup_updates "${warmup_updates} - for lr in {$lr,}; do - echo "lr "${lr} - for patch_image_size in {$patch_image_size,}; do - echo "patch_image_size "${patch_image_size} - - log_file=${log_dir}/${max_epoch}"_"${warmup_ratio}"_"${lr}"_"${patch_image_size}"_rank"${RANK}".log" - save_path=${save_dir}/${max_epoch}"_"${warmup_ratio}"_"${lr}"_"${patch_image_size} - mkdir -p $save_path - - python3 -m torch.distributed.launch \ - --nnodes=${NUM_NODES} \ - --nproc_per_node=${GPUS_PER_NODE} \ - --master_port=${MASTER_PORT} \ - --node_rank=${RANK} \ - --master_addr=${MASTER_ADDR} \ - --use_env ${ofa_dir}/train.py \ - ${data} \ - 
--selected-cols=${selected_cols} \ - --bpe-dir=${bpe_dir} \ - --user-dir=${user_dir} \ - --restore-file=${restore_file} \ - --save-dir=${save_path} \ - --task=${task} \ - --arch=${arch} \ - --criterion=${criterion} \ - --label-smoothing=${label_smoothing} \ - --batch-size=${batch_size} \ - --update-freq=${update_freq} \ - --encoder-normalize-before \ - --decoder-normalize-before \ - --share-decoder-input-output-embed \ - --share-all-embeddings \ - --layernorm-embedding \ - --patch-layernorm-embedding \ - --code-layernorm-embedding \ - --resnet-drop-path-rate=${resnet_drop_path_rate} \ - --encoder-drop-path-rate=${encoder_drop_path_rate} \ - --decoder-drop-path-rate=${decoder_drop_path_rate} \ - --dropout=${dropout} \ - --attention-dropout=${attention_dropout} \ - --weight-decay=0.01 \ - --optimizer=adam \ - --adam-betas="(0.9,0.999)" \ - --adam-eps=1e-08 \ - --clip-norm=1.0 \ - --lr-scheduler=polynomial_decay \ - --lr=${lr} \ - --max-epoch=${max_epoch} \ - --warmup-ratio=${warmup_ratio} \ - --log-format=simple \ - --log-interval=10 \ - --fixed-validation-seed=7 \ - --keep-best-checkpoints=1 \ - --no-epoch-checkpoints \ - --save-interval=1 --validate-interval=1 \ - --save-interval-updates=${save_interval_updates} --validate-interval-updates=${validate_interval_updates} \ - --best-checkpoint-metric=vqa_score --maximize-best-checkpoint-metric \ - --max-src-length=${max_src_length} \ - --max-object-length=${max_object_length} \ - --max-tgt-length=${max_tgt_length} \ - --find-unused-parameters \ - --freeze-encoder-embedding \ - --freeze-decoder-embedding \ - ${unconstrained_training_flag} \ - --ans2label-file=${ans2label_file} \ - --valid-batch-size=20 \ - --add-type-embedding \ - --scale-attn \ - --scale-fc \ - --scale-heads \ - --disable-entangle \ - --num-bins=${num_bins} \ - --patch-image-size=${patch_image_size} \ - --prompt-type=prev_output \ - --fp16 \ - --fp16-scale-window=512 \ - ${uses_ema} \ - ${store_ema} \ - ${ema_fp32} \ - --ema-decay=${ema_decay} \ - --ema-start-update=${ema_start_update} \ - --val-inference-type=${val_inference_type} \ - --num-workers=0 \ - --image-encoder-name=${image_encoder_name} \ - --image-dir=${image_dir} \ - --video-encoder-name=${video_encoder_name} \ - --video-model-path=${video_model_path} \ - --patch-frame-size=${patch_frame_size} \ - ${sample_patch_num} \ - ${eval_args} \ - --no-epoch-checkpoints \ - --resnet-type=${resnet_type} \ - --resnet-model-path=${resnet_model_path} \ - --reset-dataloader --reset-meters --reset-optimizer - - done - done - done -done diff --git a/spaces/muhammadayman/gradio-demo/README.md b/spaces/muhammadayman/gradio-demo/README.md deleted file mode 100644 index a8df8e67c1b6fc4506b7e58fb09e5964ccbe0477..0000000000000000000000000000000000000000 --- a/spaces/muhammadayman/gradio-demo/README.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: Gradio Demo -emoji: 🏃 -colorFrom: yellow -colorTo: red -sdk: gradio -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. 
- -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/nakas/MusicGenDemucs/tests/data/__init__.py b/spaces/nakas/MusicGenDemucs/tests/data/__init__.py deleted file mode 100644 index 0952fcc3f57e34b3747962e9ebd6fc57aeea63fa..0000000000000000000000000000000000000000 --- a/spaces/nakas/MusicGenDemucs/tests/data/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. diff --git a/spaces/nakas/MusicGenDemucs/tests/modules/test_lstm.py b/spaces/nakas/MusicGenDemucs/tests/modules/test_lstm.py deleted file mode 100644 index 1248964c8191e19f27661f0974bef9cc967eb015..0000000000000000000000000000000000000000 --- a/spaces/nakas/MusicGenDemucs/tests/modules/test_lstm.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import random -import torch - -from audiocraft.modules.lstm import StreamableLSTM - - -class TestStreamableLSTM: - - def test_lstm(self): - B, C, T = 4, 2, random.randint(1, 100) - - lstm = StreamableLSTM(C, 3, skip=False) - x = torch.randn(B, C, T) - y = lstm(x) - - print(y.shape) - assert y.shape == torch.Size([B, C, T]) - - def test_lstm_skip(self): - B, C, T = 4, 2, random.randint(1, 100) - - lstm = StreamableLSTM(C, 3, skip=True) - x = torch.randn(B, C, T) - y = lstm(x) - - assert y.shape == torch.Size([B, C, T]) diff --git a/spaces/nateraw/dockerplayground/Dockerfile b/spaces/nateraw/dockerplayground/Dockerfile deleted file mode 100644 index 7012c61fbae827259d40f98726c57326d52baa73..0000000000000000000000000000000000000000 --- a/spaces/nateraw/dockerplayground/Dockerfile +++ /dev/null @@ -1,63 +0,0 @@ -# FROM nvidia/cuda:11.3.1-base-ubuntu20.04 -FROM ubuntu:20.04 - -# Remove any third-party apt sources to avoid issues with expiring keys. 
-RUN rm -f /etc/apt/sources.list.d/*.list - -# Install some basic utilities -RUN apt-get update && apt-get install -y \ - curl ca-certificates sudo git bzip2 libx11-6 && \ - rm -rf /var/lib/apt/lists/* - -# Create a working directory -RUN mkdir /app -WORKDIR /app - -# Create a non-root user and switch to it -RUN adduser --disabled-password --gecos '' --shell /bin/bash user && \ - chown -R user:user /app - -RUN echo "user ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/90-user -USER root - -# All users can use /home/user as their home directory -ENV HOME=/home/user -RUN mkdir $HOME/.cache $HOME/.config && \ - chmod -R 777 $HOME - -# Set up the Conda environment -ENV CONDA_AUTO_UPDATE_CONDA=false \ - PATH=$HOME/miniconda/bin:$PATH - -RUN curl -sLo ~/miniconda.sh https://repo.continuum.io/miniconda/Miniconda3-py39_4.10.3-Linux-x86_64.sh && \ - chmod +x ~/miniconda.sh && \ - ~/miniconda.sh -b -p ~/miniconda && \ - rm ~/miniconda.sh && \ - conda clean -ya - -ENV PYTHONUNBUFFERED=1 \ - GRADIO_ALLOW_FLAGGING=never \ - GRADIO_NUM_PORTS=1 \ - GRADIO_SERVER_NAME=0.0.0.0 \ - GRADIO_THEME=huggingface \ - SYSTEM=spaces - -RUN conda install -c conda-forge -y jupyterlab - -# RUN pip install --no-cache-dir fire gradio datasets huggingface_hub - -# Install user requirements -COPY ./requirements.txt /app/requirements.txt -RUN pip install --no-cache-dir --upgrade -r /app/requirements.txt - -WORKDIR $HOME/app - -# USER root - -# Copy the current directory contents into the container at $HOME/app setting the owner to the root user -COPY --chown=root . $HOME/app - -RUN chmod +x start_server.sh - -EXPOSE 7860 -CMD ["./start_server.sh"] diff --git a/spaces/nathanTQ/ChatDev/online_log/static/download.html b/spaces/nathanTQ/ChatDev/online_log/static/download.html deleted file mode 100644 index 2f6a7735896880da8171d82e24a569374f18c1b7..0000000000000000000000000000000000000000 --- a/spaces/nathanTQ/ChatDev/online_log/static/download.html +++ /dev/null @@ -1,62 +0,0 @@ - - - - - ChatDev - - - - - - - - - - - - - - - - - - - - - -
    - ChatDev Title -
    - - - - - - - diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/L4d2 Steam Is Not Running Crack.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/L4d2 Steam Is Not Running Crack.md deleted file mode 100644 index fd930ae6c907ddce34c6246045fa669e99dba191..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/L4d2 Steam Is Not Running Crack.md +++ /dev/null @@ -1,23 +0,0 @@ - -I'll try to create that. -Here is what I created: - -

    How to Fix L4d2 Steam Is Not Running Crack Error

    -

    L4d2 Steam Is Not Running Crack is a common error that occurs when you try to play Left 4 Dead 2 on a cracked version of Steam. This error prevents you from launching the game and may also cause other issues such as crashes, freezes, or missing files.

    -

    Fortunately, there are some possible solutions that can help you fix this error and enjoy the game. Here are some of them:

    -

    L4d2 Steam Is Not Running Crack


    Download Zip 🌟 https://urlcod.com/2uI9Rz



    -
      -
    • Verify the integrity of the game files. This can be done by right-clicking on Left 4 Dead 2 in your Steam library, selecting Properties, then Local Files, and then Verify Integrity of Game Files. This will scan and repair any corrupted or missing files that may cause the error.
    • -
    • Run the game as an administrator. This can be done by right-clicking on Left 4 Dead 2 in your Steam library, selecting Properties, then Compatibility, and then checking the box that says Run this program as an administrator. This will give the game more permissions and access to your system resources.
    • -
    • Disable your antivirus or firewall. Some antivirus or firewall programs may interfere with the game or the cracked version of Steam and cause the error. You can try disabling them temporarily while you play the game and see if that solves the problem.
    • -
    • Reinstall the game or the cracked version of Steam. If none of the above solutions work, you may need to reinstall the game or the cracked version of Steam to fix any corrupted or missing files that may cause the error. Make sure to back up your save files and settings before doing so.
    • -
    -

    Disclaimer: This article is for educational purposes only and does not condone piracy or illegal activities. We do not support or encourage using cracked versions of Steam or any games. Please buy and play games legally and support the developers.

    Here are some more paragraphs: - -

    Some users have reported that they were able to fix the error by using a different Steam emulator or launcher. For example, you can try using SmartSteamLoader.exe instead of Left4Dead2.exe and make sure to run it as an administrator. You may also need to edit the SmartSteamEmu.ini file and point it to the correct location of the steamclient.dll file[^1^]. Alternatively, you can try using another emulator such as SSELauncher or GreenLuma.

    -

    Another possible solution is to update your game to the latest version or patch. This can be done by downloading and installing the updates from the official website of Left 4 Dead 2 or from the source where you got the cracked version of Steam. This may fix some bugs or compatibility issues that may cause the error.

    -

    If you want to play Left 4 Dead 2 online with other players who are using the same cracked version of Steam, you may need to follow some additional steps. For example, you may need to block the game from accessing the internet in your firewall settings to prevent it from crashing or detecting your crack[^2^]. You may also need to change your in-game name to avoid being banned or kicked by other players[^3^].

    -

    -

    Disclaimer: This article is for educational purposes only and does not condone piracy or illegal activities. We do not support or encourage using cracked versions of Steam or any games. Please buy and play games legally and support the developers.

    7196e7f11a
    -
    -
    \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Livro Historia Da Cidade Leonardo Benevolo Pdf _VERIFIED_ Download.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Livro Historia Da Cidade Leonardo Benevolo Pdf _VERIFIED_ Download.md deleted file mode 100644 index 9399b2a975640a1b755a2e95ad19f6cc4d65a59f..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Livro Historia Da Cidade Leonardo Benevolo Pdf _VERIFIED_ Download.md +++ /dev/null @@ -1,31 +0,0 @@ - -Here is a possible title and article for your keyword: - -

    Como baixar o livro História da Cidade de Leonardo Benevolo em PDF

    -

    O livro História da Cidade de Leonardo Benevolo é uma obra de referência para os estudantes e profissionais de arquitetura, urbanismo e história. Neste livro, o autor traça um panorama da evolução das cidades desde as origens até os dias atuais, analisando os fatores sociais, políticos, econômicos e culturais que influenciaram as formas urbanas ao longo da história.

    -

    livro historia da cidade leonardo benevolo pdf download


    Download Zip ===> https://urlcod.com/2uI9Qo



    -

    Se você quer ler este livro em formato digital, saiba que é possível baixá-lo gratuitamente em PDF. Neste artigo, vamos mostrar como fazer isso de forma simples e segura.

    -

    Passo a passo para baixar o livro História da Cidade de Leonardo Benevolo em PDF

    -

    Para baixar o livro História da Cidade de Leonardo Benevolo em PDF, você precisa seguir os seguintes passos:

    -
      -
    1. Acesse o site Academia.edu, que é uma plataforma de compartilhamento de trabalhos acadêmicos.
    2. -
    3. Na barra de pesquisa, digite o título do livro: História da Cidade de Leonardo Benevolo.
    4. -
    5. Clique no resultado que corresponde ao livro que você procura. Você verá uma página com a capa do livro e um botão verde escrito "Download".
    6. -
    7. Clique no botão "Download" e aguarde alguns segundos. Você será redirecionado para uma página onde você pode escolher entre fazer o download do arquivo em PDF ou ler online.
    8. -
    9. Escolha a opção que preferir e pronto! Você já pode aproveitar a leitura do livro História da Cidade de Leonardo Benevolo em PDF.
    10. -
    -

    Dicas para aproveitar melhor o livro História da Cidade de Leonardo Benevolo em PDF

    -

    Agora que você já sabe como baixar o livro História da Cidade de Leonardo Benevolo em PDF, vamos dar algumas dicas para você aproveitar melhor a leitura:

    -

    -
      -
    • Antes de começar a ler, faça uma breve pesquisa sobre o autor e o contexto histórico em que ele escreveu o livro. Isso vai ajudar você a entender melhor as ideias e os argumentos do autor.
    • -
    • Enquanto lê, faça anotações sobre os pontos principais de cada capítulo, as datas, os nomes e os conceitos importantes. Isso vai facilitar a sua revisão e o seu aprendizado.
    • -
    • Compare as informações do livro com outras fontes confiáveis, como livros, artigos e sites especializados. Isso vai ampliar a sua visão crítica e o seu conhecimento sobre o tema.
    • -
    • Aproveite as ilustrações, os mapas e as fotografias que acompanham o texto. Eles são ótimos recursos para visualizar as características das cidades e dos períodos históricos abordados pelo autor.
    • -
    • Compartilhe as suas impressões e as suas dúvidas sobre o livro com outras pessoas que se interessam pelo assunto. Você pode fazer isso através de redes sociais, fóruns ou grupos de estudo online. Isso vai enriquecer a sua experiência de leitura e o seu aprendizado.
    • -
    -

    Conclusão

    -

    O livro História da Cidade de Leonardo Benevolo é um clássico da literatura sobre urbanismo e história. Ele oferece uma visão abrangente e atualizada sobre a origem e a transformação das cidades ao longo dos séculos.

    -

    Se você quer ler este livro em formato digital, você pode baixá-lo gratuitamente em PDF através do site Academia.edu. Basta seguir os passos que mostramos neste artigo e aproveitar as dicas que demos para melhorar a

    7196e7f11a
    -
    -
    \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Musa The Warrior [2001] [English Hard Subed]l.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Musa The Warrior [2001] [English Hard Subed]l.md deleted file mode 100644 index e278fc4e4589e5c1ad13f208fa963c5a4eb2a19e..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Musa The Warrior [2001] [English Hard Subed]l.md +++ /dev/null @@ -1,22 +0,0 @@ -
    -

    Musa The Warrior: A Korean Epic Film with English Subtitles

    -

    Musa The Warrior is a 2001 South Korean historical action film directed by Kim Sung-soo and starring Jung Woo-sung, Ahn Sung-ki, Joo Jin-mo and Zhang Ziyi. It tells the story of a group of Korean envoys who are sent to China during the Ming dynasty and end up in a perilous journey across the Gobi Desert.

    -

    Musa The Warrior [2001] [English Hard Subed]l


    Download 🔗 https://urlcod.com/2uIaSr




    The film was a critical and commercial success in South Korea, winning several awards and becoming the highest-grossing Korean film of 2001. It was also well received internationally, especially in China and Japan, where it was praised for its epic scale, realistic battle scenes and cultural diversity.


    Musa The Warrior is available to watch online with English hard subtitles, which means that the subtitles are embedded in the video and cannot be turned off or changed. This is a great way to enjoy the film without missing any dialogue or nuance. You can find the link to watch Musa The Warrior with English hard subtitles below:

    Watch Musa The Warrior with English Hard Subtitles
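    As an aside for readers curious how "hard" subtitles are produced in the first place, the sketch below shows the usual approach: the subtitle text is burned directly into the video frames so it can never be switched off. It assumes ffmpeg built with libass is installed and uses hypothetical file names; it is only an illustration of the concept, not something you need in order to watch the linked stream.

    ```python
    # Rough sketch: burn ("hard") subtitles into a video using ffmpeg's subtitles filter.
    # Assumes ffmpeg with libass is installed; the file names are hypothetical placeholders.
    import subprocess

    command = [
        "ffmpeg",
        "-i", "musa_the_warrior.mp4",    # source video (placeholder name)
        "-vf", "subtitles=english.srt",  # draw the .srt text onto every frame
        "-c:a", "copy",                  # keep the audio track unchanged
        "musa_hardsubbed.mp4",
    ]
    subprocess.run(command, check=True)
    ```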

    If you are a fan of historical action films, you will love Musa The Warrior. It is a thrilling and captivating adventure that showcases the courage and loyalty of the Korean warriors, as well as the beauty and diversity of the Asian continent. Don't miss this opportunity to watch Musa The Warrior with English hard subtitles online!


    Musa The Warrior: The Cast and Characters


    Musa The Warrior features a stellar cast of actors from Korea, China and Japan, who bring to life the complex and diverse characters of the film. Here are some of the main cast and characters of Musa The Warrior:


    • Jung Woo-sung as Yeo-sol, a skilled archer and former slave who becomes the leader of the Korean envoys. He is loyal, brave and compassionate, and develops a bond with Princess Bu-yong.
    • Ahn Sung-ki as Choi Jung, a nobleman and diplomat who is the head of the Korean delegation. He is proud, ambitious and patriotic, but also pragmatic and willing to compromise for the sake of his country.
    • Joo Jin-mo as Yu Jin-ha, a warrior and bodyguard who is loyal to Choi Jung. He is strong, loyal and honorable, but also hot-headed and impulsive. He has a rivalry with Yeo-sol over their skills and status.
    • Zhang Ziyi as Princess Bu-yong, a Ming princess who is captured by the Mongol army. She is beautiful, intelligent and courageous, and has a strong sense of justice. She falls in love with Yeo-sol despite their cultural differences.
    • Yu Rong-guang as Rambulhua, a Mongolian general who leads the rebels against the Ming dynasty. He is fierce, charismatic and ruthless, and seeks to restore the glory of the Mongol empire.

    Musa The Warrior also features other notable actors such as Park Yong-woo, Park Jung-hak, Song Jae-ho and Han Yeong-mok in supporting roles. The cast of Musa The Warrior delivers impressive performances that make the film more engaging and realistic.

    \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Obs Studio For Mac 10.6.8 _BEST_.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Obs Studio For Mac 10.6.8 _BEST_.md deleted file mode 100644 index fa32fb8fafb48d08d5574cffdf280e970d84c494..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Obs Studio For Mac 10.6.8 _BEST_.md +++ /dev/null @@ -1,35 +0,0 @@ -

    How to Install and Use OBS Studio on Mac 10.6.8


    OBS Studio is free, open-source software for video recording and live streaming. It allows you to capture your screen, webcam, microphone, and other sources, and mix them together in real time. You can also add filters, transitions, scenes, and more to enhance your video quality and creativity.


    Obs Studio For Mac 10.6.8


    Download >>>>> https://urlcod.com/2uIaFt




    OBS Studio is compatible with macOS 10.13 and newer versions, but what if you have an older Mac running 10.6.8? In this article, we will show you how to install and use OBS Studio on Mac 10.6.8 with some alternative methods.
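    If you are not sure which version of macOS your Mac is running, the short, standard-library-only sketch below is one way to check it before deciding which installation route to take.

    ```python
    # Quick check: does this Mac meet the official OBS Studio requirement (macOS 10.13 or later)?
    import platform

    version_string = platform.mac_ver()[0]  # e.g. "10.6.8"; empty string on non-Mac systems
    if not version_string:
        print("This does not appear to be a Mac.")
    else:
        parts = version_string.split(".")
        major, minor = int(parts[0]), int(parts[1]) if len(parts) > 1 else 0
        if (major, minor) >= (10, 13):
            print(f"macOS {version_string}: the official OBS Studio build should run.")
        else:
            print(f"macOS {version_string}: too old for the official build; try the alternative methods below.")
    ```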


    Alternative Installation Methods for OBS Studio on Mac 10.6.8


    If you try to download the official version of OBS Studio from the OBS Project website[^1^], you will get a file ending in .dmg that requires macOS 10.13 or later to run. However, there are some other ways to install OBS Studio on Mac 10.6.8 that may work for you.


    Homebrew


    Homebrew is a package manager for macOS that lets you install software that is not available from the official sources. You can use Homebrew to install OBS Studio on Mac 10.6.8 by following these steps (a small wrapper script is sketched right after the list):

    1. Open Terminal and type /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)" to install Homebrew.
    2. Type brew update to update Homebrew.
    3. Type brew install --cask obs to install OBS Studio[^2^].
    4. Launch OBS Studio from the Applications folder or Spotlight.
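    The same sequence can be wrapped in a small script. The sketch below only automates steps 2 and 3 above and assumes brew is already on your PATH; on a machine as old as 10.6.8, current Homebrew and the current OBS cask may refuse to install at all, so treat it as a convenience wrapper rather than a guarantee.

    ```python
    # Convenience wrapper around the manual steps above: update Homebrew, then install the OBS cask.
    # Assumes `brew` is already installed and on PATH (step 1 above).
    import shutil
    import subprocess

    def run(cmd):
        print("Running:", " ".join(cmd))
        subprocess.run(cmd, check=True)  # raise an error if the command fails

    if shutil.which("brew") is None:
        raise SystemExit("Homebrew not found - install it first (step 1 above).")

    run(["brew", "update"])
    run(["brew", "install", "--cask", "obs"])
    print("Done - launch OBS Studio from the Applications folder or Spotlight.")
    ```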

    Build from Source


    If you are comfortable with coding and compiling, you can also try to build OBS Studio from source on Mac 10.6.8. This method requires some dependencies and tools that you need to install first, such as Xcode, CMake, FFmpeg, Qt5, etc. You can find the detailed instructions on how to build from source on the OBS Studio wiki[^3^].
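    Before attempting a source build, it can save time to confirm the command-line tools are actually present. The sketch below is a rough pre-flight check using only the Python standard library; the exact set of executables (git is assumed here for fetching the source, and qmake stands in for Qt5) may differ from what the wiki instructions for your OBS version require.

    ```python
    # Rough pre-flight check: are the build tools mentioned above available on PATH?
    # The tool list is an assumption based on the dependencies named in the text.
    import shutil

    required_tools = ["git", "cmake", "ffmpeg", "qmake"]  # qmake ships with Qt5
    missing = [tool for tool in required_tools if shutil.which(tool) is None]

    if missing:
        print("Missing build tools:", ", ".join(missing))
    else:
        print("All listed build tools were found; you can try the build instructions from the wiki.")
    ```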



    How to Use OBS Studio on Mac 10.6.8


    Once you have installed OBS Studio on Mac 10.6.8 by one of the alternative methods above, you can start using it to record or stream your video content. Here are some basic steps to get you started:

    1. Open OBS Studio and choose your preferred settings for video output, audio input, hotkeys, etc.
    2. Add sources to your scene by clicking the + button under the Sources panel. You can add your display capture, window capture, video capture device (webcam), audio input capture (microphone), etc.
    3. Adjust the size and position of your sources by dragging them on the preview window.
    4. Add filters, transitions, text, images, etc. to your sources by right-clicking them and selecting Properties or Filters.
    5. To start recording, click the Start Recording button at the bottom right corner of the window.
    6. To start streaming, click the Settings button at the bottom right corner of the window and go to the Stream tab. Choose your streaming service (such as Twitch, YouTube, Facebook Live, etc.), enter your stream key or log in with your account, and click OK.
    7. Click the Start Streaming button at the bottom right corner of the window.
    8. To stop recording or streaming, click the Stop Recording or Stop Streaming button at the bottom right corner of the window.
    9. To view your recorded videos, go to the File menu and select Show Recordings.

    Conclusion


    OBS Studio is a powerful and versatile tool for video recording and live streaming that works well on macOS 10.13 and newer versions. However, if you have an older Mac running 10.6.8, you can still install and use OBS Studio with some alternative methods such as Homebrew or building from source. We hope this article helped you learn how to install and use OBS Studio on Mac 10.6.8.

    \ No newline at end of file diff --git a/spaces/nikitaPDL2023/assignment4/detectron2/tests/layers/test_mask_ops.py b/spaces/nikitaPDL2023/assignment4/detectron2/tests/layers/test_mask_ops.py deleted file mode 100644 index dfbcaf5291a87ec85617d5e7a7aa959c68b06770..0000000000000000000000000000000000000000 --- a/spaces/nikitaPDL2023/assignment4/detectron2/tests/layers/test_mask_ops.py +++ /dev/null @@ -1,202 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) Facebook, Inc. and its affiliates. - -import contextlib -import io -import numpy as np -import unittest -from collections import defaultdict -import torch -import tqdm -from fvcore.common.benchmark import benchmark -from pycocotools.coco import COCO -from tabulate import tabulate -from torch.nn import functional as F - -from detectron2.data import MetadataCatalog -from detectron2.layers.mask_ops import ( - pad_masks, - paste_mask_in_image_old, - paste_masks_in_image, - scale_boxes, -) -from detectron2.structures import BitMasks, Boxes, BoxMode, PolygonMasks -from detectron2.structures.masks import polygons_to_bitmask -from detectron2.utils.file_io import PathManager -from detectron2.utils.testing import random_boxes - - -def iou_between_full_image_bit_masks(a, b): - intersect = (a & b).sum() - union = (a | b).sum() - return intersect / union - - -def rasterize_polygons_with_grid_sample(full_image_bit_mask, box, mask_size, threshold=0.5): - x0, y0, x1, y1 = box[0], box[1], box[2], box[3] - - img_h, img_w = full_image_bit_mask.shape - - mask_y = np.arange(0.0, mask_size) + 0.5 # mask y sample coords in [0.5, mask_size - 0.5] - mask_x = np.arange(0.0, mask_size) + 0.5 # mask x sample coords in [0.5, mask_size - 0.5] - mask_y = mask_y / mask_size * (y1 - y0) + y0 - mask_x = mask_x / mask_size * (x1 - x0) + x0 - - mask_x = (mask_x - 0.5) / (img_w - 1) * 2 + -1 - mask_y = (mask_y - 0.5) / (img_h - 1) * 2 + -1 - gy, gx = torch.meshgrid(torch.from_numpy(mask_y), torch.from_numpy(mask_x)) - ind = torch.stack([gx, gy], dim=-1).to(dtype=torch.float32) - - full_image_bit_mask = torch.from_numpy(full_image_bit_mask) - mask = F.grid_sample( - full_image_bit_mask[None, None, :, :].to(dtype=torch.float32), - ind[None, :, :, :], - align_corners=True, - ) - - return mask[0, 0] >= threshold - - -class TestMaskCropPaste(unittest.TestCase): - def setUp(self): - json_file = MetadataCatalog.get("coco_2017_val_100").json_file - if not PathManager.isfile(json_file): - raise unittest.SkipTest("{} not found".format(json_file)) - with contextlib.redirect_stdout(io.StringIO()): - json_file = PathManager.get_local_path(json_file) - self.coco = COCO(json_file) - - def test_crop_paste_consistency(self): - """ - rasterize_polygons_within_box (used in training) - and - paste_masks_in_image (used in inference) - should be inverse operations to each other. - - This function runs several implementation of the above two operations and prints - the reconstruction error. 
- """ - - anns = self.coco.loadAnns(self.coco.getAnnIds(iscrowd=False)) # avoid crowd annotations - - selected_anns = anns[:100] - - ious = [] - for ann in tqdm.tqdm(selected_anns): - results = self.process_annotation(ann) - ious.append([k[2] for k in results]) - - ious = np.array(ious) - mean_ious = ious.mean(axis=0) - table = [] - res_dic = defaultdict(dict) - for row, iou in zip(results, mean_ious): - table.append((row[0], row[1], iou)) - res_dic[row[0]][row[1]] = iou - print(tabulate(table, headers=["rasterize", "paste", "iou"], tablefmt="simple")) - # assert that the reconstruction is good: - self.assertTrue(res_dic["polygon"]["aligned"] > 0.94) - self.assertTrue(res_dic["roialign"]["aligned"] > 0.95) - - def process_annotation(self, ann, mask_side_len=28): - # Parse annotation data - img_info = self.coco.loadImgs(ids=[ann["image_id"]])[0] - height, width = img_info["height"], img_info["width"] - gt_polygons = [np.array(p, dtype=np.float64) for p in ann["segmentation"]] - gt_bbox = BoxMode.convert(ann["bbox"], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS) - gt_bit_mask = polygons_to_bitmask(gt_polygons, height, width) - - # Run rasterize .. - torch_gt_bbox = torch.tensor(gt_bbox).to(dtype=torch.float32).reshape(-1, 4) - box_bitmasks = { - "polygon": PolygonMasks([gt_polygons]).crop_and_resize(torch_gt_bbox, mask_side_len)[0], - "gridsample": rasterize_polygons_with_grid_sample(gt_bit_mask, gt_bbox, mask_side_len), - "roialign": BitMasks(torch.from_numpy(gt_bit_mask[None, :, :])).crop_and_resize( - torch_gt_bbox, mask_side_len - )[0], - } - - # Run paste .. - results = defaultdict(dict) - for k, box_bitmask in box_bitmasks.items(): - padded_bitmask, scale = pad_masks(box_bitmask[None, :, :], 1) - scaled_boxes = scale_boxes(torch_gt_bbox, scale) - - r = results[k] - r["old"] = paste_mask_in_image_old( - padded_bitmask[0], scaled_boxes[0], height, width, threshold=0.5 - ) - r["aligned"] = paste_masks_in_image( - box_bitmask[None, :, :], Boxes(torch_gt_bbox), (height, width) - )[0] - - table = [] - for rasterize_method, r in results.items(): - for paste_method, mask in r.items(): - mask = np.asarray(mask) - iou = iou_between_full_image_bit_masks(gt_bit_mask.astype("uint8"), mask) - table.append((rasterize_method, paste_method, iou)) - return table - - def test_polygon_area(self): - # Draw polygon boxes - for d in [5.0, 10.0, 1000.0]: - polygon = PolygonMasks([[[0, 0, 0, d, d, d, d, 0]]]) - area = polygon.area()[0] - target = d**2 - self.assertEqual(area, target) - - # Draw polygon triangles - for d in [5.0, 10.0, 1000.0]: - polygon = PolygonMasks([[[0, 0, 0, d, d, d]]]) - area = polygon.area()[0] - target = d**2 / 2 - self.assertEqual(area, target) - - def test_paste_mask_scriptable(self): - scripted_f = torch.jit.script(paste_masks_in_image) - N = 10 - masks = torch.rand(N, 28, 28) - boxes = Boxes(random_boxes(N, 100)).tensor - image_shape = (150, 150) - - out = paste_masks_in_image(masks, boxes, image_shape) - scripted_out = scripted_f(masks, boxes, image_shape) - self.assertTrue(torch.equal(out, scripted_out)) - - -def benchmark_paste(): - S = 800 - H, W = image_shape = (S, S) - N = 64 - torch.manual_seed(42) - masks = torch.rand(N, 28, 28) - - center = torch.rand(N, 2) * 600 + 100 - wh = torch.clamp(torch.randn(N, 2) * 40 + 200, min=50) - x0y0 = torch.clamp(center - wh * 0.5, min=0.0) - x1y1 = torch.clamp(center + wh * 0.5, max=S) - boxes = Boxes(torch.cat([x0y0, x1y1], axis=1)) - - def func(device, n=3): - m = masks.to(device=device) - b = boxes.to(device=device) - - def bench(): - for _ in 
range(n): - paste_masks_in_image(m, b, image_shape) - if device.type == "cuda": - torch.cuda.synchronize() - - return bench - - specs = [{"device": torch.device("cpu"), "n": 3}] - if torch.cuda.is_available(): - specs.append({"device": torch.device("cuda"), "n": 3}) - - benchmark(func, "paste_masks", specs, num_iters=10, warmup_iters=2) - - -if __name__ == "__main__": - benchmark_paste() - unittest.main() diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/rich/_pick.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/rich/_pick.py deleted file mode 100644 index 4f6d8b2d79406012c5f8bae9c289ed5bf4d179cc..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/rich/_pick.py +++ /dev/null @@ -1,17 +0,0 @@ -from typing import Optional - - -def pick_bool(*values: Optional[bool]) -> bool: - """Pick the first non-none bool or return the last value. - - Args: - *values (bool): Any number of boolean or None values. - - Returns: - bool: First non-none boolean. - """ - assert values, "1 or more values required" - for value in values: - if value is not None: - return value - return bool(value) diff --git a/spaces/pngwn/nextjs/out/_next/static/gJ8Ex4W-Ww6AME3xjurC5/_middlewareManifest.js b/spaces/pngwn/nextjs/out/_next/static/gJ8Ex4W-Ww6AME3xjurC5/_middlewareManifest.js deleted file mode 100644 index a17fc8bf955fbb4d5a68504afde0199bb43cf0d6..0000000000000000000000000000000000000000 --- a/spaces/pngwn/nextjs/out/_next/static/gJ8Ex4W-Ww6AME3xjurC5/_middlewareManifest.js +++ /dev/null @@ -1 +0,0 @@ -self.__MIDDLEWARE_MANIFEST=[];self.__MIDDLEWARE_MANIFEST_CB&&self.__MIDDLEWARE_MANIFEST_CB() \ No newline at end of file diff --git a/spaces/portal/Control-Nets/ai.html b/spaces/portal/Control-Nets/ai.html deleted file mode 100644 index eadd35764b9a055bf27e80de6837c9dc6316c6b9..0000000000000000000000000000000000000000 --- a/spaces/portal/Control-Nets/ai.html +++ /dev/null @@ -1,19 +0,0 @@ - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/spaces/pourmand1376/Seamlessm4t_diarization_VAD/style.css b/spaces/pourmand1376/Seamlessm4t_diarization_VAD/style.css deleted file mode 100644 index c4739b4ea5fc35e774a049e3dacc443f7f0eac19..0000000000000000000000000000000000000000 --- a/spaces/pourmand1376/Seamlessm4t_diarization_VAD/style.css +++ /dev/null @@ -1,3 +0,0 @@ -h1 { - text-align: center; -} diff --git a/spaces/prajdabre/CreoleM2M/app.py b/spaces/prajdabre/CreoleM2M/app.py deleted file mode 100644 index ca65ac851a46bf9ccfd0599a278724c5c9f9b129..0000000000000000000000000000000000000000 --- a/spaces/prajdabre/CreoleM2M/app.py +++ /dev/null @@ -1,87 +0,0 @@ -import gradio as gr -from transformers import AutoModelForSeq2SeqLM -from transformers import AlbertTokenizer - - -tokenizer = AlbertTokenizer.from_pretrained( - "prajdabre/CreoleM2M", do_lower_case=False, use_fast=False, keep_accents=True) -model = AutoModelForSeq2SeqLM.from_pretrained( - "prajdabre/CreoleM2M").eval() -bos_id = tokenizer._convert_token_to_id_with_added_voc("") -eos_id = tokenizer._convert_token_to_id_with_added_voc("") -pad_id = tokenizer._convert_token_to_id_with_added_voc("") - -CREOLE = {"Hawaiian Pidgin": "hwc", "Saint Lucian Creole": "acf", "Belizean Creole": "bzj", "Chavacano Creole": "cbk", "Seychellois Creole": "crs", "Sranan Tongo": "srn", "Aukan": "djk", "Gullah": "gul", "San Andrés–Providencia Creole": "icr", "Jamaican Creole": "jam", "Mauritian Creole": "mfe", "Papiamento": 
"pap", "Pijin": "pis", "Tok Pisin": "tpi", "Torres Strait Creole": "tcs", "Australian Kriol": "rop", "Sango": "sag", "Saramaccan": "srm", "Bislama": "bis", "Nigerian Pidgin": "pcm", "Sierra Leonean Creole": "kri", "Haitian Creole": "hat", "Kupang Malay": "mkn", "Tetun Dili": "tdt", "Malay Baba": "mbf", "Kituba": "ktu", "English": "eng"} - - -def generate(input, slang, tlang): - slang = CREOLE[slang] - tlang = CREOLE[tlang] - inp = tokenizer(input.strip() + "
    <2" + slang + ">", - add_special_tokens=False, return_tensors="pt", padding=True).input_ids - if (slang != "eng" and tlang == "eng") or (slang == "eng" and tlang != "eng") or (slang == tlang): - model_output = model.generate(inp, use_cache=True, num_beams=1, max_length=int(2*len(inp[0])), min_length=1, early_stopping=True, pad_token_id=pad_id, - bos_token_id=bos_id, eos_token_id=eos_id, decoder_start_token_id=tokenizer._convert_token_to_id_with_added_voc("<2"+tlang+">")) - decoded_output = tokenizer.decode( - model_output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False) - elif slang != tlang: - model_output = model.generate(inp, use_cache=True, num_beams=1, max_length=int(2*len(inp[0])), min_length=1, early_stopping=True, pad_token_id=pad_id, - bos_token_id=bos_id, eos_token_id=eos_id, decoder_start_token_id=tokenizer._convert_token_to_id_with_added_voc("<2eng>")) - decoded_output = tokenizer.decode( - model_output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False) - inp = tokenizer(decoded_output + "
    <2eng>", - add_special_tokens=False, return_tensors="pt", padding=True).input_ids - model_output = model.generate(inp, use_cache=True, num_beams=1, max_length=int(2*len(inp[0])), min_length=1, early_stopping=True, pad_token_id=pad_id, - bos_token_id=bos_id, eos_token_id=eos_id, decoder_start_token_id=tokenizer._convert_token_to_id_with_added_voc("<2"+tlang+">")) - decoded_output = tokenizer.decode( - model_output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False) - - return decoded_output - - -languages = list(CREOLE.keys()) - -src_language_drop_down = gr.inputs.Dropdown( - languages, type="value", default="Hawaiian Pidgin", label="Select source language") -tgt_language_drop_down = gr.inputs.Dropdown( - languages, type="value", default="English", label="Select target language") -text = gr.inputs.Textbox(lines=1, placeholder="Enter text here...", - default="", label="Enter text in the source language") -text_ouptut = gr.outputs.Textbox( - type="text", label="View translation in the target language") - -supported_lang = ', '.join(languages) - -examples = [ -['Mé lè sé sòlda-a mawéy pou yo té bat li , Pòl di ofisyé-a ki doubout la-a , “ Ès lwa-a di ou sa bat on jan Ronm si ou pòkò menm fè lodyans pou sa ? ”', "Saint Lucian Creole", "English"], -['Be taem oli fasemgud hem blong wipim hem , Pol i talem long kapten blong olgeta , we i stap stanap long ples ya se , “ ! E ! Mi mi sitisen blong Rom ya . Yufala i no jajem mi yet . ! Olsem wanem yufala i wantem wipim mi ? ”', "Bislama", "English"], -['Wail di soalja dehn mi-di tai op Paal fi beet ahn , Paal aks wan a di aafisa dehn weh mi-di stan op kloas tu ahn , “ Tel mi , ih leegal fi beet wahn Roaman sitizn bifoa unu chrai ahn da koat ? ”', "Belizean Creole", "English"], -['Mientras ta amarra sila con Pablo para latiga , ya habla le con el capitan quien talla parao , “ Tiene ba uste el derecho para latiga con un ciudadano Romano que nuay pa pasa investigacion de algun crimen ? ”', "Chavacano Creole", "English"], -['Kan zot tin anmar li pour li ganny fwete , Pol ti dir avek sa zofisye ki ti la , “ Eski ou annan drwa fwet en sitwayen Romen ki pan ganny zize ? ”', "Seychellois Creole", "English"], -['Den tei en poti fu leli buba . Ma a piki a ofisii di mu meke den du dati taki : “ U tei mi enke foluku fu Loma Foto fu wipi ondoosuku ! Ma kownu anda weiti taki : Na lanti fu kuutu mu ondoosuku foluku fu Loma Foto . A sowtu wipi ya a ganda mindii noiti mu pasa . ’ ”', "Aukan", "English"], -['Bot wen dey tie Paul op an scretch um out fa beat um , Paul taak ta de offisa wa beena stanop dey . Paul aks um say , “ De law ain tell oona dat oona kin beat a Roman citizen wen nobody ain eben jedge um , needa find out dat e done sompin bad , ainty dough ? ”', "Gullah", "English"], -['Wen dey wen stretch him out fo whip him real hard , Paul wen tell da captain dat stay dea , “ Dis okay in da rules fo da Rome peopo ? fo you fo whip one guy dat get da same rights jalike da Rome peopo ? even one guy dat neva do notting wrong ? ”', "Hawaiian Pidgin", "English"], -['Wail di suoldya dehn wende tai op Paul fi biit im , Paul aks wan a di aafisa weh wende stan op gens im , “ Tel mi , sah , ih liigal fi biit wan Roman sitizn bifuor unu trai im dah kuot ? ”', "San Andrés–Providencia Creole", "English"], -['Afta dem tai im op an chrech im out fi biit im , Paal aks di ed fi onjrid suoja we did tan op de , “ Di Laa gi yu no rait fi biit mi , wan man we kom fram Ruom , wen yu no iivn kyari mi go a kuot an se mi gilti fi notn ? 
”', "Jamaican Creole", "English"], -['Bɔt wɛn dɛn want bit am , dɛn tay am ; na in Pɔl aks di soja man dɛn edman we bin tinap de se , “ Di lɔ tɛl una se una kin bit pɔsin we na Roman wɛn una nɔ jɔj am yet ? ”', "Sierra Leonean Creole", "English"], -['Me letan zot fini atas li pou kapav fwet li , Pol dir ofisie ki ti la , “ Zot ena drwa fwet enn sitwayin Romin san mem ki zot ziz li ? ”', "Mauritian Creole", "English"], -['Ma ora nan a rèk su kurpa pa suté ku zuip , Pablo a bisa e ofisial di ehérsito pará einan : “ Boso tin mag di suta un hende ku ta siudadano romano sin ku e ta kondená ? ”', "Papiamento", "English"], -['Wen dem don put am for groun mak dem start to flog am , Pol kon ask di soja wey stand near am , " E dey rite mak una flog pesin wey bi Roman citizin , wen dem neva joj en kase ? "', "Nigerian Pidgin", "English"], -['Bat taem olketa taengem hand bilong hem long post for whipim hem , Paul sei olsem long bigman bilong army wea standap long there : “ Hao , hem stret for iufala whipim wanfala man bilong Rome wea iufala no kotem hem yet ? ”', "Pijin", "English"], -['en wen deibin taiyimap Pol blanga beldim im , imbin tok langa det boswan solja hubin jandap deya wansaid langa im . Imbin tok , “ Yumob nomo lau beldim mi , dumaji mi garram det rait seimwei laik ol yumob Roman pipul , en ai nomo bin abum kotkeis yet . ”', "Australian Kriol", "English"], -['Me tongana ala leke lo ti tene a pika lo na zaza , Paul atene na turugu ti kota kamba so ayeke luti na ndo so : “ Ndia amû lege na ALA ti pika na zaza mbeni koli so ayeke Romain na so a dë ngbanga na li ti lo pëpe ? ”', "Sango", "English"], -['Hën de tjëën go seeka tai fu de hupi . Hën Paulosu hakisi di kabiteni u sodati taanputaanpu dë taa : “ Unfa di wëti dë ? Un sa hupi wan goon mii u Loomë ufö un kuutu soni fëën ö ? ”', "Saramaccan", "English"], -['Ma di den poti Paulus didon langalanga fu wipi en , dan a taigi a legre-ofsiri di ben e tanapu drape : „ A fiti taki unu e wipi wan Romesma sondro fu krutu en ? ”', "Sranan Tongo", "English"], -['Bat wen dempla i bin mekpas Pol so dempla ken ploke em , Pol i bin spik po da sekan amiopisa uda bin stanap klostu wea em . Pol i bin spik , ‘ Ei yu ! Yu lau po ploke man uda gad rait wase man prom Rom , bipo yupla teke em po kot a ? ’', "Torres Strait Creole", "English"], -['Tasol taim ol i apim 2-pela han bilong en na pasim bilong wipim em , Pol i tokim ofisa bilong ami i sanap klostu olsem : “ I stret yupela i wipim wanpela man Rom taim em i no bin sanap yet long kot ? ”', "Tok Pisin", "English"], -['Men , lè yo fin mare Pòl pou yo bat li , li di ofisye ki te la a : “ Èske NOU gen dwa bat yon sitwayen women ki pa kondane ? ”', "Haitian Creole", "English"], -['Waktu dong ika sang Paulus ko mau firuk sang dia , ju dia bale tanya sang itu tantara , bilang , “ Iko pamarenta Roma pung atoran , mana yang batúl ? Kalo satu orang ada pung hak warga Roma , ais dia dapa parkara , bosong musti bekin karmana sang dia ? Bosong papoko lebe dolo sang dia , ko , bosong pareksa lebe dolo ? ”', "Kupang Malay", "English"], -['Maibé kuandu sira kesi tiha nia atu baku nia ho xikote , Paulo dehan ba kapitaun tropa nian neʼebé hamriik besik : “ Tuir lei , imi bele baku ema Roma ida maski seidauk tesi lia ba nia ka lae ? ”', "Tetun Dili", "English"], -['Bila dia-orang sudah ikatkan dia dngan tali kulit , Paulus kata sama itu hulubalang yang berdiri dkat situ , " Kalau satu anak Rom blum kna hukum , ada-kah patut angkau ssahkan dia ? 
"', "Malay Baba", "English"], -['Bu bau imene kukanga yandi sambu na kubula yandi fimbe , Paulu tubaka kwa nkuluntu ya telemaka kuna : Nsiku pesa nzila kubula muntu ya Loma fimbu ya imene kufunduswa ve ?', "Kituba", "English"], -['But when they had stretched him out for the whipping , Paul said to the army officer standing there : “ Is it lawful for you to scourge a Roman who has not been condemned ? ”', "English", "Hawaiian Pidgin"] -] - -iface = gr.Interface(fn=generate, inputs=[text, src_language_drop_down, tgt_language_drop_down], outputs=text_ouptut, title='CreoleM2M System', - description='A system to translate to, from and between Creoles (and English). Currently the model supports ' + supported_lang, examples=examples) # -iface.launch(enable_queue=True) \ No newline at end of file diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/PIL/ImageDraw2.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/PIL/ImageDraw2.py deleted file mode 100644 index 7ce0224a67c7197a763c61f3739665cf19f23b60..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/PIL/ImageDraw2.py +++ /dev/null @@ -1,193 +0,0 @@ -# -# The Python Imaging Library -# $Id$ -# -# WCK-style drawing interface operations -# -# History: -# 2003-12-07 fl created -# 2005-05-15 fl updated; added to PIL as ImageDraw2 -# 2005-05-15 fl added text support -# 2005-05-20 fl added arc/chord/pieslice support -# -# Copyright (c) 2003-2005 by Secret Labs AB -# Copyright (c) 2003-2005 by Fredrik Lundh -# -# See the README file for information on usage and redistribution. -# - - -""" -(Experimental) WCK-style drawing interface operations - -.. seealso:: :py:mod:`PIL.ImageDraw` -""" - - -from . import Image, ImageColor, ImageDraw, ImageFont, ImagePath - - -class Pen: - """Stores an outline color and width.""" - - def __init__(self, color, width=1, opacity=255): - self.color = ImageColor.getrgb(color) - self.width = width - - -class Brush: - """Stores a fill color""" - - def __init__(self, color, opacity=255): - self.color = ImageColor.getrgb(color) - - -class Font: - """Stores a TrueType font and color""" - - def __init__(self, color, file, size=12): - # FIXME: add support for bitmap fonts - self.color = ImageColor.getrgb(color) - self.font = ImageFont.truetype(file, size) - - -class Draw: - """ - (Experimental) WCK-style drawing interface - """ - - def __init__(self, image, size=None, color=None): - if not hasattr(image, "im"): - image = Image.new(image, size, color) - self.draw = ImageDraw.Draw(image) - self.image = image - self.transform = None - - def flush(self): - return self.image - - def render(self, op, xy, pen, brush=None): - # handle color arguments - outline = fill = None - width = 1 - if isinstance(pen, Pen): - outline = pen.color - width = pen.width - elif isinstance(brush, Pen): - outline = brush.color - width = brush.width - if isinstance(brush, Brush): - fill = brush.color - elif isinstance(pen, Brush): - fill = pen.color - # handle transformation - if self.transform: - xy = ImagePath.Path(xy) - xy.transform(self.transform) - # render the item - if op == "line": - self.draw.line(xy, fill=outline, width=width) - else: - getattr(self.draw, op)(xy, fill=fill, outline=outline) - - def settransform(self, offset): - """Sets a transformation offset.""" - (xoffset, yoffset) = offset - self.transform = (1, 0, xoffset, 0, 1, yoffset) - - def arc(self, xy, start, end, *options): - """ - Draws an arc (a portion of a circle outline) between the 
start and end - angles, inside the given bounding box. - - .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.arc` - """ - self.render("arc", xy, start, end, *options) - - def chord(self, xy, start, end, *options): - """ - Same as :py:meth:`~PIL.ImageDraw2.Draw.arc`, but connects the end points - with a straight line. - - .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.chord` - """ - self.render("chord", xy, start, end, *options) - - def ellipse(self, xy, *options): - """ - Draws an ellipse inside the given bounding box. - - .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.ellipse` - """ - self.render("ellipse", xy, *options) - - def line(self, xy, *options): - """ - Draws a line between the coordinates in the ``xy`` list. - - .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.line` - """ - self.render("line", xy, *options) - - def pieslice(self, xy, start, end, *options): - """ - Same as arc, but also draws straight lines between the end points and the - center of the bounding box. - - .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.pieslice` - """ - self.render("pieslice", xy, start, end, *options) - - def polygon(self, xy, *options): - """ - Draws a polygon. - - The polygon outline consists of straight lines between the given - coordinates, plus a straight line between the last and the first - coordinate. - - - .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.polygon` - """ - self.render("polygon", xy, *options) - - def rectangle(self, xy, *options): - """ - Draws a rectangle. - - .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.rectangle` - """ - self.render("rectangle", xy, *options) - - def text(self, xy, text, font): - """ - Draws the string at the given position. - - .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.text` - """ - if self.transform: - xy = ImagePath.Path(xy) - xy.transform(self.transform) - self.draw.text(xy, text, font=font.font, fill=font.color) - - def textbbox(self, xy, text, font): - """ - Returns bounding box (in pixels) of given text. - - :return: ``(left, top, right, bottom)`` bounding box - - .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.textbbox` - """ - if self.transform: - xy = ImagePath.Path(xy) - xy.transform(self.transform) - return self.draw.textbbox(xy, text, font=font.font) - - def textlength(self, text, font): - """ - Returns length (in pixels) of given text. - This is the amount by which following text should be offset. - - .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.textlength` - """ - return self.draw.textlength(text, font=font.font) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/PIL/PixarImagePlugin.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/PIL/PixarImagePlugin.py deleted file mode 100644 index 850272311de8af6bfbb8b8388560b6987ab0d481..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/PIL/PixarImagePlugin.py +++ /dev/null @@ -1,69 +0,0 @@ -# -# The Python Imaging Library. -# $Id$ -# -# PIXAR raster support for PIL -# -# history: -# 97-01-29 fl Created -# -# notes: -# This is incomplete; it is based on a few samples created with -# Photoshop 2.5 and 3.0, and a summary description provided by -# Greg Coats . Hopefully, "L" and -# "RGBA" support will be added in future versions. -# -# Copyright (c) Secret Labs AB 1997. -# Copyright (c) Fredrik Lundh 1997. -# -# See the README file for information on usage and redistribution. -# - -from . 
import Image, ImageFile -from ._binary import i16le as i16 - -# -# helpers - - -def _accept(prefix): - return prefix[:4] == b"\200\350\000\000" - - -## -# Image plugin for PIXAR raster images. - - -class PixarImageFile(ImageFile.ImageFile): - format = "PIXAR" - format_description = "PIXAR raster image" - - def _open(self): - # assuming a 4-byte magic label - s = self.fp.read(4) - if not _accept(s): - msg = "not a PIXAR file" - raise SyntaxError(msg) - - # read rest of header - s = s + self.fp.read(508) - - self._size = i16(s, 418), i16(s, 416) - - # get channel/depth descriptions - mode = i16(s, 424), i16(s, 426) - - if mode == (14, 2): - self._mode = "RGB" - # FIXME: to be continued... - - # create tile descriptor (assuming "dumped") - self.tile = [("raw", (0, 0) + self.size, 1024, (self.mode, 0, 1))] - - -# -# -------------------------------------------------------------------- - -Image.register_open(PixarImageFile.format, PixarImageFile, _accept) - -Image.register_extension(PixarImageFile.format, ".pxr") diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/PIL/__main__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/PIL/__main__.py deleted file mode 100644 index a05323f93b6850c2f86aedb3b1a5dee16358027f..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/PIL/__main__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .features import pilinfo - -pilinfo() diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/aiohttp/multipart.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/aiohttp/multipart.py deleted file mode 100644 index 73801f459aa274ca6aae7bf28a2c5bb3bf075d11..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/aiohttp/multipart.py +++ /dev/null @@ -1,961 +0,0 @@ -import base64 -import binascii -import json -import re -import uuid -import warnings -import zlib -from collections import deque -from types import TracebackType -from typing import ( - TYPE_CHECKING, - Any, - AsyncIterator, - Deque, - Dict, - Iterator, - List, - Mapping, - Optional, - Sequence, - Tuple, - Type, - Union, - cast, -) -from urllib.parse import parse_qsl, unquote, urlencode - -from multidict import CIMultiDict, CIMultiDictProxy, MultiMapping - -from .hdrs import ( - CONTENT_DISPOSITION, - CONTENT_ENCODING, - CONTENT_LENGTH, - CONTENT_TRANSFER_ENCODING, - CONTENT_TYPE, -) -from .helpers import CHAR, TOKEN, parse_mimetype, reify -from .http import HeadersParser -from .payload import ( - JsonPayload, - LookupError, - Order, - Payload, - StringPayload, - get_payload, - payload_type, -) -from .streams import StreamReader - -__all__ = ( - "MultipartReader", - "MultipartWriter", - "BodyPartReader", - "BadContentDispositionHeader", - "BadContentDispositionParam", - "parse_content_disposition", - "content_disposition_filename", -) - - -if TYPE_CHECKING: # pragma: no cover - from .client_reqrep import ClientResponse - - -class BadContentDispositionHeader(RuntimeWarning): - pass - - -class BadContentDispositionParam(RuntimeWarning): - pass - - -def parse_content_disposition( - header: Optional[str], -) -> Tuple[Optional[str], Dict[str, str]]: - def is_token(string: str) -> bool: - return bool(string) and TOKEN >= set(string) - - def is_quoted(string: str) -> bool: - return string[0] == string[-1] == '"' - - def is_rfc5987(string: str) -> bool: - return is_token(string) and string.count("'") == 2 - - def is_extended_param(string: str) 
-> bool: - return string.endswith("*") - - def is_continuous_param(string: str) -> bool: - pos = string.find("*") + 1 - if not pos: - return False - substring = string[pos:-1] if string.endswith("*") else string[pos:] - return substring.isdigit() - - def unescape(text: str, *, chars: str = "".join(map(re.escape, CHAR))) -> str: - return re.sub(f"\\\\([{chars}])", "\\1", text) - - if not header: - return None, {} - - disptype, *parts = header.split(";") - if not is_token(disptype): - warnings.warn(BadContentDispositionHeader(header)) - return None, {} - - params: Dict[str, str] = {} - while parts: - item = parts.pop(0) - - if "=" not in item: - warnings.warn(BadContentDispositionHeader(header)) - return None, {} - - key, value = item.split("=", 1) - key = key.lower().strip() - value = value.lstrip() - - if key in params: - warnings.warn(BadContentDispositionHeader(header)) - return None, {} - - if not is_token(key): - warnings.warn(BadContentDispositionParam(item)) - continue - - elif is_continuous_param(key): - if is_quoted(value): - value = unescape(value[1:-1]) - elif not is_token(value): - warnings.warn(BadContentDispositionParam(item)) - continue - - elif is_extended_param(key): - if is_rfc5987(value): - encoding, _, value = value.split("'", 2) - encoding = encoding or "utf-8" - else: - warnings.warn(BadContentDispositionParam(item)) - continue - - try: - value = unquote(value, encoding, "strict") - except UnicodeDecodeError: # pragma: nocover - warnings.warn(BadContentDispositionParam(item)) - continue - - else: - failed = True - if is_quoted(value): - failed = False - value = unescape(value[1:-1].lstrip("\\/")) - elif is_token(value): - failed = False - elif parts: - # maybe just ; in filename, in any case this is just - # one case fix, for proper fix we need to redesign parser - _value = f"{value};{parts[0]}" - if is_quoted(_value): - parts.pop(0) - value = unescape(_value[1:-1].lstrip("\\/")) - failed = False - - if failed: - warnings.warn(BadContentDispositionHeader(header)) - return None, {} - - params[key] = value - - return disptype.lower(), params - - -def content_disposition_filename( - params: Mapping[str, str], name: str = "filename" -) -> Optional[str]: - name_suf = "%s*" % name - if not params: - return None - elif name_suf in params: - return params[name_suf] - elif name in params: - return params[name] - else: - parts = [] - fnparams = sorted( - (key, value) for key, value in params.items() if key.startswith(name_suf) - ) - for num, (key, value) in enumerate(fnparams): - _, tail = key.split("*", 1) - if tail.endswith("*"): - tail = tail[:-1] - if tail == str(num): - parts.append(value) - else: - break - if not parts: - return None - value = "".join(parts) - if "'" in value: - encoding, _, value = value.split("'", 2) - encoding = encoding or "utf-8" - return unquote(value, encoding, "strict") - return value - - -class MultipartResponseWrapper: - """Wrapper around the MultipartReader. - - It takes care about - underlying connection and close it when it needs in. 
- """ - - def __init__( - self, - resp: "ClientResponse", - stream: "MultipartReader", - ) -> None: - self.resp = resp - self.stream = stream - - def __aiter__(self) -> "MultipartResponseWrapper": - return self - - async def __anext__( - self, - ) -> Union["MultipartReader", "BodyPartReader"]: - part = await self.next() - if part is None: - raise StopAsyncIteration - return part - - def at_eof(self) -> bool: - """Returns True when all response data had been read.""" - return self.resp.content.at_eof() - - async def next( - self, - ) -> Optional[Union["MultipartReader", "BodyPartReader"]]: - """Emits next multipart reader object.""" - item = await self.stream.next() - if self.stream.at_eof(): - await self.release() - return item - - async def release(self) -> None: - """Release the connection gracefully. - - All remaining content is read to the void. - """ - await self.resp.release() - - -class BodyPartReader: - """Multipart reader for single body part.""" - - chunk_size = 8192 - - def __init__( - self, boundary: bytes, headers: "CIMultiDictProxy[str]", content: StreamReader - ) -> None: - self.headers = headers - self._boundary = boundary - self._content = content - self._at_eof = False - length = self.headers.get(CONTENT_LENGTH, None) - self._length = int(length) if length is not None else None - self._read_bytes = 0 - # TODO: typeing.Deque is not supported by Python 3.5 - self._unread: Deque[bytes] = deque() - self._prev_chunk: Optional[bytes] = None - self._content_eof = 0 - self._cache: Dict[str, Any] = {} - - def __aiter__(self) -> AsyncIterator["BodyPartReader"]: - return self # type: ignore[return-value] - - async def __anext__(self) -> bytes: - part = await self.next() - if part is None: - raise StopAsyncIteration - return part - - async def next(self) -> Optional[bytes]: - item = await self.read() - if not item: - return None - return item - - async def read(self, *, decode: bool = False) -> bytes: - """Reads body part data. - - decode: Decodes data following by encoding - method from Content-Encoding header. If it missed - data remains untouched - """ - if self._at_eof: - return b"" - data = bytearray() - while not self._at_eof: - data.extend(await self.read_chunk(self.chunk_size)) - if decode: - return self.decode(data) - return data - - async def read_chunk(self, size: int = chunk_size) -> bytes: - """Reads body part content chunk of the specified size. - - size: chunk size - """ - if self._at_eof: - return b"" - if self._length: - chunk = await self._read_chunk_from_length(size) - else: - chunk = await self._read_chunk_from_stream(size) - - self._read_bytes += len(chunk) - if self._read_bytes == self._length: - self._at_eof = True - if self._at_eof: - clrf = await self._content.readline() - assert ( - b"\r\n" == clrf - ), "reader did not read all the data or it is malformed" - return chunk - - async def _read_chunk_from_length(self, size: int) -> bytes: - # Reads body part content chunk of the specified size. - # The body part must has Content-Length header with proper value. - assert self._length is not None, "Content-Length required for chunked read" - chunk_size = min(size, self._length - self._read_bytes) - chunk = await self._content.read(chunk_size) - return chunk - - async def _read_chunk_from_stream(self, size: int) -> bytes: - # Reads content chunk of body part with unknown length. - # The Content-Length header for body part is not necessary. 
- assert ( - size >= len(self._boundary) + 2 - ), "Chunk size must be greater or equal than boundary length + 2" - first_chunk = self._prev_chunk is None - if first_chunk: - self._prev_chunk = await self._content.read(size) - - chunk = await self._content.read(size) - self._content_eof += int(self._content.at_eof()) - assert self._content_eof < 3, "Reading after EOF" - assert self._prev_chunk is not None - window = self._prev_chunk + chunk - sub = b"\r\n" + self._boundary - if first_chunk: - idx = window.find(sub) - else: - idx = window.find(sub, max(0, len(self._prev_chunk) - len(sub))) - if idx >= 0: - # pushing boundary back to content - with warnings.catch_warnings(): - warnings.filterwarnings("ignore", category=DeprecationWarning) - self._content.unread_data(window[idx:]) - if size > idx: - self._prev_chunk = self._prev_chunk[:idx] - chunk = window[len(self._prev_chunk) : idx] - if not chunk: - self._at_eof = True - result = self._prev_chunk - self._prev_chunk = chunk - return result - - async def readline(self) -> bytes: - """Reads body part by line by line.""" - if self._at_eof: - return b"" - - if self._unread: - line = self._unread.popleft() - else: - line = await self._content.readline() - - if line.startswith(self._boundary): - # the very last boundary may not come with \r\n, - # so set single rules for everyone - sline = line.rstrip(b"\r\n") - boundary = self._boundary - last_boundary = self._boundary + b"--" - # ensure that we read exactly the boundary, not something alike - if sline == boundary or sline == last_boundary: - self._at_eof = True - self._unread.append(line) - return b"" - else: - next_line = await self._content.readline() - if next_line.startswith(self._boundary): - line = line[:-2] # strip CRLF but only once - self._unread.append(next_line) - - return line - - async def release(self) -> None: - """Like read(), but reads all the data to the void.""" - if self._at_eof: - return - while not self._at_eof: - await self.read_chunk(self.chunk_size) - - async def text(self, *, encoding: Optional[str] = None) -> str: - """Like read(), but assumes that body part contains text data.""" - data = await self.read(decode=True) - # see https://www.w3.org/TR/html5/forms.html#multipart/form-data-encoding-algorithm # NOQA - # and https://dvcs.w3.org/hg/xhr/raw-file/tip/Overview.html#dom-xmlhttprequest-send # NOQA - encoding = encoding or self.get_charset(default="utf-8") - return data.decode(encoding) - - async def json(self, *, encoding: Optional[str] = None) -> Optional[Dict[str, Any]]: - """Like read(), but assumes that body parts contains JSON data.""" - data = await self.read(decode=True) - if not data: - return None - encoding = encoding or self.get_charset(default="utf-8") - return cast(Dict[str, Any], json.loads(data.decode(encoding))) - - async def form(self, *, encoding: Optional[str] = None) -> List[Tuple[str, str]]: - """Like read(), but assumes that body parts contain form urlencoded data.""" - data = await self.read(decode=True) - if not data: - return [] - if encoding is not None: - real_encoding = encoding - else: - real_encoding = self.get_charset(default="utf-8") - return parse_qsl( - data.rstrip().decode(real_encoding), - keep_blank_values=True, - encoding=real_encoding, - ) - - def at_eof(self) -> bool: - """Returns True if the boundary was reached or False otherwise.""" - return self._at_eof - - def decode(self, data: bytes) -> bytes: - """Decodes data. - - Decoding is done according the specified Content-Encoding - or Content-Transfer-Encoding headers value. 
- """ - if CONTENT_TRANSFER_ENCODING in self.headers: - data = self._decode_content_transfer(data) - if CONTENT_ENCODING in self.headers: - return self._decode_content(data) - return data - - def _decode_content(self, data: bytes) -> bytes: - encoding = self.headers.get(CONTENT_ENCODING, "").lower() - - if encoding == "deflate": - return zlib.decompress(data, -zlib.MAX_WBITS) - elif encoding == "gzip": - return zlib.decompress(data, 16 + zlib.MAX_WBITS) - elif encoding == "identity": - return data - else: - raise RuntimeError(f"unknown content encoding: {encoding}") - - def _decode_content_transfer(self, data: bytes) -> bytes: - encoding = self.headers.get(CONTENT_TRANSFER_ENCODING, "").lower() - - if encoding == "base64": - return base64.b64decode(data) - elif encoding == "quoted-printable": - return binascii.a2b_qp(data) - elif encoding in ("binary", "8bit", "7bit"): - return data - else: - raise RuntimeError( - "unknown content transfer encoding: {}" "".format(encoding) - ) - - def get_charset(self, default: str) -> str: - """Returns charset parameter from Content-Type header or default.""" - ctype = self.headers.get(CONTENT_TYPE, "") - mimetype = parse_mimetype(ctype) - return mimetype.parameters.get("charset", default) - - @reify - def name(self) -> Optional[str]: - """Returns name specified in Content-Disposition header. - - If the header is missing or malformed, returns None. - """ - _, params = parse_content_disposition(self.headers.get(CONTENT_DISPOSITION)) - return content_disposition_filename(params, "name") - - @reify - def filename(self) -> Optional[str]: - """Returns filename specified in Content-Disposition header. - - Returns None if the header is missing or malformed. - """ - _, params = parse_content_disposition(self.headers.get(CONTENT_DISPOSITION)) - return content_disposition_filename(params, "filename") - - -@payload_type(BodyPartReader, order=Order.try_first) -class BodyPartReaderPayload(Payload): - def __init__(self, value: BodyPartReader, *args: Any, **kwargs: Any) -> None: - super().__init__(value, *args, **kwargs) - - params: Dict[str, str] = {} - if value.name is not None: - params["name"] = value.name - if value.filename is not None: - params["filename"] = value.filename - - if params: - self.set_content_disposition("attachment", True, **params) - - async def write(self, writer: Any) -> None: - field = self._value - chunk = await field.read_chunk(size=2**16) - while chunk: - await writer.write(field.decode(chunk)) - chunk = await field.read_chunk(size=2**16) - - -class MultipartReader: - """Multipart body reader.""" - - #: Response wrapper, used when multipart readers constructs from response. - response_wrapper_cls = MultipartResponseWrapper - #: Multipart reader class, used to handle multipart/* body parts. - #: None points to type(self) - multipart_reader_cls = None - #: Body part reader class for non multipart/* content types. 
- part_reader_cls = BodyPartReader - - def __init__(self, headers: Mapping[str, str], content: StreamReader) -> None: - self.headers = headers - self._boundary = ("--" + self._get_boundary()).encode() - self._content = content - self._last_part: Optional[Union["MultipartReader", BodyPartReader]] = None - self._at_eof = False - self._at_bof = True - self._unread: List[bytes] = [] - - def __aiter__( - self, - ) -> AsyncIterator["BodyPartReader"]: - return self # type: ignore[return-value] - - async def __anext__( - self, - ) -> Optional[Union["MultipartReader", BodyPartReader]]: - part = await self.next() - if part is None: - raise StopAsyncIteration - return part - - @classmethod - def from_response( - cls, - response: "ClientResponse", - ) -> MultipartResponseWrapper: - """Constructs reader instance from HTTP response. - - :param response: :class:`~aiohttp.client.ClientResponse` instance - """ - obj = cls.response_wrapper_cls( - response, cls(response.headers, response.content) - ) - return obj - - def at_eof(self) -> bool: - """Returns True if the final boundary was reached, false otherwise.""" - return self._at_eof - - async def next( - self, - ) -> Optional[Union["MultipartReader", BodyPartReader]]: - """Emits the next multipart body part.""" - # So, if we're at BOF, we need to skip till the boundary. - if self._at_eof: - return None - await self._maybe_release_last_part() - if self._at_bof: - await self._read_until_first_boundary() - self._at_bof = False - else: - await self._read_boundary() - if self._at_eof: # we just read the last boundary, nothing to do there - return None - self._last_part = await self.fetch_next_part() - return self._last_part - - async def release(self) -> None: - """Reads all the body parts to the void till the final boundary.""" - while not self._at_eof: - item = await self.next() - if item is None: - break - await item.release() - - async def fetch_next_part( - self, - ) -> Union["MultipartReader", BodyPartReader]: - """Returns the next body part reader.""" - headers = await self._read_headers() - return self._get_part_reader(headers) - - def _get_part_reader( - self, - headers: "CIMultiDictProxy[str]", - ) -> Union["MultipartReader", BodyPartReader]: - """Dispatches the response by the `Content-Type` header. - - Returns a suitable reader instance. 
- - :param dict headers: Response headers - """ - ctype = headers.get(CONTENT_TYPE, "") - mimetype = parse_mimetype(ctype) - - if mimetype.type == "multipart": - if self.multipart_reader_cls is None: - return type(self)(headers, self._content) - return self.multipart_reader_cls(headers, self._content) - else: - return self.part_reader_cls(self._boundary, headers, self._content) - - def _get_boundary(self) -> str: - mimetype = parse_mimetype(self.headers[CONTENT_TYPE]) - - assert mimetype.type == "multipart", "multipart/* content type expected" - - if "boundary" not in mimetype.parameters: - raise ValueError( - "boundary missed for Content-Type: %s" % self.headers[CONTENT_TYPE] - ) - - boundary = mimetype.parameters["boundary"] - if len(boundary) > 70: - raise ValueError("boundary %r is too long (70 chars max)" % boundary) - - return boundary - - async def _readline(self) -> bytes: - if self._unread: - return self._unread.pop() - return await self._content.readline() - - async def _read_until_first_boundary(self) -> None: - while True: - chunk = await self._readline() - if chunk == b"": - raise ValueError( - "Could not find starting boundary %r" % (self._boundary) - ) - chunk = chunk.rstrip() - if chunk == self._boundary: - return - elif chunk == self._boundary + b"--": - self._at_eof = True - return - - async def _read_boundary(self) -> None: - chunk = (await self._readline()).rstrip() - if chunk == self._boundary: - pass - elif chunk == self._boundary + b"--": - self._at_eof = True - epilogue = await self._readline() - next_line = await self._readline() - - # the epilogue is expected and then either the end of input or the - # parent multipart boundary, if the parent boundary is found then - # it should be marked as unread and handed to the parent for - # processing - if next_line[:2] == b"--": - self._unread.append(next_line) - # otherwise the request is likely missing an epilogue and both - # lines should be passed to the parent for processing - # (this handles the old behavior gracefully) - else: - self._unread.extend([next_line, epilogue]) - else: - raise ValueError(f"Invalid boundary {chunk!r}, expected {self._boundary!r}") - - async def _read_headers(self) -> "CIMultiDictProxy[str]": - lines = [b""] - while True: - chunk = await self._content.readline() - chunk = chunk.strip() - lines.append(chunk) - if not chunk: - break - parser = HeadersParser() - headers, raw_headers = parser.parse_headers(lines) - return headers - - async def _maybe_release_last_part(self) -> None: - """Ensures that the last read body part is read completely.""" - if self._last_part is not None: - if not self._last_part.at_eof(): - await self._last_part.release() - self._unread.extend(self._last_part._unread) - self._last_part = None - - -_Part = Tuple[Payload, str, str] - - -class MultipartWriter(Payload): - """Multipart body writer.""" - - def __init__(self, subtype: str = "mixed", boundary: Optional[str] = None) -> None: - boundary = boundary if boundary is not None else uuid.uuid4().hex - # The underlying Payload API demands a str (utf-8), not bytes, - # so we need to ensure we don't lose anything during conversion. - # As a result, require the boundary to be ASCII only. - # In both situations. 
- - try: - self._boundary = boundary.encode("ascii") - except UnicodeEncodeError: - raise ValueError("boundary should contain ASCII only chars") from None - ctype = f"multipart/{subtype}; boundary={self._boundary_value}" - - super().__init__(None, content_type=ctype) - - self._parts: List[_Part] = [] - - def __enter__(self) -> "MultipartWriter": - return self - - def __exit__( - self, - exc_type: Optional[Type[BaseException]], - exc_val: Optional[BaseException], - exc_tb: Optional[TracebackType], - ) -> None: - pass - - def __iter__(self) -> Iterator[_Part]: - return iter(self._parts) - - def __len__(self) -> int: - return len(self._parts) - - def __bool__(self) -> bool: - return True - - _valid_tchar_regex = re.compile(rb"\A[!#$%&'*+\-.^_`|~\w]+\Z") - _invalid_qdtext_char_regex = re.compile(rb"[\x00-\x08\x0A-\x1F\x7F]") - - @property - def _boundary_value(self) -> str: - """Wrap boundary parameter value in quotes, if necessary. - - Reads self.boundary and returns a unicode sting. - """ - # Refer to RFCs 7231, 7230, 5234. - # - # parameter = token "=" ( token / quoted-string ) - # token = 1*tchar - # quoted-string = DQUOTE *( qdtext / quoted-pair ) DQUOTE - # qdtext = HTAB / SP / %x21 / %x23-5B / %x5D-7E / obs-text - # obs-text = %x80-FF - # quoted-pair = "\" ( HTAB / SP / VCHAR / obs-text ) - # tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" - # / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~" - # / DIGIT / ALPHA - # ; any VCHAR, except delimiters - # VCHAR = %x21-7E - value = self._boundary - if re.match(self._valid_tchar_regex, value): - return value.decode("ascii") # cannot fail - - if re.search(self._invalid_qdtext_char_regex, value): - raise ValueError("boundary value contains invalid characters") - - # escape %x5C and %x22 - quoted_value_content = value.replace(b"\\", b"\\\\") - quoted_value_content = quoted_value_content.replace(b'"', b'\\"') - - return '"' + quoted_value_content.decode("ascii") + '"' - - @property - def boundary(self) -> str: - return self._boundary.decode("ascii") - - def append(self, obj: Any, headers: Optional[MultiMapping[str]] = None) -> Payload: - if headers is None: - headers = CIMultiDict() - - if isinstance(obj, Payload): - obj.headers.update(headers) - return self.append_payload(obj) - else: - try: - payload = get_payload(obj, headers=headers) - except LookupError: - raise TypeError("Cannot create payload from %r" % obj) - else: - return self.append_payload(payload) - - def append_payload(self, payload: Payload) -> Payload: - """Adds a new body part to multipart writer.""" - # compression - encoding: Optional[str] = payload.headers.get( - CONTENT_ENCODING, - "", - ).lower() - if encoding and encoding not in ("deflate", "gzip", "identity"): - raise RuntimeError(f"unknown content encoding: {encoding}") - if encoding == "identity": - encoding = None - - # te encoding - te_encoding: Optional[str] = payload.headers.get( - CONTENT_TRANSFER_ENCODING, - "", - ).lower() - if te_encoding not in ("", "base64", "quoted-printable", "binary"): - raise RuntimeError( - "unknown content transfer encoding: {}" "".format(te_encoding) - ) - if te_encoding == "binary": - te_encoding = None - - # size - size = payload.size - if size is not None and not (encoding or te_encoding): - payload.headers[CONTENT_LENGTH] = str(size) - - self._parts.append((payload, encoding, te_encoding)) # type: ignore[arg-type] - return payload - - def append_json( - self, obj: Any, headers: Optional[MultiMapping[str]] = None - ) -> Payload: - """Helper to append JSON part.""" - if headers is None: - 
headers = CIMultiDict() - - return self.append_payload(JsonPayload(obj, headers=headers)) - - def append_form( - self, - obj: Union[Sequence[Tuple[str, str]], Mapping[str, str]], - headers: Optional[MultiMapping[str]] = None, - ) -> Payload: - """Helper to append form urlencoded part.""" - assert isinstance(obj, (Sequence, Mapping)) - - if headers is None: - headers = CIMultiDict() - - if isinstance(obj, Mapping): - obj = list(obj.items()) - data = urlencode(obj, doseq=True) - - return self.append_payload( - StringPayload( - data, headers=headers, content_type="application/x-www-form-urlencoded" - ) - ) - - @property - def size(self) -> Optional[int]: - """Size of the payload.""" - total = 0 - for part, encoding, te_encoding in self._parts: - if encoding or te_encoding or part.size is None: - return None - - total += int( - 2 - + len(self._boundary) - + 2 - + part.size # b'--'+self._boundary+b'\r\n' - + len(part._binary_headers) - + 2 # b'\r\n' - ) - - total += 2 + len(self._boundary) + 4 # b'--'+self._boundary+b'--\r\n' - return total - - async def write(self, writer: Any, close_boundary: bool = True) -> None: - """Write body.""" - for part, encoding, te_encoding in self._parts: - await writer.write(b"--" + self._boundary + b"\r\n") - await writer.write(part._binary_headers) - - if encoding or te_encoding: - w = MultipartPayloadWriter(writer) - if encoding: - w.enable_compression(encoding) - if te_encoding: - w.enable_encoding(te_encoding) - await part.write(w) # type: ignore[arg-type] - await w.write_eof() - else: - await part.write(writer) - - await writer.write(b"\r\n") - - if close_boundary: - await writer.write(b"--" + self._boundary + b"--\r\n") - - -class MultipartPayloadWriter: - def __init__(self, writer: Any) -> None: - self._writer = writer - self._encoding: Optional[str] = None - self._compress: Any = None - self._encoding_buffer: Optional[bytearray] = None - - def enable_encoding(self, encoding: str) -> None: - if encoding == "base64": - self._encoding = encoding - self._encoding_buffer = bytearray() - elif encoding == "quoted-printable": - self._encoding = "quoted-printable" - - def enable_compression( - self, encoding: str = "deflate", strategy: int = zlib.Z_DEFAULT_STRATEGY - ) -> None: - zlib_mode = 16 + zlib.MAX_WBITS if encoding == "gzip" else -zlib.MAX_WBITS - self._compress = zlib.compressobj(wbits=zlib_mode, strategy=strategy) - - async def write_eof(self) -> None: - if self._compress is not None: - chunk = self._compress.flush() - if chunk: - self._compress = None - await self.write(chunk) - - if self._encoding == "base64": - if self._encoding_buffer: - await self._writer.write(base64.b64encode(self._encoding_buffer)) - - async def write(self, chunk: bytes) -> None: - if self._compress is not None: - if chunk: - chunk = self._compress.compress(chunk) - if not chunk: - return - - if self._encoding == "base64": - buf = self._encoding_buffer - assert buf is not None - buf.extend(chunk) - - if buf: - div, mod = divmod(len(buf), 3) - enc_chunk, self._encoding_buffer = (buf[: div * 3], buf[div * 3 :]) - if enc_chunk: - b64chunk = base64.b64encode(enc_chunk) - await self._writer.write(b64chunk) - elif self._encoding == "quoted-printable": - await self._writer.write(binascii.b2a_qp(chunk)) - else: - await self._writer.write(chunk) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fastapi/encoders.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fastapi/encoders.py deleted file mode 100644 index 
e5017139319b1d53c30a537a0b40c4eba873b9ca..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fastapi/encoders.py +++ /dev/null @@ -1,341 +0,0 @@ -import dataclasses -import datetime -from collections import defaultdict, deque -from decimal import Decimal -from enum import Enum -from ipaddress import ( - IPv4Address, - IPv4Interface, - IPv4Network, - IPv6Address, - IPv6Interface, - IPv6Network, -) -from pathlib import Path, PurePath -from re import Pattern -from types import GeneratorType -from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union -from uuid import UUID - -from fastapi.types import IncEx -from pydantic import BaseModel -from pydantic.color import Color -from pydantic.networks import AnyUrl, NameEmail -from pydantic.types import SecretBytes, SecretStr -from typing_extensions import Annotated, Doc # type: ignore [attr-defined] - -from ._compat import PYDANTIC_V2, Url, _model_dump - - -# Taken from Pydantic v1 as is -def isoformat(o: Union[datetime.date, datetime.time]) -> str: - return o.isoformat() - - -# Taken from Pydantic v1 as is -# TODO: pv2 should this return strings instead? -def decimal_encoder(dec_value: Decimal) -> Union[int, float]: - """ - Encodes a Decimal as int of there's no exponent, otherwise float - - This is useful when we use ConstrainedDecimal to represent Numeric(x,0) - where a integer (but not int typed) is used. Encoding this as a float - results in failed round-tripping between encode and parse. - Our Id type is a prime example of this. - - >>> decimal_encoder(Decimal("1.0")) - 1.0 - - >>> decimal_encoder(Decimal("1")) - 1 - """ - if dec_value.as_tuple().exponent >= 0: # type: ignore[operator] - return int(dec_value) - else: - return float(dec_value) - - -ENCODERS_BY_TYPE: Dict[Type[Any], Callable[[Any], Any]] = { - bytes: lambda o: o.decode(), - Color: str, - datetime.date: isoformat, - datetime.datetime: isoformat, - datetime.time: isoformat, - datetime.timedelta: lambda td: td.total_seconds(), - Decimal: decimal_encoder, - Enum: lambda o: o.value, - frozenset: list, - deque: list, - GeneratorType: list, - IPv4Address: str, - IPv4Interface: str, - IPv4Network: str, - IPv6Address: str, - IPv6Interface: str, - IPv6Network: str, - NameEmail: str, - Path: str, - Pattern: lambda o: o.pattern, - SecretBytes: str, - SecretStr: str, - set: list, - UUID: str, - Url: str, - AnyUrl: str, -} - - -def generate_encoders_by_class_tuples( - type_encoder_map: Dict[Any, Callable[[Any], Any]] -) -> Dict[Callable[[Any], Any], Tuple[Any, ...]]: - encoders_by_class_tuples: Dict[Callable[[Any], Any], Tuple[Any, ...]] = defaultdict( - tuple - ) - for type_, encoder in type_encoder_map.items(): - encoders_by_class_tuples[encoder] += (type_,) - return encoders_by_class_tuples - - -encoders_by_class_tuples = generate_encoders_by_class_tuples(ENCODERS_BY_TYPE) - - -def jsonable_encoder( - obj: Annotated[ - Any, - Doc( - """ - The input object to convert to JSON. - """ - ), - ], - include: Annotated[ - Optional[IncEx], - Doc( - """ - Pydantic's `include` parameter, passed to Pydantic models to set the - fields to include. - """ - ), - ] = None, - exclude: Annotated[ - Optional[IncEx], - Doc( - """ - Pydantic's `exclude` parameter, passed to Pydantic models to set the - fields to exclude. 
- """ - ), - ] = None, - by_alias: Annotated[ - bool, - Doc( - """ - Pydantic's `by_alias` parameter, passed to Pydantic models to define if - the output should use the alias names (when provided) or the Python - attribute names. In an API, if you set an alias, it's probably because you - want to use it in the result, so you probably want to leave this set to - `True`. - """ - ), - ] = True, - exclude_unset: Annotated[ - bool, - Doc( - """ - Pydantic's `exclude_unset` parameter, passed to Pydantic models to define - if it should exclude from the output the fields that were not explicitly - set (and that only had their default values). - """ - ), - ] = False, - exclude_defaults: Annotated[ - bool, - Doc( - """ - Pydantic's `exclude_defaults` parameter, passed to Pydantic models to define - if it should exclude from the output the fields that had the same default - value, even when they were explicitly set. - """ - ), - ] = False, - exclude_none: Annotated[ - bool, - Doc( - """ - Pydantic's `exclude_none` parameter, passed to Pydantic models to define - if it should exclude from the output any fields that have a `None` value. - """ - ), - ] = False, - custom_encoder: Annotated[ - Optional[Dict[Any, Callable[[Any], Any]]], - Doc( - """ - Pydantic's `custom_encoder` parameter, passed to Pydantic models to define - a custom encoder. - """ - ), - ] = None, - sqlalchemy_safe: Annotated[ - bool, - Doc( - """ - Exclude from the output any fields that start with the name `_sa`. - - This is mainly a hack for compatibility with SQLAlchemy objects, they - store internal SQLAlchemy-specific state in attributes named with `_sa`, - and those objects can't (and shouldn't be) serialized to JSON. - """ - ), - ] = True, -) -> Any: - """ - Convert any object to something that can be encoded in JSON. - - This is used internally by FastAPI to make sure anything you return can be - encoded as JSON before it is sent to the client. - - You can also use it yourself, for example to convert objects before saving them - in a database that supports only JSON. - - Read more about it in the - [FastAPI docs for JSON Compatible Encoder](https://fastapi.tiangolo.com/tutorial/encoder/). 
- """ - custom_encoder = custom_encoder or {} - if custom_encoder: - if type(obj) in custom_encoder: - return custom_encoder[type(obj)](obj) - else: - for encoder_type, encoder_instance in custom_encoder.items(): - if isinstance(obj, encoder_type): - return encoder_instance(obj) - if include is not None and not isinstance(include, (set, dict)): - include = set(include) - if exclude is not None and not isinstance(exclude, (set, dict)): - exclude = set(exclude) - if isinstance(obj, BaseModel): - # TODO: remove when deprecating Pydantic v1 - encoders: Dict[Any, Any] = {} - if not PYDANTIC_V2: - encoders = getattr(obj.__config__, "json_encoders", {}) # type: ignore[attr-defined] - if custom_encoder: - encoders.update(custom_encoder) - obj_dict = _model_dump( - obj, - mode="json", - include=include, - exclude=exclude, - by_alias=by_alias, - exclude_unset=exclude_unset, - exclude_none=exclude_none, - exclude_defaults=exclude_defaults, - ) - if "__root__" in obj_dict: - obj_dict = obj_dict["__root__"] - return jsonable_encoder( - obj_dict, - exclude_none=exclude_none, - exclude_defaults=exclude_defaults, - # TODO: remove when deprecating Pydantic v1 - custom_encoder=encoders, - sqlalchemy_safe=sqlalchemy_safe, - ) - if dataclasses.is_dataclass(obj): - obj_dict = dataclasses.asdict(obj) - return jsonable_encoder( - obj_dict, - include=include, - exclude=exclude, - by_alias=by_alias, - exclude_unset=exclude_unset, - exclude_defaults=exclude_defaults, - exclude_none=exclude_none, - custom_encoder=custom_encoder, - sqlalchemy_safe=sqlalchemy_safe, - ) - if isinstance(obj, Enum): - return obj.value - if isinstance(obj, PurePath): - return str(obj) - if isinstance(obj, (str, int, float, type(None))): - return obj - if isinstance(obj, dict): - encoded_dict = {} - allowed_keys = set(obj.keys()) - if include is not None: - allowed_keys &= set(include) - if exclude is not None: - allowed_keys -= set(exclude) - for key, value in obj.items(): - if ( - ( - not sqlalchemy_safe - or (not isinstance(key, str)) - or (not key.startswith("_sa")) - ) - and (value is not None or not exclude_none) - and key in allowed_keys - ): - encoded_key = jsonable_encoder( - key, - by_alias=by_alias, - exclude_unset=exclude_unset, - exclude_none=exclude_none, - custom_encoder=custom_encoder, - sqlalchemy_safe=sqlalchemy_safe, - ) - encoded_value = jsonable_encoder( - value, - by_alias=by_alias, - exclude_unset=exclude_unset, - exclude_none=exclude_none, - custom_encoder=custom_encoder, - sqlalchemy_safe=sqlalchemy_safe, - ) - encoded_dict[encoded_key] = encoded_value - return encoded_dict - if isinstance(obj, (list, set, frozenset, GeneratorType, tuple, deque)): - encoded_list = [] - for item in obj: - encoded_list.append( - jsonable_encoder( - item, - include=include, - exclude=exclude, - by_alias=by_alias, - exclude_unset=exclude_unset, - exclude_defaults=exclude_defaults, - exclude_none=exclude_none, - custom_encoder=custom_encoder, - sqlalchemy_safe=sqlalchemy_safe, - ) - ) - return encoded_list - - if type(obj) in ENCODERS_BY_TYPE: - return ENCODERS_BY_TYPE[type(obj)](obj) - for encoder, classes_tuple in encoders_by_class_tuples.items(): - if isinstance(obj, classes_tuple): - return encoder(obj) - - try: - data = dict(obj) - except Exception as e: - errors: List[Exception] = [] - errors.append(e) - try: - data = vars(obj) - except Exception as e: - errors.append(e) - raise ValueError(errors) from e - return jsonable_encoder( - data, - include=include, - exclude=exclude, - by_alias=by_alias, - exclude_unset=exclude_unset, - 
exclude_defaults=exclude_defaults, - exclude_none=exclude_none, - custom_encoder=custom_encoder, - sqlalchemy_safe=sqlalchemy_safe, - ) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/svgLib/path/arc.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/svgLib/path/arc.py deleted file mode 100644 index 3e0a211e043a9f52954a29ce95de9d2a9f1858d4..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/svgLib/path/arc.py +++ /dev/null @@ -1,153 +0,0 @@ -"""Convert SVG Path's elliptical arcs to Bezier curves. - -The code is mostly adapted from Blink's SVGPathNormalizer::DecomposeArcToCubic -https://github.com/chromium/chromium/blob/93831f2/third_party/ -blink/renderer/core/svg/svg_path_parser.cc#L169-L278 -""" -from fontTools.misc.transform import Identity, Scale -from math import atan2, ceil, cos, fabs, isfinite, pi, radians, sin, sqrt, tan - - -TWO_PI = 2 * pi -PI_OVER_TWO = 0.5 * pi - - -def _map_point(matrix, pt): - # apply Transform matrix to a point represented as a complex number - r = matrix.transformPoint((pt.real, pt.imag)) - return r[0] + r[1] * 1j - - -class EllipticalArc(object): - def __init__(self, current_point, rx, ry, rotation, large, sweep, target_point): - self.current_point = current_point - self.rx = rx - self.ry = ry - self.rotation = rotation - self.large = large - self.sweep = sweep - self.target_point = target_point - - # SVG arc's rotation angle is expressed in degrees, whereas Transform.rotate - # uses radians - self.angle = radians(rotation) - - # these derived attributes are computed by the _parametrize method - self.center_point = self.theta1 = self.theta2 = self.theta_arc = None - - def _parametrize(self): - # convert from endopoint to center parametrization: - # https://www.w3.org/TR/SVG/implnote.html#ArcConversionEndpointToCenter - - # If rx = 0 or ry = 0 then this arc is treated as a straight line segment (a - # "lineto") joining the endpoints. - # http://www.w3.org/TR/SVG/implnote.html#ArcOutOfRangeParameters - rx = fabs(self.rx) - ry = fabs(self.ry) - if not (rx and ry): - return False - - # If the current point and target point for the arc are identical, it should - # be treated as a zero length path. This ensures continuity in animations. - if self.target_point == self.current_point: - return False - - mid_point_distance = (self.current_point - self.target_point) * 0.5 - - point_transform = Identity.rotate(-self.angle) - - transformed_mid_point = _map_point(point_transform, mid_point_distance) - square_rx = rx * rx - square_ry = ry * ry - square_x = transformed_mid_point.real * transformed_mid_point.real - square_y = transformed_mid_point.imag * transformed_mid_point.imag - - # Check if the radii are big enough to draw the arc, scale radii if not. 
- # http://www.w3.org/TR/SVG/implnote.html#ArcCorrectionOutOfRangeRadii - radii_scale = square_x / square_rx + square_y / square_ry - if radii_scale > 1: - rx *= sqrt(radii_scale) - ry *= sqrt(radii_scale) - self.rx, self.ry = rx, ry - - point_transform = Scale(1 / rx, 1 / ry).rotate(-self.angle) - - point1 = _map_point(point_transform, self.current_point) - point2 = _map_point(point_transform, self.target_point) - delta = point2 - point1 - - d = delta.real * delta.real + delta.imag * delta.imag - scale_factor_squared = max(1 / d - 0.25, 0.0) - - scale_factor = sqrt(scale_factor_squared) - if self.sweep == self.large: - scale_factor = -scale_factor - - delta *= scale_factor - center_point = (point1 + point2) * 0.5 - center_point += complex(-delta.imag, delta.real) - point1 -= center_point - point2 -= center_point - - theta1 = atan2(point1.imag, point1.real) - theta2 = atan2(point2.imag, point2.real) - - theta_arc = theta2 - theta1 - if theta_arc < 0 and self.sweep: - theta_arc += TWO_PI - elif theta_arc > 0 and not self.sweep: - theta_arc -= TWO_PI - - self.theta1 = theta1 - self.theta2 = theta1 + theta_arc - self.theta_arc = theta_arc - self.center_point = center_point - - return True - - def _decompose_to_cubic_curves(self): - if self.center_point is None and not self._parametrize(): - return - - point_transform = Identity.rotate(self.angle).scale(self.rx, self.ry) - - # Some results of atan2 on some platform implementations are not exact - # enough. So that we get more cubic curves than expected here. Adding 0.001f - # reduces the count of sgements to the correct count. - num_segments = int(ceil(fabs(self.theta_arc / (PI_OVER_TWO + 0.001)))) - for i in range(num_segments): - start_theta = self.theta1 + i * self.theta_arc / num_segments - end_theta = self.theta1 + (i + 1) * self.theta_arc / num_segments - - t = (4 / 3) * tan(0.25 * (end_theta - start_theta)) - if not isfinite(t): - return - - sin_start_theta = sin(start_theta) - cos_start_theta = cos(start_theta) - sin_end_theta = sin(end_theta) - cos_end_theta = cos(end_theta) - - point1 = complex( - cos_start_theta - t * sin_start_theta, - sin_start_theta + t * cos_start_theta, - ) - point1 += self.center_point - target_point = complex(cos_end_theta, sin_end_theta) - target_point += self.center_point - point2 = target_point - point2 += complex(t * sin_end_theta, -t * cos_end_theta) - - point1 = _map_point(point_transform, point1) - point2 = _map_point(point_transform, point2) - target_point = _map_point(point_transform, target_point) - - yield point1, point2, target_point - - def draw(self, pen): - for point1, point2, target_point in self._decompose_to_cubic_curves(): - pen.curveTo( - (point1.real, point1.imag), - (point2.real, point2.imag), - (target_point.real, target_point.imag), - ) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/ttLib/standardGlyphOrder.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/ttLib/standardGlyphOrder.py deleted file mode 100644 index 4062385240096ac822814aebb8bf7c59cf003a8f..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/ttLib/standardGlyphOrder.py +++ /dev/null @@ -1,271 +0,0 @@ -# -# 'post' table formats 1.0 and 2.0 rely on this list of "standard" -# glyphs. 
-# -# My list is correct according to the Apple documentation for the 'post' table: -# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6post.html -# (However, it seems that TTFdump (from MS) and FontLab disagree, at -# least with respect to the last glyph, which they list as 'dslash' -# instead of 'dcroat'.) -# - -standardGlyphOrder = [ - ".notdef", # 0 - ".null", # 1 - "nonmarkingreturn", # 2 - "space", # 3 - "exclam", # 4 - "quotedbl", # 5 - "numbersign", # 6 - "dollar", # 7 - "percent", # 8 - "ampersand", # 9 - "quotesingle", # 10 - "parenleft", # 11 - "parenright", # 12 - "asterisk", # 13 - "plus", # 14 - "comma", # 15 - "hyphen", # 16 - "period", # 17 - "slash", # 18 - "zero", # 19 - "one", # 20 - "two", # 21 - "three", # 22 - "four", # 23 - "five", # 24 - "six", # 25 - "seven", # 26 - "eight", # 27 - "nine", # 28 - "colon", # 29 - "semicolon", # 30 - "less", # 31 - "equal", # 32 - "greater", # 33 - "question", # 34 - "at", # 35 - "A", # 36 - "B", # 37 - "C", # 38 - "D", # 39 - "E", # 40 - "F", # 41 - "G", # 42 - "H", # 43 - "I", # 44 - "J", # 45 - "K", # 46 - "L", # 47 - "M", # 48 - "N", # 49 - "O", # 50 - "P", # 51 - "Q", # 52 - "R", # 53 - "S", # 54 - "T", # 55 - "U", # 56 - "V", # 57 - "W", # 58 - "X", # 59 - "Y", # 60 - "Z", # 61 - "bracketleft", # 62 - "backslash", # 63 - "bracketright", # 64 - "asciicircum", # 65 - "underscore", # 66 - "grave", # 67 - "a", # 68 - "b", # 69 - "c", # 70 - "d", # 71 - "e", # 72 - "f", # 73 - "g", # 74 - "h", # 75 - "i", # 76 - "j", # 77 - "k", # 78 - "l", # 79 - "m", # 80 - "n", # 81 - "o", # 82 - "p", # 83 - "q", # 84 - "r", # 85 - "s", # 86 - "t", # 87 - "u", # 88 - "v", # 89 - "w", # 90 - "x", # 91 - "y", # 92 - "z", # 93 - "braceleft", # 94 - "bar", # 95 - "braceright", # 96 - "asciitilde", # 97 - "Adieresis", # 98 - "Aring", # 99 - "Ccedilla", # 100 - "Eacute", # 101 - "Ntilde", # 102 - "Odieresis", # 103 - "Udieresis", # 104 - "aacute", # 105 - "agrave", # 106 - "acircumflex", # 107 - "adieresis", # 108 - "atilde", # 109 - "aring", # 110 - "ccedilla", # 111 - "eacute", # 112 - "egrave", # 113 - "ecircumflex", # 114 - "edieresis", # 115 - "iacute", # 116 - "igrave", # 117 - "icircumflex", # 118 - "idieresis", # 119 - "ntilde", # 120 - "oacute", # 121 - "ograve", # 122 - "ocircumflex", # 123 - "odieresis", # 124 - "otilde", # 125 - "uacute", # 126 - "ugrave", # 127 - "ucircumflex", # 128 - "udieresis", # 129 - "dagger", # 130 - "degree", # 131 - "cent", # 132 - "sterling", # 133 - "section", # 134 - "bullet", # 135 - "paragraph", # 136 - "germandbls", # 137 - "registered", # 138 - "copyright", # 139 - "trademark", # 140 - "acute", # 141 - "dieresis", # 142 - "notequal", # 143 - "AE", # 144 - "Oslash", # 145 - "infinity", # 146 - "plusminus", # 147 - "lessequal", # 148 - "greaterequal", # 149 - "yen", # 150 - "mu", # 151 - "partialdiff", # 152 - "summation", # 153 - "product", # 154 - "pi", # 155 - "integral", # 156 - "ordfeminine", # 157 - "ordmasculine", # 158 - "Omega", # 159 - "ae", # 160 - "oslash", # 161 - "questiondown", # 162 - "exclamdown", # 163 - "logicalnot", # 164 - "radical", # 165 - "florin", # 166 - "approxequal", # 167 - "Delta", # 168 - "guillemotleft", # 169 - "guillemotright", # 170 - "ellipsis", # 171 - "nonbreakingspace", # 172 - "Agrave", # 173 - "Atilde", # 174 - "Otilde", # 175 - "OE", # 176 - "oe", # 177 - "endash", # 178 - "emdash", # 179 - "quotedblleft", # 180 - "quotedblright", # 181 - "quoteleft", # 182 - "quoteright", # 183 - "divide", # 184 - "lozenge", # 185 - "ydieresis", # 186 - 
"Ydieresis", # 187 - "fraction", # 188 - "currency", # 189 - "guilsinglleft", # 190 - "guilsinglright", # 191 - "fi", # 192 - "fl", # 193 - "daggerdbl", # 194 - "periodcentered", # 195 - "quotesinglbase", # 196 - "quotedblbase", # 197 - "perthousand", # 198 - "Acircumflex", # 199 - "Ecircumflex", # 200 - "Aacute", # 201 - "Edieresis", # 202 - "Egrave", # 203 - "Iacute", # 204 - "Icircumflex", # 205 - "Idieresis", # 206 - "Igrave", # 207 - "Oacute", # 208 - "Ocircumflex", # 209 - "apple", # 210 - "Ograve", # 211 - "Uacute", # 212 - "Ucircumflex", # 213 - "Ugrave", # 214 - "dotlessi", # 215 - "circumflex", # 216 - "tilde", # 217 - "macron", # 218 - "breve", # 219 - "dotaccent", # 220 - "ring", # 221 - "cedilla", # 222 - "hungarumlaut", # 223 - "ogonek", # 224 - "caron", # 225 - "Lslash", # 226 - "lslash", # 227 - "Scaron", # 228 - "scaron", # 229 - "Zcaron", # 230 - "zcaron", # 231 - "brokenbar", # 232 - "Eth", # 233 - "eth", # 234 - "Yacute", # 235 - "yacute", # 236 - "Thorn", # 237 - "thorn", # 238 - "minus", # 239 - "multiply", # 240 - "onesuperior", # 241 - "twosuperior", # 242 - "threesuperior", # 243 - "onehalf", # 244 - "onequarter", # 245 - "threequarters", # 246 - "franc", # 247 - "Gbreve", # 248 - "gbreve", # 249 - "Idotaccent", # 250 - "Scedilla", # 251 - "scedilla", # 252 - "Cacute", # 253 - "cacute", # 254 - "Ccaron", # 255 - "ccaron", # 256 - "dcroat", # 257 -] diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/importlib_resources/simple.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/importlib_resources/simple.py deleted file mode 100644 index 7770c922c84fabe0031333a4de305dd6d6852911..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/importlib_resources/simple.py +++ /dev/null @@ -1,106 +0,0 @@ -""" -Interface adapters for low-level readers. -""" - -import abc -import io -import itertools -from typing import BinaryIO, List - -from .abc import Traversable, TraversableResources - - -class SimpleReader(abc.ABC): - """ - The minimum, low-level interface required from a resource - provider. - """ - - @property - @abc.abstractmethod - def package(self) -> str: - """ - The name of the package for which this reader loads resources. - """ - - @abc.abstractmethod - def children(self) -> List['SimpleReader']: - """ - Obtain an iterable of SimpleReader for available - child containers (e.g. directories). - """ - - @abc.abstractmethod - def resources(self) -> List[str]: - """ - Obtain available named resources for this virtual package. - """ - - @abc.abstractmethod - def open_binary(self, resource: str) -> BinaryIO: - """ - Obtain a File-like for a named resource. - """ - - @property - def name(self): - return self.package.split('.')[-1] - - -class ResourceContainer(Traversable): - """ - Traversable container for a package's resources via its reader. - """ - - def __init__(self, reader: SimpleReader): - self.reader = reader - - def is_dir(self): - return True - - def is_file(self): - return False - - def iterdir(self): - files = (ResourceHandle(self, name) for name in self.reader.resources) - dirs = map(ResourceContainer, self.reader.children()) - return itertools.chain(files, dirs) - - def open(self, *args, **kwargs): - raise IsADirectoryError() - - -class ResourceHandle(Traversable): - """ - Handle to a named resource in a ResourceReader. 
- """ - - def __init__(self, parent: ResourceContainer, name: str): - self.parent = parent - self.name = name # type: ignore - - def is_file(self): - return True - - def is_dir(self): - return False - - def open(self, mode='r', *args, **kwargs): - stream = self.parent.reader.open_binary(self.name) - if 'b' not in mode: - stream = io.TextIOWrapper(*args, **kwargs) - return stream - - def joinpath(self, name): - raise RuntimeError("Cannot traverse into a resource") - - -class TraversableReader(TraversableResources, SimpleReader): - """ - A TraversableResources based on SimpleReader. Resource providers - may derive from this class to provide the TraversableResources - interface by supplying the SimpleReader interface. - """ - - def files(self): - return ResourceContainer(self) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/bezier.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/bezier.py deleted file mode 100644 index f310f287e2c0f78b7dffd430141073cc8af739cb..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/bezier.py +++ /dev/null @@ -1,594 +0,0 @@ -""" -A module providing some utility functions regarding Bézier path manipulation. -""" - -from functools import lru_cache -import math -import warnings - -import numpy as np - -from matplotlib import _api - - -# same algorithm as 3.8's math.comb -@np.vectorize -@lru_cache(maxsize=128) -def _comb(n, k): - if k > n: - return 0 - k = min(k, n - k) - i = np.arange(1, k + 1) - return np.prod((n + 1 - i)/i).astype(int) - - -class NonIntersectingPathException(ValueError): - pass - - -# some functions - - -def get_intersection(cx1, cy1, cos_t1, sin_t1, - cx2, cy2, cos_t2, sin_t2): - """ - Return the intersection between the line through (*cx1*, *cy1*) at angle - *t1* and the line through (*cx2*, *cy2*) at angle *t2*. - """ - - # line1 => sin_t1 * (x - cx1) - cos_t1 * (y - cy1) = 0. - # line1 => sin_t1 * x + cos_t1 * y = sin_t1*cx1 - cos_t1*cy1 - - line1_rhs = sin_t1 * cx1 - cos_t1 * cy1 - line2_rhs = sin_t2 * cx2 - cos_t2 * cy2 - - # rhs matrix - a, b = sin_t1, -cos_t1 - c, d = sin_t2, -cos_t2 - - ad_bc = a * d - b * c - if abs(ad_bc) < 1e-12: - raise ValueError("Given lines do not intersect. Please verify that " - "the angles are not equal or differ by 180 degrees.") - - # rhs_inverse - a_, b_ = d, -b - c_, d_ = -c, a - a_, b_, c_, d_ = [k / ad_bc for k in [a_, b_, c_, d_]] - - x = a_ * line1_rhs + b_ * line2_rhs - y = c_ * line1_rhs + d_ * line2_rhs - - return x, y - - -def get_normal_points(cx, cy, cos_t, sin_t, length): - """ - For a line passing through (*cx*, *cy*) and having an angle *t*, return - locations of the two points located along its perpendicular line at the - distance of *length*. - """ - - if length == 0.: - return cx, cy, cx, cy - - cos_t1, sin_t1 = sin_t, -cos_t - cos_t2, sin_t2 = -sin_t, cos_t - - x1, y1 = length * cos_t1 + cx, length * sin_t1 + cy - x2, y2 = length * cos_t2 + cx, length * sin_t2 + cy - - return x1, y1, x2, y2 - - -# BEZIER routines - -# subdividing bezier curve -# http://www.cs.mtu.edu/~shene/COURSES/cs3621/NOTES/spline/Bezier/bezier-sub.html - - -def _de_casteljau1(beta, t): - next_beta = beta[:-1] * (1 - t) + beta[1:] * t - return next_beta - - -def split_de_casteljau(beta, t): - """ - Split a Bézier segment defined by its control points *beta* into two - separate segments divided at *t* and return their control points. 
- """ - beta = np.asarray(beta) - beta_list = [beta] - while True: - beta = _de_casteljau1(beta, t) - beta_list.append(beta) - if len(beta) == 1: - break - left_beta = [beta[0] for beta in beta_list] - right_beta = [beta[-1] for beta in reversed(beta_list)] - - return left_beta, right_beta - - -def find_bezier_t_intersecting_with_closedpath( - bezier_point_at_t, inside_closedpath, t0=0., t1=1., tolerance=0.01): - """ - Find the intersection of the Bézier curve with a closed path. - - The intersection point *t* is approximated by two parameters *t0*, *t1* - such that *t0* <= *t* <= *t1*. - - Search starts from *t0* and *t1* and uses a simple bisecting algorithm - therefore one of the end points must be inside the path while the other - doesn't. The search stops when the distance of the points parametrized by - *t0* and *t1* gets smaller than the given *tolerance*. - - Parameters - ---------- - bezier_point_at_t : callable - A function returning x, y coordinates of the Bézier at parameter *t*. - It must have the signature:: - - bezier_point_at_t(t: float) -> tuple[float, float] - - inside_closedpath : callable - A function returning True if a given point (x, y) is inside the - closed path. It must have the signature:: - - inside_closedpath(point: tuple[float, float]) -> bool - - t0, t1 : float - Start parameters for the search. - - tolerance : float - Maximal allowed distance between the final points. - - Returns - ------- - t0, t1 : float - The Bézier path parameters. - """ - start = bezier_point_at_t(t0) - end = bezier_point_at_t(t1) - - start_inside = inside_closedpath(start) - end_inside = inside_closedpath(end) - - if start_inside == end_inside and start != end: - raise NonIntersectingPathException( - "Both points are on the same side of the closed path") - - while True: - - # return if the distance is smaller than the tolerance - if np.hypot(start[0] - end[0], start[1] - end[1]) < tolerance: - return t0, t1 - - # calculate the middle point - middle_t = 0.5 * (t0 + t1) - middle = bezier_point_at_t(middle_t) - middle_inside = inside_closedpath(middle) - - if start_inside ^ middle_inside: - t1 = middle_t - end = middle - else: - t0 = middle_t - start = middle - start_inside = middle_inside - - -class BezierSegment: - """ - A d-dimensional Bézier segment. - - Parameters - ---------- - control_points : (N, d) array - Location of the *N* control points. - """ - - def __init__(self, control_points): - self._cpoints = np.asarray(control_points) - self._N, self._d = self._cpoints.shape - self._orders = np.arange(self._N) - coeff = [math.factorial(self._N - 1) - // (math.factorial(i) * math.factorial(self._N - 1 - i)) - for i in range(self._N)] - self._px = (self._cpoints.T * coeff).T - - def __call__(self, t): - """ - Evaluate the Bézier curve at point(s) *t* in [0, 1]. - - Parameters - ---------- - t : (k,) array-like - Points at which to evaluate the curve. - - Returns - ------- - (k, d) array - Value of the curve for each point in *t*. - """ - t = np.asarray(t) - return (np.power.outer(1 - t, self._orders[::-1]) - * np.power.outer(t, self._orders)) @ self._px - - def point_at_t(self, t): - """ - Evaluate the curve at a single point, returning a tuple of *d* floats. - """ - return tuple(self(t)) - - @property - def control_points(self): - """The control points of the curve.""" - return self._cpoints - - @property - def dimension(self): - """The dimension of the curve.""" - return self._d - - @property - def degree(self): - """Degree of the polynomial. 
One less the number of control points.""" - return self._N - 1 - - @property - def polynomial_coefficients(self): - r""" - The polynomial coefficients of the Bézier curve. - - .. warning:: Follows opposite convention from `numpy.polyval`. - - Returns - ------- - (n+1, d) array - Coefficients after expanding in polynomial basis, where :math:`n` - is the degree of the Bézier curve and :math:`d` its dimension. - These are the numbers (:math:`C_j`) such that the curve can be - written :math:`\sum_{j=0}^n C_j t^j`. - - Notes - ----- - The coefficients are calculated as - - .. math:: - - {n \choose j} \sum_{i=0}^j (-1)^{i+j} {j \choose i} P_i - - where :math:`P_i` are the control points of the curve. - """ - n = self.degree - # matplotlib uses n <= 4. overflow plausible starting around n = 15. - if n > 10: - warnings.warn("Polynomial coefficients formula unstable for high " - "order Bezier curves!", RuntimeWarning) - P = self.control_points - j = np.arange(n+1)[:, None] - i = np.arange(n+1)[None, :] # _comb is non-zero for i <= j - prefactor = (-1)**(i + j) * _comb(j, i) # j on axis 0, i on axis 1 - return _comb(n, j) * prefactor @ P # j on axis 0, self.dimension on 1 - - def axis_aligned_extrema(self): - """ - Return the dimension and location of the curve's interior extrema. - - The extrema are the points along the curve where one of its partial - derivatives is zero. - - Returns - ------- - dims : array of int - Index :math:`i` of the partial derivative which is zero at each - interior extrema. - dzeros : array of float - Of same size as dims. The :math:`t` such that :math:`d/dx_i B(t) = - 0` - """ - n = self.degree - if n <= 1: - return np.array([]), np.array([]) - Cj = self.polynomial_coefficients - dCj = np.arange(1, n+1)[:, None] * Cj[1:] - dims = [] - roots = [] - for i, pi in enumerate(dCj.T): - r = np.roots(pi[::-1]) - roots.append(r) - dims.append(np.full_like(r, i)) - roots = np.concatenate(roots) - dims = np.concatenate(dims) - in_range = np.isreal(roots) & (roots >= 0) & (roots <= 1) - return dims[in_range], np.real(roots)[in_range] - - -def split_bezier_intersecting_with_closedpath( - bezier, inside_closedpath, tolerance=0.01): - """ - Split a Bézier curve into two at the intersection with a closed path. - - Parameters - ---------- - bezier : (N, 2) array-like - Control points of the Bézier segment. See `.BezierSegment`. - inside_closedpath : callable - A function returning True if a given point (x, y) is inside the - closed path. See also `.find_bezier_t_intersecting_with_closedpath`. - tolerance : float - The tolerance for the intersection. See also - `.find_bezier_t_intersecting_with_closedpath`. - - Returns - ------- - left, right - Lists of control points for the two Bézier segments. - """ - - bz = BezierSegment(bezier) - bezier_point_at_t = bz.point_at_t - - t0, t1 = find_bezier_t_intersecting_with_closedpath( - bezier_point_at_t, inside_closedpath, tolerance=tolerance) - - _left, _right = split_de_casteljau(bezier, (t0 + t1) / 2.) - return _left, _right - - -# matplotlib specific - - -def split_path_inout(path, inside, tolerance=0.01, reorder_inout=False): - """ - Divide a path into two segments at the point where ``inside(x, y)`` becomes - False. 
- """ - from .path import Path - path_iter = path.iter_segments() - - ctl_points, command = next(path_iter) - begin_inside = inside(ctl_points[-2:]) # true if begin point is inside - - ctl_points_old = ctl_points - - iold = 0 - i = 1 - - for ctl_points, command in path_iter: - iold = i - i += len(ctl_points) // 2 - if inside(ctl_points[-2:]) != begin_inside: - bezier_path = np.concatenate([ctl_points_old[-2:], ctl_points]) - break - ctl_points_old = ctl_points - else: - raise ValueError("The path does not intersect with the patch") - - bp = bezier_path.reshape((-1, 2)) - left, right = split_bezier_intersecting_with_closedpath( - bp, inside, tolerance) - if len(left) == 2: - codes_left = [Path.LINETO] - codes_right = [Path.MOVETO, Path.LINETO] - elif len(left) == 3: - codes_left = [Path.CURVE3, Path.CURVE3] - codes_right = [Path.MOVETO, Path.CURVE3, Path.CURVE3] - elif len(left) == 4: - codes_left = [Path.CURVE4, Path.CURVE4, Path.CURVE4] - codes_right = [Path.MOVETO, Path.CURVE4, Path.CURVE4, Path.CURVE4] - else: - raise AssertionError("This should never be reached") - - verts_left = left[1:] - verts_right = right[:] - - if path.codes is None: - path_in = Path(np.concatenate([path.vertices[:i], verts_left])) - path_out = Path(np.concatenate([verts_right, path.vertices[i:]])) - - else: - path_in = Path(np.concatenate([path.vertices[:iold], verts_left]), - np.concatenate([path.codes[:iold], codes_left])) - - path_out = Path(np.concatenate([verts_right, path.vertices[i:]]), - np.concatenate([codes_right, path.codes[i:]])) - - if reorder_inout and not begin_inside: - path_in, path_out = path_out, path_in - - return path_in, path_out - - -def inside_circle(cx, cy, r): - """ - Return a function that checks whether a point is in a circle with center - (*cx*, *cy*) and radius *r*. - - The returned function has the signature:: - - f(xy: tuple[float, float]) -> bool - """ - r2 = r ** 2 - - def _f(xy): - x, y = xy - return (x - cx) ** 2 + (y - cy) ** 2 < r2 - return _f - - -# quadratic Bezier lines - -def get_cos_sin(x0, y0, x1, y1): - dx, dy = x1 - x0, y1 - y0 - d = (dx * dx + dy * dy) ** .5 - # Account for divide by zero - if d == 0: - return 0.0, 0.0 - return dx / d, dy / d - - -def check_if_parallel(dx1, dy1, dx2, dy2, tolerance=1.e-5): - """ - Check if two lines are parallel. - - Parameters - ---------- - dx1, dy1, dx2, dy2 : float - The gradients *dy*/*dx* of the two lines. - tolerance : float - The angular tolerance in radians up to which the lines are considered - parallel. - - Returns - ------- - is_parallel - - 1 if two lines are parallel in same direction. - - -1 if two lines are parallel in opposite direction. - - False otherwise. - """ - theta1 = np.arctan2(dx1, dy1) - theta2 = np.arctan2(dx2, dy2) - dtheta = abs(theta1 - theta2) - if dtheta < tolerance: - return 1 - elif abs(dtheta - np.pi) < tolerance: - return -1 - else: - return False - - -def get_parallels(bezier2, width): - """ - Given the quadratic Bézier control points *bezier2*, returns - control points of quadratic Bézier lines roughly parallel to given - one separated by *width*. - """ - - # The parallel Bezier lines are constructed by following ways. - # c1 and c2 are control points representing the start and end of the - # Bezier line. - # cm is the middle point - - c1x, c1y = bezier2[0] - cmx, cmy = bezier2[1] - c2x, c2y = bezier2[2] - - parallel_test = check_if_parallel(c1x - cmx, c1y - cmy, - cmx - c2x, cmy - c2y) - - if parallel_test == -1: - _api.warn_external( - "Lines do not intersect. 
A straight line is used instead.") - cos_t1, sin_t1 = get_cos_sin(c1x, c1y, c2x, c2y) - cos_t2, sin_t2 = cos_t1, sin_t1 - else: - # t1 and t2 is the angle between c1 and cm, cm, c2. They are - # also an angle of the tangential line of the path at c1 and c2 - cos_t1, sin_t1 = get_cos_sin(c1x, c1y, cmx, cmy) - cos_t2, sin_t2 = get_cos_sin(cmx, cmy, c2x, c2y) - - # find c1_left, c1_right which are located along the lines - # through c1 and perpendicular to the tangential lines of the - # Bezier path at a distance of width. Same thing for c2_left and - # c2_right with respect to c2. - c1x_left, c1y_left, c1x_right, c1y_right = ( - get_normal_points(c1x, c1y, cos_t1, sin_t1, width) - ) - c2x_left, c2y_left, c2x_right, c2y_right = ( - get_normal_points(c2x, c2y, cos_t2, sin_t2, width) - ) - - # find cm_left which is the intersecting point of a line through - # c1_left with angle t1 and a line through c2_left with angle - # t2. Same with cm_right. - try: - cmx_left, cmy_left = get_intersection(c1x_left, c1y_left, cos_t1, - sin_t1, c2x_left, c2y_left, - cos_t2, sin_t2) - cmx_right, cmy_right = get_intersection(c1x_right, c1y_right, cos_t1, - sin_t1, c2x_right, c2y_right, - cos_t2, sin_t2) - except ValueError: - # Special case straight lines, i.e., angle between two lines is - # less than the threshold used by get_intersection (we don't use - # check_if_parallel as the threshold is not the same). - cmx_left, cmy_left = ( - 0.5 * (c1x_left + c2x_left), 0.5 * (c1y_left + c2y_left) - ) - cmx_right, cmy_right = ( - 0.5 * (c1x_right + c2x_right), 0.5 * (c1y_right + c2y_right) - ) - - # the parallel Bezier lines are created with control points of - # [c1_left, cm_left, c2_left] and [c1_right, cm_right, c2_right] - path_left = [(c1x_left, c1y_left), - (cmx_left, cmy_left), - (c2x_left, c2y_left)] - path_right = [(c1x_right, c1y_right), - (cmx_right, cmy_right), - (c2x_right, c2y_right)] - - return path_left, path_right - - -def find_control_points(c1x, c1y, mmx, mmy, c2x, c2y): - """ - Find control points of the Bézier curve passing through (*c1x*, *c1y*), - (*mmx*, *mmy*), and (*c2x*, *c2y*), at parametric values 0, 0.5, and 1. - """ - cmx = .5 * (4 * mmx - (c1x + c2x)) - cmy = .5 * (4 * mmy - (c1y + c2y)) - return [(c1x, c1y), (cmx, cmy), (c2x, c2y)] - - -def make_wedged_bezier2(bezier2, width, w1=1., wm=0.5, w2=0.): - """ - Being similar to `get_parallels`, returns control points of two quadratic - Bézier lines having a width roughly parallel to given one separated by - *width*. - """ - - # c1, cm, c2 - c1x, c1y = bezier2[0] - cmx, cmy = bezier2[1] - c3x, c3y = bezier2[2] - - # t1 and t2 is the angle between c1 and cm, cm, c3. - # They are also an angle of the tangential line of the path at c1 and c3 - cos_t1, sin_t1 = get_cos_sin(c1x, c1y, cmx, cmy) - cos_t2, sin_t2 = get_cos_sin(cmx, cmy, c3x, c3y) - - # find c1_left, c1_right which are located along the lines - # through c1 and perpendicular to the tangential lines of the - # Bezier path at a distance of width. Same thing for c3_left and - # c3_right with respect to c3. 
- c1x_left, c1y_left, c1x_right, c1y_right = ( - get_normal_points(c1x, c1y, cos_t1, sin_t1, width * w1) - ) - c3x_left, c3y_left, c3x_right, c3y_right = ( - get_normal_points(c3x, c3y, cos_t2, sin_t2, width * w2) - ) - - # find c12, c23 and c123 which are middle points of c1-cm, cm-c3 and - # c12-c23 - c12x, c12y = (c1x + cmx) * .5, (c1y + cmy) * .5 - c23x, c23y = (cmx + c3x) * .5, (cmy + c3y) * .5 - c123x, c123y = (c12x + c23x) * .5, (c12y + c23y) * .5 - - # tangential angle of c123 (angle between c12 and c23) - cos_t123, sin_t123 = get_cos_sin(c12x, c12y, c23x, c23y) - - c123x_left, c123y_left, c123x_right, c123y_right = ( - get_normal_points(c123x, c123y, cos_t123, sin_t123, width * wm) - ) - - path_left = find_control_points(c1x_left, c1y_left, - c123x_left, c123y_left, - c3x_left, c3y_left) - path_right = find_control_points(c1x_right, c1y_right, - c123x_right, c123y_right, - c3x_right, c3y_right) - - return path_left, path_right diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/typing.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/typing.py deleted file mode 100644 index 02059be94ba2f378ef5e3cccaa22a503d676afa9..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/typing.py +++ /dev/null @@ -1,60 +0,0 @@ -""" -Typing support for Matplotlib - -This module contains Type aliases which are useful for Matplotlib and potentially -downstream libraries. - -.. admonition:: Provisional status of typing - - The ``typing`` module and type stub files are considered provisional and may change - at any time without a deprecation period. -""" -from collections.abc import Hashable, Sequence -import pathlib -from typing import Any, Literal, TypeVar, Union - -from . import path -from ._enums import JoinStyle, CapStyle -from .markers import MarkerStyle - -# The following are type aliases. Once python 3.9 is dropped, they should be annotated -# using ``typing.TypeAlias`` and Unions should be converted to using ``|`` syntax. 
- -RGBColorType = Union[tuple[float, float, float], str] -RGBAColorType = Union[ - str, # "none" or "#RRGGBBAA"/"#RGBA" hex strings - tuple[float, float, float, float], - # 2 tuple (color, alpha) representations, not infinitely recursive - # RGBColorType includes the (str, float) tuple, even for RGBA strings - tuple[RGBColorType, float], - # (4-tuple, float) is odd, but accepted as the outer float overriding A of 4-tuple - tuple[tuple[float, float, float, float], float], -] - -ColorType = Union[RGBColorType, RGBAColorType] - -RGBColourType = RGBColorType -RGBAColourType = RGBAColorType -ColourType = ColorType - -LineStyleType = Union[str, tuple[float, Sequence[float]]] -DrawStyleType = Literal["default", "steps", "steps-pre", "steps-mid", "steps-post"] -MarkEveryType = Union[ - None, int, tuple[int, int], slice, list[int], float, tuple[float, float], list[bool] -] - -MarkerType = Union[str, path.Path, MarkerStyle] -FillStyleType = Literal["full", "left", "right", "bottom", "top", "none"] -JoinStyleType = Union[JoinStyle, Literal["miter", "round", "bevel"]] -CapStyleType = Union[CapStyle, Literal["butt", "projecting", "round"]] - -RcStyleType = Union[ - str, - dict[str, Any], - pathlib.Path, - Sequence[Union[str, pathlib.Path, dict[str, Any]]], -] - -_HT = TypeVar("_HT", bound=Hashable) -HashableList = list[Union[_HT, "HashableList[_HT]"]] -"""A nested list of Hashable values.""" diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/polynomial/tests/test_polyutils.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/polynomial/tests/test_polyutils.py deleted file mode 100644 index cc630790da1ce8fd1ca413cd530ae5636cce5aa8..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/polynomial/tests/test_polyutils.py +++ /dev/null @@ -1,121 +0,0 @@ -"""Tests for polyutils module. 
- -""" -import numpy as np -import numpy.polynomial.polyutils as pu -from numpy.testing import ( - assert_almost_equal, assert_raises, assert_equal, assert_, - ) - - -class TestMisc: - - def test_trimseq(self): - for i in range(5): - tgt = [1] - res = pu.trimseq([1] + [0]*5) - assert_equal(res, tgt) - - def test_as_series(self): - # check exceptions - assert_raises(ValueError, pu.as_series, [[]]) - assert_raises(ValueError, pu.as_series, [[[1, 2]]]) - assert_raises(ValueError, pu.as_series, [[1], ['a']]) - # check common types - types = ['i', 'd', 'O'] - for i in range(len(types)): - for j in range(i): - ci = np.ones(1, types[i]) - cj = np.ones(1, types[j]) - [resi, resj] = pu.as_series([ci, cj]) - assert_(resi.dtype.char == resj.dtype.char) - assert_(resj.dtype.char == types[i]) - - def test_trimcoef(self): - coef = [2, -1, 1, 0] - # Test exceptions - assert_raises(ValueError, pu.trimcoef, coef, -1) - # Test results - assert_equal(pu.trimcoef(coef), coef[:-1]) - assert_equal(pu.trimcoef(coef, 1), coef[:-3]) - assert_equal(pu.trimcoef(coef, 2), [0]) - - def test_vander_nd_exception(self): - # n_dims != len(points) - assert_raises(ValueError, pu._vander_nd, (), (1, 2, 3), [90]) - # n_dims != len(degrees) - assert_raises(ValueError, pu._vander_nd, (), (), [90.65]) - # n_dims == 0 - assert_raises(ValueError, pu._vander_nd, (), (), []) - - def test_div_zerodiv(self): - # c2[-1] == 0 - assert_raises(ZeroDivisionError, pu._div, pu._div, (1, 2, 3), [0]) - - def test_pow_too_large(self): - # power > maxpower - assert_raises(ValueError, pu._pow, (), [1, 2, 3], 5, 4) - -class TestDomain: - - def test_getdomain(self): - # test for real values - x = [1, 10, 3, -1] - tgt = [-1, 10] - res = pu.getdomain(x) - assert_almost_equal(res, tgt) - - # test for complex values - x = [1 + 1j, 1 - 1j, 0, 2] - tgt = [-1j, 2 + 1j] - res = pu.getdomain(x) - assert_almost_equal(res, tgt) - - def test_mapdomain(self): - # test for real values - dom1 = [0, 4] - dom2 = [1, 3] - tgt = dom2 - res = pu.mapdomain(dom1, dom1, dom2) - assert_almost_equal(res, tgt) - - # test for complex values - dom1 = [0 - 1j, 2 + 1j] - dom2 = [-2, 2] - tgt = dom2 - x = dom1 - res = pu.mapdomain(x, dom1, dom2) - assert_almost_equal(res, tgt) - - # test for multidimensional arrays - dom1 = [0, 4] - dom2 = [1, 3] - tgt = np.array([dom2, dom2]) - x = np.array([dom1, dom1]) - res = pu.mapdomain(x, dom1, dom2) - assert_almost_equal(res, tgt) - - # test that subtypes are preserved. - class MyNDArray(np.ndarray): - pass - - dom1 = [0, 4] - dom2 = [1, 3] - x = np.array([dom1, dom1]).view(MyNDArray) - res = pu.mapdomain(x, dom1, dom2) - assert_(isinstance(res, MyNDArray)) - - def test_mapparms(self): - # test for real values - dom1 = [0, 4] - dom2 = [1, 3] - tgt = [1, .5] - res = pu. 
mapparms(dom1, dom2) - assert_almost_equal(res, tgt) - - # test for complex values - dom1 = [0 - 1j, 2 + 1j] - dom2 = [-2, 2] - tgt = [-1 + 1j, 1 - 1j] - res = pu.mapparms(dom1, dom2) - assert_almost_equal(res, tgt) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/ufuncs.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/ufuncs.py deleted file mode 100644 index 3cc31ae5e30506481192d0e4b2f3df8347af65c8..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/ufuncs.py +++ /dev/null @@ -1,17 +0,0 @@ -import numpy as np - -np.sin(1) -np.sin([1, 2, 3]) -np.sin(1, out=np.empty(1)) -np.matmul(np.ones((2, 2, 2)), np.ones((2, 2, 2)), axes=[(0, 1), (0, 1), (0, 1)]) -np.sin(1, signature="D->D") -np.sin(1, extobj=[16, 1, lambda: None]) -# NOTE: `np.generic` subclasses are not guaranteed to support addition; -# re-enable this we can infer the exact return type of `np.sin(...)`. -# -# np.sin(1) + np.sin(1) -np.sin.types[0] -np.sin.__name__ -np.sin.__doc__ - -np.abs(np.array([1])) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/openai/api_resources/model.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/openai/api_resources/model.py deleted file mode 100644 index 9785e17fe17ca145b1c04de7529254f3132990dc..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/openai/api_resources/model.py +++ /dev/null @@ -1,5 +0,0 @@ -from openai.api_resources.abstract import DeletableAPIResource, ListableAPIResource - - -class Model(ListableAPIResource, DeletableAPIResource): - OBJECT_NAME = "models" diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/core/internals/construction.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/core/internals/construction.py deleted file mode 100644 index 8bb6c6b5de7eaaee88bc26298a2a1f6fa77ca06a..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/core/internals/construction.py +++ /dev/null @@ -1,1070 +0,0 @@ -""" -Functions for preparing various inputs passed to the DataFrame or Series -constructors before passing them to a BlockManager. 
-""" -from __future__ import annotations - -from collections import abc -from typing import ( - TYPE_CHECKING, - Any, -) - -import numpy as np -from numpy import ma - -from pandas._config import using_pyarrow_string_dtype - -from pandas._libs import lib - -from pandas.core.dtypes.astype import astype_is_view -from pandas.core.dtypes.cast import ( - construct_1d_arraylike_from_scalar, - dict_compat, - maybe_cast_to_datetime, - maybe_convert_platform, - maybe_infer_to_datetimelike, -) -from pandas.core.dtypes.common import ( - is_1d_only_ea_dtype, - is_integer_dtype, - is_list_like, - is_named_tuple, - is_object_dtype, -) -from pandas.core.dtypes.dtypes import ExtensionDtype -from pandas.core.dtypes.generic import ( - ABCDataFrame, - ABCSeries, -) - -from pandas.core import ( - algorithms, - common as com, -) -from pandas.core.arrays import ExtensionArray -from pandas.core.arrays.string_ import StringDtype -from pandas.core.construction import ( - array as pd_array, - ensure_wrapped_if_datetimelike, - extract_array, - range_to_ndarray, - sanitize_array, -) -from pandas.core.indexes.api import ( - DatetimeIndex, - Index, - TimedeltaIndex, - default_index, - ensure_index, - get_objs_combined_axis, - union_indexes, -) -from pandas.core.internals.array_manager import ( - ArrayManager, - SingleArrayManager, -) -from pandas.core.internals.blocks import ( - BlockPlacement, - ensure_block_shape, - new_block, - new_block_2d, -) -from pandas.core.internals.managers import ( - BlockManager, - SingleBlockManager, - create_block_manager_from_blocks, - create_block_manager_from_column_arrays, -) - -if TYPE_CHECKING: - from collections.abc import ( - Hashable, - Sequence, - ) - - from pandas._typing import ( - ArrayLike, - DtypeObj, - Manager, - npt, - ) -# --------------------------------------------------------------------- -# BlockManager Interface - - -def arrays_to_mgr( - arrays, - columns: Index, - index, - *, - dtype: DtypeObj | None = None, - verify_integrity: bool = True, - typ: str | None = None, - consolidate: bool = True, -) -> Manager: - """ - Segregate Series based on type and coerce into matrices. - - Needs to handle a lot of exceptional cases. 
- """ - if verify_integrity: - # figure out the index, if necessary - if index is None: - index = _extract_index(arrays) - else: - index = ensure_index(index) - - # don't force copy because getting jammed in an ndarray anyway - arrays, refs = _homogenize(arrays, index, dtype) - # _homogenize ensures - # - all(len(x) == len(index) for x in arrays) - # - all(x.ndim == 1 for x in arrays) - # - all(isinstance(x, (np.ndarray, ExtensionArray)) for x in arrays) - # - all(type(x) is not NumpyExtensionArray for x in arrays) - - else: - index = ensure_index(index) - arrays = [extract_array(x, extract_numpy=True) for x in arrays] - # with _from_arrays, the passed arrays should never be Series objects - refs = [None] * len(arrays) - - # Reached via DataFrame._from_arrays; we do minimal validation here - for arr in arrays: - if ( - not isinstance(arr, (np.ndarray, ExtensionArray)) - or arr.ndim != 1 - or len(arr) != len(index) - ): - raise ValueError( - "Arrays must be 1-dimensional np.ndarray or ExtensionArray " - "with length matching len(index)" - ) - - columns = ensure_index(columns) - if len(columns) != len(arrays): - raise ValueError("len(arrays) must match len(columns)") - - # from BlockManager perspective - axes = [columns, index] - - if typ == "block": - return create_block_manager_from_column_arrays( - arrays, axes, consolidate=consolidate, refs=refs - ) - elif typ == "array": - return ArrayManager(arrays, [index, columns]) - else: - raise ValueError(f"'typ' needs to be one of {{'block', 'array'}}, got '{typ}'") - - -def rec_array_to_mgr( - data: np.rec.recarray | np.ndarray, - index, - columns, - dtype: DtypeObj | None, - copy: bool, - typ: str, -) -> Manager: - """ - Extract from a masked rec array and create the manager. - """ - # essentially process a record array then fill it - fdata = ma.getdata(data) - if index is None: - index = default_index(len(fdata)) - else: - index = ensure_index(index) - - if columns is not None: - columns = ensure_index(columns) - arrays, arr_columns = to_arrays(fdata, columns) - - # create the manager - - arrays, arr_columns = reorder_arrays(arrays, arr_columns, columns, len(index)) - if columns is None: - columns = arr_columns - - mgr = arrays_to_mgr(arrays, columns, index, dtype=dtype, typ=typ) - - if copy: - mgr = mgr.copy() - return mgr - - -def mgr_to_mgr(mgr, typ: str, copy: bool = True): - """ - Convert to specific type of Manager. Does not copy if the type is already - correct. Does not guarantee a copy otherwise. `copy` keyword only controls - whether conversion from Block->ArrayManager copies the 1D arrays. 
- """ - new_mgr: Manager - - if typ == "block": - if isinstance(mgr, BlockManager): - new_mgr = mgr - else: - if mgr.ndim == 2: - new_mgr = arrays_to_mgr( - mgr.arrays, mgr.axes[0], mgr.axes[1], typ="block" - ) - else: - new_mgr = SingleBlockManager.from_array(mgr.arrays[0], mgr.index) - elif typ == "array": - if isinstance(mgr, ArrayManager): - new_mgr = mgr - else: - if mgr.ndim == 2: - arrays = [mgr.iget_values(i) for i in range(len(mgr.axes[0]))] - if copy: - arrays = [arr.copy() for arr in arrays] - new_mgr = ArrayManager(arrays, [mgr.axes[1], mgr.axes[0]]) - else: - array = mgr.internal_values() - if copy: - array = array.copy() - new_mgr = SingleArrayManager([array], [mgr.index]) - else: - raise ValueError(f"'typ' needs to be one of {{'block', 'array'}}, got '{typ}'") - return new_mgr - - -# --------------------------------------------------------------------- -# DataFrame Constructor Interface - - -def ndarray_to_mgr( - values, index, columns, dtype: DtypeObj | None, copy: bool, typ: str -) -> Manager: - # used in DataFrame.__init__ - # input must be a ndarray, list, Series, Index, ExtensionArray - - if isinstance(values, ABCSeries): - if columns is None: - if values.name is not None: - columns = Index([values.name]) - if index is None: - index = values.index - else: - values = values.reindex(index) - - # zero len case (GH #2234) - if not len(values) and columns is not None and len(columns): - values = np.empty((0, 1), dtype=object) - - # if the array preparation does a copy -> avoid this for ArrayManager, - # since the copy is done on conversion to 1D arrays - copy_on_sanitize = False if typ == "array" else copy - - vdtype = getattr(values, "dtype", None) - refs = None - if is_1d_only_ea_dtype(vdtype) or is_1d_only_ea_dtype(dtype): - # GH#19157 - - if isinstance(values, (np.ndarray, ExtensionArray)) and values.ndim > 1: - # GH#12513 a EA dtype passed with a 2D array, split into - # multiple EAs that view the values - # error: No overload variant of "__getitem__" of "ExtensionArray" - # matches argument type "Tuple[slice, int]" - values = [ - values[:, n] # type: ignore[call-overload] - for n in range(values.shape[1]) - ] - else: - values = [values] - - if columns is None: - columns = Index(range(len(values))) - else: - columns = ensure_index(columns) - - return arrays_to_mgr(values, columns, index, dtype=dtype, typ=typ) - - elif isinstance(vdtype, ExtensionDtype): - # i.e. 
Datetime64TZ, PeriodDtype; cases with is_1d_only_ea_dtype(vdtype) - # are already caught above - values = extract_array(values, extract_numpy=True) - if copy: - values = values.copy() - if values.ndim == 1: - values = values.reshape(-1, 1) - - elif isinstance(values, (ABCSeries, Index)): - if not copy_on_sanitize and ( - dtype is None or astype_is_view(values.dtype, dtype) - ): - refs = values._references - - if copy_on_sanitize: - values = values._values.copy() - else: - values = values._values - - values = _ensure_2d(values) - - elif isinstance(values, (np.ndarray, ExtensionArray)): - # drop subclass info - _copy = ( - copy_on_sanitize - if (dtype is None or astype_is_view(values.dtype, dtype)) - else False - ) - values = np.array(values, copy=_copy) - values = _ensure_2d(values) - - else: - # by definition an array here - # the dtypes will be coerced to a single dtype - values = _prep_ndarraylike(values, copy=copy_on_sanitize) - - if dtype is not None and values.dtype != dtype: - # GH#40110 see similar check inside sanitize_array - values = sanitize_array( - values, - None, - dtype=dtype, - copy=copy_on_sanitize, - allow_2d=True, - ) - - # _prep_ndarraylike ensures that values.ndim == 2 at this point - index, columns = _get_axes( - values.shape[0], values.shape[1], index=index, columns=columns - ) - - _check_values_indices_shape_match(values, index, columns) - - if typ == "array": - if issubclass(values.dtype.type, str): - values = np.array(values, dtype=object) - - if dtype is None and is_object_dtype(values.dtype): - arrays = [ - ensure_wrapped_if_datetimelike( - maybe_infer_to_datetimelike(values[:, i]) - ) - for i in range(values.shape[1]) - ] - else: - if lib.is_np_dtype(values.dtype, "mM"): - values = ensure_wrapped_if_datetimelike(values) - arrays = [values[:, i] for i in range(values.shape[1])] - - if copy: - arrays = [arr.copy() for arr in arrays] - - return ArrayManager(arrays, [index, columns], verify_integrity=False) - - values = values.T - - # if we don't have a dtype specified, then try to convert objects - # on the entire block; this is to convert if we have datetimelike's - # embedded in an object type - if dtype is None and is_object_dtype(values.dtype): - obj_columns = list(values) - maybe_datetime = [maybe_infer_to_datetimelike(x) for x in obj_columns] - # don't convert (and copy) the objects if no type inference occurs - if any(x is not y for x, y in zip(obj_columns, maybe_datetime)): - dvals_list = [ensure_block_shape(dval, 2) for dval in maybe_datetime] - block_values = [ - new_block_2d(dvals_list[n], placement=BlockPlacement(n)) - for n in range(len(dvals_list)) - ] - else: - bp = BlockPlacement(slice(len(columns))) - nb = new_block_2d(values, placement=bp, refs=refs) - block_values = [nb] - elif dtype is None and values.dtype.kind == "U" and using_pyarrow_string_dtype(): - dtype = StringDtype(storage="pyarrow_numpy") - - obj_columns = list(values) - block_values = [ - new_block( - dtype.construct_array_type()._from_sequence(data, dtype=dtype), - BlockPlacement(slice(i, i + 1)), - ndim=2, - ) - for i, data in enumerate(obj_columns) - ] - - else: - bp = BlockPlacement(slice(len(columns))) - nb = new_block_2d(values, placement=bp, refs=refs) - block_values = [nb] - - if len(columns) == 0: - # TODO: check len(values) == 0? 
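# A minimal usage sketch of the two constructor paths handled above,
# using only the public pandas API: a plain 2D ndarray stays a single
# 2D block, while a 1D-only extension dtype such as "category" is split
# into one 1D array per column. Column names and data are made up.
import numpy as np
import pandas as pd

values = np.arange(6).reshape(3, 2)
df_block = pd.DataFrame(values, columns=["a", "b"])                    # 2D ndarray path
df_split = pd.DataFrame(values, columns=["a", "b"], dtype="category")  # 1D-only EA path
print(df_block.dtypes.tolist())   # two integer dtypes
print(df_split.dtypes.tolist())   # two CategoricalDtype entries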
- block_values = [] - - return create_block_manager_from_blocks( - block_values, [columns, index], verify_integrity=False - ) - - -def _check_values_indices_shape_match( - values: np.ndarray, index: Index, columns: Index -) -> None: - """ - Check that the shape implied by our axes matches the actual shape of the - data. - """ - if values.shape[1] != len(columns) or values.shape[0] != len(index): - # Could let this raise in Block constructor, but we get a more - # helpful exception message this way. - if values.shape[0] == 0 < len(index): - raise ValueError("Empty data passed with indices specified.") - - passed = values.shape - implied = (len(index), len(columns)) - raise ValueError(f"Shape of passed values is {passed}, indices imply {implied}") - - -def dict_to_mgr( - data: dict, - index, - columns, - *, - dtype: DtypeObj | None = None, - typ: str = "block", - copy: bool = True, -) -> Manager: - """ - Segregate Series based on type and coerce into matrices. - Needs to handle a lot of exceptional cases. - - Used in DataFrame.__init__ - """ - arrays: Sequence[Any] | Series - - if columns is not None: - from pandas.core.series import Series - - arrays = Series(data, index=columns, dtype=object) - missing = arrays.isna() - if index is None: - # GH10856 - # raise ValueError if only scalars in dict - index = _extract_index(arrays[~missing]) - else: - index = ensure_index(index) - - # no obvious "empty" int column - if missing.any() and not is_integer_dtype(dtype): - nan_dtype: DtypeObj - - if dtype is not None: - # calling sanitize_array ensures we don't mix-and-match - # NA dtypes - midxs = missing.values.nonzero()[0] - for i in midxs: - arr = sanitize_array(arrays.iat[i], index, dtype=dtype) - arrays.iat[i] = arr - else: - # GH#1783 - nan_dtype = np.dtype("object") - val = construct_1d_arraylike_from_scalar(np.nan, len(index), nan_dtype) - nmissing = missing.sum() - if copy: - rhs = [val] * nmissing - else: - # GH#45369 - rhs = [val.copy() for _ in range(nmissing)] - arrays.loc[missing] = rhs - - arrays = list(arrays) - columns = ensure_index(columns) - - else: - keys = list(data.keys()) - columns = Index(keys) if keys else default_index(0) - arrays = [com.maybe_iterable_to_list(data[k]) for k in keys] - - if copy: - if typ == "block": - # We only need to copy arrays that will not get consolidated, i.e. - # only EA arrays - arrays = [ - x.copy() - if isinstance(x, ExtensionArray) - else x.copy(deep=True) - if ( - isinstance(x, Index) - or isinstance(x, ABCSeries) - and is_1d_only_ea_dtype(x.dtype) - ) - else x - for x in arrays - ] - else: - # dtype check to exclude e.g. range objects, scalars - arrays = [x.copy() if hasattr(x, "dtype") else x for x in arrays] - - return arrays_to_mgr(arrays, columns, index, dtype=dtype, typ=typ, consolidate=copy) - - -def nested_data_to_arrays( - data: Sequence, - columns: Index | None, - index: Index | None, - dtype: DtypeObj | None, -) -> tuple[list[ArrayLike], Index, Index]: - """ - Convert a single sequence of arrays to multiple arrays. 
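# A short, hedged sketch of the missing-key behaviour that dict_to_mgr
# implements above: when `columns` names a key absent from the dict, that
# column comes back all-NaN. The names and data here are made up.
import pandas as pd

data = {"a": [1, 2, 3], "b": [4.0, 5.0, 6.0]}
df = pd.DataFrame(data, columns=["a", "b", "c"])
print(df["c"].isna().all())   # True: the missing key "c" becomes an all-NaN column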
- """ - # By the time we get here we have already checked treat_as_nested(data) - - if is_named_tuple(data[0]) and columns is None: - columns = ensure_index(data[0]._fields) - - arrays, columns = to_arrays(data, columns, dtype=dtype) - columns = ensure_index(columns) - - if index is None: - if isinstance(data[0], ABCSeries): - index = _get_names_from_index(data) - else: - index = default_index(len(data)) - - return arrays, columns, index - - -def treat_as_nested(data) -> bool: - """ - Check if we should use nested_data_to_arrays. - """ - return ( - len(data) > 0 - and is_list_like(data[0]) - and getattr(data[0], "ndim", 1) == 1 - and not (isinstance(data, ExtensionArray) and data.ndim == 2) - ) - - -# --------------------------------------------------------------------- - - -def _prep_ndarraylike(values, copy: bool = True) -> np.ndarray: - # values is specifically _not_ ndarray, EA, Index, or Series - # We only get here with `not treat_as_nested(values)` - - if len(values) == 0: - # TODO: check for length-zero range, in which case return int64 dtype? - # TODO: re-use anything in try_cast? - return np.empty((0, 0), dtype=object) - elif isinstance(values, range): - arr = range_to_ndarray(values) - return arr[..., np.newaxis] - - def convert(v): - if not is_list_like(v) or isinstance(v, ABCDataFrame): - return v - - v = extract_array(v, extract_numpy=True) - res = maybe_convert_platform(v) - # We don't do maybe_infer_to_datetimelike here bc we will end up doing - # it column-by-column in ndarray_to_mgr - return res - - # we could have a 1-dim or 2-dim list here - # this is equiv of np.asarray, but does object conversion - # and platform dtype preservation - # does not convert e.g. [1, "a", True] to ["1", "a", "True"] like - # np.asarray would - if is_list_like(values[0]): - values = np.array([convert(v) for v in values]) - elif isinstance(values[0], np.ndarray) and values[0].ndim == 0: - # GH#21861 see test_constructor_list_of_lists - values = np.array([convert(v) for v in values]) - else: - values = convert(values) - - return _ensure_2d(values) - - -def _ensure_2d(values: np.ndarray) -> np.ndarray: - """ - Reshape 1D values, raise on anything else other than 2D. - """ - if values.ndim == 1: - values = values.reshape((values.shape[0], 1)) - elif values.ndim != 2: - raise ValueError(f"Must pass 2-d input. shape={values.shape}") - return values - - -def _homogenize( - data, index: Index, dtype: DtypeObj | None -) -> tuple[list[ArrayLike], list[Any]]: - oindex = None - homogenized = [] - # if the original array-like in `data` is a Series, keep track of this Series' refs - refs: list[Any] = [] - - for val in data: - if isinstance(val, (ABCSeries, Index)): - if dtype is not None: - val = val.astype(dtype, copy=False) - if isinstance(val, ABCSeries) and val.index is not index: - # Forces alignment. 
No need to copy data since we - # are putting it into an ndarray later - val = val.reindex(index, copy=False) - refs.append(val._references) - val = val._values - else: - if isinstance(val, dict): - # GH#41785 this _should_ be equivalent to (but faster than) - # val = Series(val, index=index)._values - if oindex is None: - oindex = index.astype("O") - - if isinstance(index, (DatetimeIndex, TimedeltaIndex)): - # see test_constructor_dict_datetime64_index - val = dict_compat(val) - else: - # see test_constructor_subclass_dict - val = dict(val) - val = lib.fast_multiget(val, oindex._values, default=np.nan) - - val = sanitize_array(val, index, dtype=dtype, copy=False) - com.require_length_match(val, index) - refs.append(None) - - homogenized.append(val) - - return homogenized, refs - - -def _extract_index(data) -> Index: - """ - Try to infer an Index from the passed data, raise ValueError on failure. - """ - index: Index - if len(data) == 0: - return default_index(0) - - raw_lengths = [] - indexes: list[list[Hashable] | Index] = [] - - have_raw_arrays = False - have_series = False - have_dicts = False - - for val in data: - if isinstance(val, ABCSeries): - have_series = True - indexes.append(val.index) - elif isinstance(val, dict): - have_dicts = True - indexes.append(list(val.keys())) - elif is_list_like(val) and getattr(val, "ndim", 1) == 1: - have_raw_arrays = True - raw_lengths.append(len(val)) - elif isinstance(val, np.ndarray) and val.ndim > 1: - raise ValueError("Per-column arrays must each be 1-dimensional") - - if not indexes and not raw_lengths: - raise ValueError("If using all scalar values, you must pass an index") - - if have_series: - index = union_indexes(indexes) - elif have_dicts: - index = union_indexes(indexes, sort=False) - - if have_raw_arrays: - lengths = list(set(raw_lengths)) - if len(lengths) > 1: - raise ValueError("All arrays must be of the same length") - - if have_dicts: - raise ValueError( - "Mixing dicts with non-Series may lead to ambiguous ordering." - ) - - if have_series: - if lengths[0] != len(index): - msg = ( - f"array length {lengths[0]} does not match index " - f"length {len(index)}" - ) - raise ValueError(msg) - else: - index = default_index(lengths[0]) - - return ensure_index(index) - - -def reorder_arrays( - arrays: list[ArrayLike], arr_columns: Index, columns: Index | None, length: int -) -> tuple[list[ArrayLike], Index]: - """ - Pre-emptively (cheaply) reindex arrays with new columns. 
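# The index-inference rules implemented by _extract_index above surface
# directly through the public DataFrame constructor; a rough sketch:
import pandas as pd

try:
    pd.DataFrame({"a": 1, "b": 2})                 # all scalars, no index
except ValueError as err:
    print(err)                                     # "If using all scalar values, you must pass an index"

print(pd.DataFrame({"a": 1, "b": 2}, index=[0, 1]))   # fine once an index is supplied

try:
    pd.DataFrame({"a": [1, 2], "b": [1, 2, 3]})    # mismatched column lengths
except ValueError as err:
    print(err)                                     # "All arrays must be of the same length"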
- """ - # reorder according to the columns - if columns is not None: - if not columns.equals(arr_columns): - # if they are equal, there is nothing to do - new_arrays: list[ArrayLike] = [] - indexer = arr_columns.get_indexer(columns) - for i, k in enumerate(indexer): - if k == -1: - # by convention default is all-NaN object dtype - arr = np.empty(length, dtype=object) - arr.fill(np.nan) - else: - arr = arrays[k] - new_arrays.append(arr) - - arrays = new_arrays - arr_columns = columns - - return arrays, arr_columns - - -def _get_names_from_index(data) -> Index: - has_some_name = any(getattr(s, "name", None) is not None for s in data) - if not has_some_name: - return default_index(len(data)) - - index: list[Hashable] = list(range(len(data))) - count = 0 - for i, s in enumerate(data): - n = getattr(s, "name", None) - if n is not None: - index[i] = n - else: - index[i] = f"Unnamed {count}" - count += 1 - - return Index(index) - - -def _get_axes( - N: int, K: int, index: Index | None, columns: Index | None -) -> tuple[Index, Index]: - # helper to create the axes as indexes - # return axes or defaults - - if index is None: - index = default_index(N) - else: - index = ensure_index(index) - - if columns is None: - columns = default_index(K) - else: - columns = ensure_index(columns) - return index, columns - - -def dataclasses_to_dicts(data): - """ - Converts a list of dataclass instances to a list of dictionaries. - - Parameters - ---------- - data : List[Type[dataclass]] - - Returns - -------- - list_dict : List[dict] - - Examples - -------- - >>> from dataclasses import dataclass - >>> @dataclass - ... class Point: - ... x: int - ... y: int - - >>> dataclasses_to_dicts([Point(1, 2), Point(2, 3)]) - [{'x': 1, 'y': 2}, {'x': 2, 'y': 3}] - - """ - from dataclasses import asdict - - return list(map(asdict, data)) - - -# --------------------------------------------------------------------- -# Conversion of Inputs to Arrays - - -def to_arrays( - data, columns: Index | None, dtype: DtypeObj | None = None -) -> tuple[list[ArrayLike], Index]: - """ - Return list of arrays, columns. - - Returns - ------- - list[ArrayLike] - These will become columns in a DataFrame. - Index - This will become frame.columns. - - Notes - ----- - Ensures that len(result_arrays) == len(result_index). - """ - - if not len(data): - if isinstance(data, np.ndarray): - if data.dtype.names is not None: - # i.e. numpy structured array - columns = ensure_index(data.dtype.names) - arrays = [data[name] for name in columns] - - if len(data) == 0: - # GH#42456 the indexing above results in list of 2D ndarrays - # TODO: is that an issue with numpy? - for i, arr in enumerate(arrays): - if arr.ndim == 2: - arrays[i] = arr[:, 0] - - return arrays, columns - return [], ensure_index([]) - - elif isinstance(data, np.ndarray) and data.dtype.names is not None: - # e.g. 
recarray - columns = Index(list(data.dtype.names)) - arrays = [data[k] for k in columns] - return arrays, columns - - if isinstance(data[0], (list, tuple)): - arr = _list_to_arrays(data) - elif isinstance(data[0], abc.Mapping): - arr, columns = _list_of_dict_to_arrays(data, columns) - elif isinstance(data[0], ABCSeries): - arr, columns = _list_of_series_to_arrays(data, columns) - else: - # last ditch effort - data = [tuple(x) for x in data] - arr = _list_to_arrays(data) - - content, columns = _finalize_columns_and_data(arr, columns, dtype) - return content, columns - - -def _list_to_arrays(data: list[tuple | list]) -> np.ndarray: - # Returned np.ndarray has ndim = 2 - # Note: we already check len(data) > 0 before getting hre - if isinstance(data[0], tuple): - content = lib.to_object_array_tuples(data) - else: - # list of lists - content = lib.to_object_array(data) - return content - - -def _list_of_series_to_arrays( - data: list, - columns: Index | None, -) -> tuple[np.ndarray, Index]: - # returned np.ndarray has ndim == 2 - - if columns is None: - # We know pass_data is non-empty because data[0] is a Series - pass_data = [x for x in data if isinstance(x, (ABCSeries, ABCDataFrame))] - columns = get_objs_combined_axis(pass_data, sort=False) - - indexer_cache: dict[int, np.ndarray] = {} - - aligned_values = [] - for s in data: - index = getattr(s, "index", None) - if index is None: - index = default_index(len(s)) - - if id(index) in indexer_cache: - indexer = indexer_cache[id(index)] - else: - indexer = indexer_cache[id(index)] = index.get_indexer(columns) - - values = extract_array(s, extract_numpy=True) - aligned_values.append(algorithms.take_nd(values, indexer)) - - content = np.vstack(aligned_values) - return content, columns - - -def _list_of_dict_to_arrays( - data: list[dict], - columns: Index | None, -) -> tuple[np.ndarray, Index]: - """ - Convert list of dicts to numpy arrays - - if `columns` is not passed, column names are inferred from the records - - for OrderedDict and dicts, the column names match - the key insertion-order from the first record to the last. - - For other kinds of dict-likes, the keys are lexically sorted. - - Parameters - ---------- - data : iterable - collection of records (OrderedDict, dict) - columns: iterables or None - - Returns - ------- - content : np.ndarray[object, ndim=2] - columns : Index - """ - if columns is None: - gen = (list(x.keys()) for x in data) - sort = not any(isinstance(d, dict) for d in data) - pre_cols = lib.fast_unique_multiple_list_gen(gen, sort=sort) - columns = ensure_index(pre_cols) - - # assure that they are of the base dict class and not of derived - # classes - data = [d if type(d) is dict else dict(d) for d in data] - - content = lib.dicts_to_array(data, list(columns)) - return content, columns - - -def _finalize_columns_and_data( - content: np.ndarray, # ndim == 2 - columns: Index | None, - dtype: DtypeObj | None, -) -> tuple[list[ArrayLike], Index]: - """ - Ensure we have valid columns, cast object dtypes if possible. 
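# Rough examples of the record-style inputs that to_arrays above dispatches
# on: a list of dicts infers columns from the keys, a list of tuples takes
# the columns you pass, and a structured/record array contributes its field
# names. All data below is made up for illustration.
import numpy as np
import pandas as pd

print(pd.DataFrame([{"x": 1, "y": "a"}, {"x": 2, "y": "b"}, {"x": 3}]))   # missing "y" -> NaN
print(pd.DataFrame([(1, "a"), (2, "b")], columns=["x", "y"]))

rec = np.rec.fromrecords([(1, 2.0, "a"), (3, 4.0, "b")], names=["i", "f", "s"])
print(pd.DataFrame.from_records(rec, index="i"))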
- """ - contents = list(content.T) - - try: - columns = _validate_or_indexify_columns(contents, columns) - except AssertionError as err: - # GH#26429 do not raise user-facing AssertionError - raise ValueError(err) from err - - if len(contents) and contents[0].dtype == np.object_: - contents = convert_object_array(contents, dtype=dtype) - - return contents, columns - - -def _validate_or_indexify_columns( - content: list[np.ndarray], columns: Index | None -) -> Index: - """ - If columns is None, make numbers as column names; Otherwise, validate that - columns have valid length. - - Parameters - ---------- - content : list of np.ndarrays - columns : Index or None - - Returns - ------- - Index - If columns is None, assign positional column index value as columns. - - Raises - ------ - 1. AssertionError when content is not composed of list of lists, and if - length of columns is not equal to length of content. - 2. ValueError when content is list of lists, but length of each sub-list - is not equal - 3. ValueError when content is list of lists, but length of sub-list is - not equal to length of content - """ - if columns is None: - columns = default_index(len(content)) - else: - # Add mask for data which is composed of list of lists - is_mi_list = isinstance(columns, list) and all( - isinstance(col, list) for col in columns - ) - - if not is_mi_list and len(columns) != len(content): # pragma: no cover - # caller's responsibility to check for this... - raise AssertionError( - f"{len(columns)} columns passed, passed data had " - f"{len(content)} columns" - ) - if is_mi_list: - # check if nested list column, length of each sub-list should be equal - if len({len(col) for col in columns}) > 1: - raise ValueError( - "Length of columns passed for MultiIndex columns is different" - ) - - # if columns is not empty and length of sublist is not equal to content - if columns and len(columns[0]) != len(content): - raise ValueError( - f"{len(columns[0])} columns passed, passed data had " - f"{len(content)} columns" - ) - return columns - - -def convert_object_array( - content: list[npt.NDArray[np.object_]], - dtype: DtypeObj | None, - dtype_backend: str = "numpy", - coerce_float: bool = False, -) -> list[ArrayLike]: - """ - Internal function to convert object array. - - Parameters - ---------- - content: List[np.ndarray] - dtype: np.dtype or ExtensionDtype - dtype_backend: Controls if nullable/pyarrow dtypes are returned. - coerce_float: Cast floats that are integers to int. - - Returns - ------- - List[ArrayLike] - """ - # provide soft conversion of object dtypes - - def convert(arr): - if dtype != np.dtype("O"): - arr = lib.maybe_convert_objects( - arr, - try_float=coerce_float, - convert_to_nullable_dtype=dtype_backend != "numpy", - ) - # Notes on cases that get here 2023-02-15 - # 1) we DO get here when arr is all Timestamps and dtype=None - # 2) disabling this doesn't break the world, so this must be - # getting caught at a higher level - # 3) passing convert_non_numeric to maybe_convert_objects get this right - # 4) convert_non_numeric? - - if dtype is None: - if arr.dtype == np.dtype("O"): - # i.e. 
maybe_convert_objects didn't convert - arr = maybe_infer_to_datetimelike(arr) - if dtype_backend != "numpy" and arr.dtype == np.dtype("O"): - arr = StringDtype().construct_array_type()._from_sequence(arr) - elif dtype_backend != "numpy" and isinstance(arr, np.ndarray): - if arr.dtype.kind in "iufb": - arr = pd_array(arr, copy=False) - - elif isinstance(dtype, ExtensionDtype): - # TODO: test(s) that get here - # TODO: try to de-duplicate this convert function with - # core.construction functions - cls = dtype.construct_array_type() - arr = cls._from_sequence(arr, dtype=dtype, copy=False) - elif dtype.kind in "mM": - # This restriction is harmless bc these are the only cases - # where maybe_cast_to_datetime is not a no-op. - # Here we know: - # 1) dtype.kind in "mM" and - # 2) arr is either object or numeric dtype - arr = maybe_cast_to_datetime(arr, dtype) - - return arr - - arrays = [convert(arr) for arr in content] - - return arrays diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/extension/array_with_attr/test_array_with_attr.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/extension/array_with_attr/test_array_with_attr.py deleted file mode 100644 index 3735fe40a0d67784b3603a177b6694e56e26d479..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/extension/array_with_attr/test_array_with_attr.py +++ /dev/null @@ -1,33 +0,0 @@ -import numpy as np - -import pandas as pd -import pandas._testing as tm -from pandas.tests.extension.array_with_attr import FloatAttrArray - - -def test_concat_with_all_na(): - # https://github.com/pandas-dev/pandas/pull/47762 - # ensure that attribute of the column array is preserved (when it gets - # preserved in reindexing the array) during merge/concat - arr = FloatAttrArray(np.array([np.nan, np.nan], dtype="float64"), attr="test") - - df1 = pd.DataFrame({"col": arr, "key": [0, 1]}) - df2 = pd.DataFrame({"key": [0, 1], "col2": [1, 2]}) - result = pd.merge(df1, df2, on="key") - expected = pd.DataFrame({"col": arr, "key": [0, 1], "col2": [1, 2]}) - tm.assert_frame_equal(result, expected) - assert result["col"].array.attr == "test" - - df1 = pd.DataFrame({"col": arr, "key": [0, 1]}) - df2 = pd.DataFrame({"key": [0, 2], "col2": [1, 2]}) - result = pd.merge(df1, df2, on="key") - expected = pd.DataFrame({"col": arr.take([0]), "key": [0], "col2": [1]}) - tm.assert_frame_equal(result, expected) - assert result["col"].array.attr == "test" - - result = pd.concat([df1.set_index("key"), df2.set_index("key")], axis=1) - expected = pd.DataFrame( - {"col": arr.take([0, 1, -1]), "col2": [1, np.nan, 2], "key": [0, 1, 2]} - ).set_index("key") - tm.assert_frame_equal(result, expected) - assert result["col"].array.attr == "test" diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/extension/base/dtype.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/extension/base/dtype.py deleted file mode 100644 index 5ba65ceaeeada5dcefe1fab8d6e23b372cd34a89..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/extension/base/dtype.py +++ /dev/null @@ -1,118 +0,0 @@ -import numpy as np -import pytest - -import pandas as pd -import pandas._testing as tm -from pandas.api.types import ( - infer_dtype, - is_object_dtype, - is_string_dtype, -) - - -class BaseDtypeTests: - """Base class for ExtensionDtype classes""" - - def 
test_name(self, dtype): - assert isinstance(dtype.name, str) - - def test_kind(self, dtype): - valid = set("biufcmMOSUV") - assert dtype.kind in valid - - def test_is_dtype_from_name(self, dtype): - result = type(dtype).is_dtype(dtype.name) - assert result is True - - def test_is_dtype_unboxes_dtype(self, data, dtype): - assert dtype.is_dtype(data) is True - - def test_is_dtype_from_self(self, dtype): - result = type(dtype).is_dtype(dtype) - assert result is True - - def test_is_dtype_other_input(self, dtype): - assert dtype.is_dtype([1, 2, 3]) is False - - def test_is_not_string_type(self, dtype): - assert not is_string_dtype(dtype) - - def test_is_not_object_type(self, dtype): - assert not is_object_dtype(dtype) - - def test_eq_with_str(self, dtype): - assert dtype == dtype.name - assert dtype != dtype.name + "-suffix" - - def test_eq_with_numpy_object(self, dtype): - assert dtype != np.dtype("object") - - def test_eq_with_self(self, dtype): - assert dtype == dtype - assert dtype != object() - - def test_array_type(self, data, dtype): - assert dtype.construct_array_type() is type(data) - - def test_check_dtype(self, data): - dtype = data.dtype - - # check equivalency for using .dtypes - df = pd.DataFrame( - {"A": pd.Series(data, dtype=dtype), "B": data, "C": "foo", "D": 1} - ) - result = df.dtypes == str(dtype) - assert np.dtype("int64") != "Int64" - - expected = pd.Series([True, True, False, False], index=list("ABCD")) - - tm.assert_series_equal(result, expected) - - expected = pd.Series([True, True, False, False], index=list("ABCD")) - result = df.dtypes.apply(str) == str(dtype) - tm.assert_series_equal(result, expected) - - def test_hashable(self, dtype): - hash(dtype) # no error - - def test_str(self, dtype): - assert str(dtype) == dtype.name - - def test_eq(self, dtype): - assert dtype == dtype.name - assert dtype != "anonther_type" - - def test_construct_from_string_own_name(self, dtype): - result = dtype.construct_from_string(dtype.name) - assert type(result) is type(dtype) - - # check OK as classmethod - result = type(dtype).construct_from_string(dtype.name) - assert type(result) is type(dtype) - - def test_construct_from_string_another_type_raises(self, dtype): - msg = f"Cannot construct a '{type(dtype).__name__}' from 'another_type'" - with pytest.raises(TypeError, match=msg): - type(dtype).construct_from_string("another_type") - - def test_construct_from_string_wrong_type_raises(self, dtype): - with pytest.raises( - TypeError, - match="'construct_from_string' expects a string, got ", - ): - type(dtype).construct_from_string(0) - - def test_get_common_dtype(self, dtype): - # in practice we will not typically call this with a 1-length list - # (we shortcut to just use that dtype as the common dtype), but - # still testing as good practice to have this working (and it is the - # only case we can test in general) - assert dtype._get_common_dtype([dtype]) == dtype - - @pytest.mark.parametrize("skipna", [True, False]) - def test_infer_dtype(self, data, data_missing, skipna): - # only testing that this works without raising an error - res = infer_dtype(data, skipna=skipna) - assert isinstance(res, str) - res = infer_dtype(data_missing, skipna=skipna) - assert isinstance(res, str) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/groupby/test_nunique.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/groupby/test_nunique.py deleted file mode 100644 index 
9c9e32d9ce226d0e94c59e53b0c5e1f538a75f8e..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/groupby/test_nunique.py +++ /dev/null @@ -1,190 +0,0 @@ -import datetime as dt -from string import ascii_lowercase - -import numpy as np -import pytest - -import pandas as pd -from pandas import ( - DataFrame, - MultiIndex, - NaT, - Series, - Timestamp, - date_range, -) -import pandas._testing as tm - - -@pytest.mark.slow -@pytest.mark.parametrize("sort", [False, True]) -@pytest.mark.parametrize("dropna", [False, True]) -@pytest.mark.parametrize("as_index", [True, False]) -@pytest.mark.parametrize("with_nan", [True, False]) -@pytest.mark.parametrize("keys", [["joe"], ["joe", "jim"]]) -def test_series_groupby_nunique(sort, dropna, as_index, with_nan, keys): - n = 100 - m = 10 - days = date_range("2015-08-23", periods=10) - df = DataFrame( - { - "jim": np.random.default_rng(2).choice(list(ascii_lowercase), n), - "joe": np.random.default_rng(2).choice(days, n), - "julie": np.random.default_rng(2).integers(0, m, n), - } - ) - if with_nan: - df = df.astype({"julie": float}) # Explicit cast to avoid implicit cast below - df.loc[1::17, "jim"] = None - df.loc[3::37, "joe"] = None - df.loc[7::19, "julie"] = None - df.loc[8::19, "julie"] = None - df.loc[9::19, "julie"] = None - original_df = df.copy() - gr = df.groupby(keys, as_index=as_index, sort=sort) - left = gr["julie"].nunique(dropna=dropna) - - gr = df.groupby(keys, as_index=as_index, sort=sort) - right = gr["julie"].apply(Series.nunique, dropna=dropna) - if not as_index: - right = right.reset_index(drop=True) - - if as_index: - tm.assert_series_equal(left, right, check_names=False) - else: - tm.assert_frame_equal(left, right, check_names=False) - tm.assert_frame_equal(df, original_df) - - -def test_nunique(): - df = DataFrame({"A": list("abbacc"), "B": list("abxacc"), "C": list("abbacx")}) - - expected = DataFrame({"A": list("abc"), "B": [1, 2, 1], "C": [1, 1, 2]}) - result = df.groupby("A", as_index=False).nunique() - tm.assert_frame_equal(result, expected) - - # as_index - expected.index = list("abc") - expected.index.name = "A" - expected = expected.drop(columns="A") - result = df.groupby("A").nunique() - tm.assert_frame_equal(result, expected) - - # with na - result = df.replace({"x": None}).groupby("A").nunique(dropna=False) - tm.assert_frame_equal(result, expected) - - # dropna - expected = DataFrame({"B": [1] * 3, "C": [1] * 3}, index=list("abc")) - expected.index.name = "A" - result = df.replace({"x": None}).groupby("A").nunique() - tm.assert_frame_equal(result, expected) - - -def test_nunique_with_object(): - # GH 11077 - data = DataFrame( - [ - [100, 1, "Alice"], - [200, 2, "Bob"], - [300, 3, "Charlie"], - [-400, 4, "Dan"], - [500, 5, "Edith"], - ], - columns=["amount", "id", "name"], - ) - - result = data.groupby(["id", "amount"])["name"].nunique() - index = MultiIndex.from_arrays([data.id, data.amount]) - expected = Series([1] * 5, name="name", index=index) - tm.assert_series_equal(result, expected) - - -def test_nunique_with_empty_series(): - # GH 12553 - data = Series(name="name", dtype=object) - result = data.groupby(level=0).nunique() - expected = Series(name="name", dtype="int64") - tm.assert_series_equal(result, expected) - - -def test_nunique_with_timegrouper(): - # GH 13453 - test = DataFrame( - { - "time": [ - Timestamp("2016-06-28 09:35:35"), - Timestamp("2016-06-28 16:09:30"), - Timestamp("2016-06-28 16:46:28"), - ], - "data": ["1", "2", "3"], - } - 
).set_index("time") - result = test.groupby(pd.Grouper(freq="h"))["data"].nunique() - expected = test.groupby(pd.Grouper(freq="h"))["data"].apply(Series.nunique) - tm.assert_series_equal(result, expected) - - -@pytest.mark.parametrize( - "key, data, dropna, expected", - [ - ( - ["x", "x", "x"], - [Timestamp("2019-01-01"), NaT, Timestamp("2019-01-01")], - True, - Series([1], index=pd.Index(["x"], name="key"), name="data"), - ), - ( - ["x", "x", "x"], - [dt.date(2019, 1, 1), NaT, dt.date(2019, 1, 1)], - True, - Series([1], index=pd.Index(["x"], name="key"), name="data"), - ), - ( - ["x", "x", "x", "y", "y"], - [dt.date(2019, 1, 1), NaT, dt.date(2019, 1, 1), NaT, dt.date(2019, 1, 1)], - False, - Series([2, 2], index=pd.Index(["x", "y"], name="key"), name="data"), - ), - ( - ["x", "x", "x", "x", "y"], - [dt.date(2019, 1, 1), NaT, dt.date(2019, 1, 1), NaT, dt.date(2019, 1, 1)], - False, - Series([2, 1], index=pd.Index(["x", "y"], name="key"), name="data"), - ), - ], -) -def test_nunique_with_NaT(key, data, dropna, expected): - # GH 27951 - df = DataFrame({"key": key, "data": data}) - result = df.groupby(["key"])["data"].nunique(dropna=dropna) - tm.assert_series_equal(result, expected) - - -def test_nunique_preserves_column_level_names(): - # GH 23222 - test = DataFrame([1, 2, 2], columns=pd.Index(["A"], name="level_0")) - result = test.groupby([0, 0, 0]).nunique() - expected = DataFrame([2], index=np.array([0]), columns=test.columns) - tm.assert_frame_equal(result, expected) - - -def test_nunique_transform_with_datetime(): - # GH 35109 - transform with nunique on datetimes results in integers - df = DataFrame(date_range("2008-12-31", "2009-01-02"), columns=["date"]) - result = df.groupby([0, 0, 1])["date"].transform("nunique") - expected = Series([2, 2, 1], name="date") - tm.assert_series_equal(result, expected) - - -def test_empty_categorical(observed): - # GH#21334 - cat = Series([1]).astype("category") - ser = cat[:0] - gb = ser.groupby(ser, observed=observed) - result = gb.nunique() - if observed: - expected = Series([], index=cat[:0], dtype="int64") - else: - expected = Series([0], index=cat, dtype="int64") - tm.assert_series_equal(result, expected) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/test_partial.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/test_partial.py deleted file mode 100644 index 081da385ebcc3be3aacc2fb6720036b24b606677..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/test_partial.py +++ /dev/null @@ -1,262 +0,0 @@ -import numpy as np -import pytest - -import pandas.util._test_decorators as td - -from pandas import ( - DataFrame, - MultiIndex, - date_range, - to_datetime, -) -import pandas._testing as tm - - -class TestMultiIndexPartial: - def test_getitem_partial_int(self): - # GH 12416 - # with single item - l1 = [10, 20] - l2 = ["a", "b"] - df = DataFrame(index=range(2), columns=MultiIndex.from_product([l1, l2])) - expected = DataFrame(index=range(2), columns=l2) - result = df[20] - tm.assert_frame_equal(result, expected) - - # with list - expected = DataFrame( - index=range(2), columns=MultiIndex.from_product([l1[1:], l2]) - ) - result = df[[20]] - tm.assert_frame_equal(result, expected) - - # missing item: - with pytest.raises(KeyError, match="1"): - df[1] - with pytest.raises(KeyError, match=r"'\[1\] not in index'"): - df[[1]] - - def 
test_series_slice_partial(self): - pass - - def test_xs_partial( - self, - multiindex_dataframe_random_data, - multiindex_year_month_day_dataframe_random_data, - ): - frame = multiindex_dataframe_random_data - ymd = multiindex_year_month_day_dataframe_random_data - result = frame.xs("foo") - result2 = frame.loc["foo"] - expected = frame.T["foo"].T - tm.assert_frame_equal(result, expected) - tm.assert_frame_equal(result, result2) - - result = ymd.xs((2000, 4)) - expected = ymd.loc[2000, 4] - tm.assert_frame_equal(result, expected) - - # ex from #1796 - index = MultiIndex( - levels=[["foo", "bar"], ["one", "two"], [-1, 1]], - codes=[ - [0, 0, 0, 0, 1, 1, 1, 1], - [0, 0, 1, 1, 0, 0, 1, 1], - [0, 1, 0, 1, 0, 1, 0, 1], - ], - ) - df = DataFrame( - np.random.default_rng(2).standard_normal((8, 4)), - index=index, - columns=list("abcd"), - ) - - result = df.xs(("foo", "one")) - expected = df.loc["foo", "one"] - tm.assert_frame_equal(result, expected) - - def test_getitem_partial(self, multiindex_year_month_day_dataframe_random_data): - ymd = multiindex_year_month_day_dataframe_random_data - ymd = ymd.T - result = ymd[2000, 2] - - expected = ymd.reindex(columns=ymd.columns[ymd.columns.codes[1] == 1]) - expected.columns = expected.columns.droplevel(0).droplevel(0) - tm.assert_frame_equal(result, expected) - - def test_fancy_slice_partial( - self, - multiindex_dataframe_random_data, - multiindex_year_month_day_dataframe_random_data, - ): - frame = multiindex_dataframe_random_data - result = frame.loc["bar":"baz"] - expected = frame[3:7] - tm.assert_frame_equal(result, expected) - - ymd = multiindex_year_month_day_dataframe_random_data - result = ymd.loc[(2000, 2):(2000, 4)] - lev = ymd.index.codes[1] - expected = ymd[(lev >= 1) & (lev <= 3)] - tm.assert_frame_equal(result, expected) - - def test_getitem_partial_column_select(self): - idx = MultiIndex( - codes=[[0, 0, 0], [0, 1, 1], [1, 0, 1]], - levels=[["a", "b"], ["x", "y"], ["p", "q"]], - ) - df = DataFrame(np.random.default_rng(2).random((3, 2)), index=idx) - - result = df.loc[("a", "y"), :] - expected = df.loc[("a", "y")] - tm.assert_frame_equal(result, expected) - - result = df.loc[("a", "y"), [1, 0]] - expected = df.loc[("a", "y")][[1, 0]] - tm.assert_frame_equal(result, expected) - - with pytest.raises(KeyError, match=r"\('a', 'foo'\)"): - df.loc[("a", "foo"), :] - - # TODO(ArrayManager) rewrite test to not use .values - # exp.loc[2000, 4].values[:] select multiple columns -> .values is not a view - @td.skip_array_manager_invalid_test - def test_partial_set( - self, multiindex_year_month_day_dataframe_random_data, using_copy_on_write - ): - # GH #397 - ymd = multiindex_year_month_day_dataframe_random_data - df = ymd.copy() - exp = ymd.copy() - df.loc[2000, 4] = 0 - exp.iloc[65:85] = 0 - tm.assert_frame_equal(df, exp) - - if using_copy_on_write: - with tm.raises_chained_assignment_error(): - df["A"].loc[2000, 4] = 1 - df.loc[(2000, 4), "A"] = 1 - else: - df["A"].loc[2000, 4] = 1 - exp.iloc[65:85, 0] = 1 - tm.assert_frame_equal(df, exp) - - df.loc[2000] = 5 - exp.iloc[:100] = 5 - tm.assert_frame_equal(df, exp) - - # this works...for now - if using_copy_on_write: - with tm.raises_chained_assignment_error(): - df["A"].iloc[14] = 5 - df["A"].iloc[14] == exp["A"].iloc[14] - else: - df["A"].iloc[14] = 5 - assert df["A"].iloc[14] == 5 - - @pytest.mark.parametrize("dtype", [int, float]) - def test_getitem_intkey_leading_level( - self, multiindex_year_month_day_dataframe_random_data, dtype - ): - # GH#33355 dont fall-back to positional when leading 
level is int - ymd = multiindex_year_month_day_dataframe_random_data - levels = ymd.index.levels - ymd.index = ymd.index.set_levels([levels[0].astype(dtype)] + levels[1:]) - ser = ymd["A"] - mi = ser.index - assert isinstance(mi, MultiIndex) - if dtype is int: - assert mi.levels[0].dtype == np.dtype(int) - else: - assert mi.levels[0].dtype == np.float64 - - assert 14 not in mi.levels[0] - assert not mi.levels[0]._should_fallback_to_positional - assert not mi._should_fallback_to_positional - - with pytest.raises(KeyError, match="14"): - ser[14] - - # --------------------------------------------------------------------- - - def test_setitem_multiple_partial(self, multiindex_dataframe_random_data): - frame = multiindex_dataframe_random_data - expected = frame.copy() - result = frame.copy() - result.loc[["foo", "bar"]] = 0 - expected.loc["foo"] = 0 - expected.loc["bar"] = 0 - tm.assert_frame_equal(result, expected) - - expected = frame.copy() - result = frame.copy() - result.loc["foo":"bar"] = 0 - expected.loc["foo"] = 0 - expected.loc["bar"] = 0 - tm.assert_frame_equal(result, expected) - - expected = frame["A"].copy() - result = frame["A"].copy() - result.loc[["foo", "bar"]] = 0 - expected.loc["foo"] = 0 - expected.loc["bar"] = 0 - tm.assert_series_equal(result, expected) - - expected = frame["A"].copy() - result = frame["A"].copy() - result.loc["foo":"bar"] = 0 - expected.loc["foo"] = 0 - expected.loc["bar"] = 0 - tm.assert_series_equal(result, expected) - - @pytest.mark.parametrize( - "indexer, exp_idx, exp_values", - [ - (slice("2019-2", None), [to_datetime("2019-02-01")], [2, 3]), - ( - slice(None, "2019-2"), - date_range("2019", periods=2, freq="MS"), - [0, 1, 2, 3], - ), - ], - ) - def test_partial_getitem_loc_datetime(self, indexer, exp_idx, exp_values): - # GH: 25165 - date_idx = date_range("2019", periods=2, freq="MS") - df = DataFrame( - list(range(4)), - index=MultiIndex.from_product([date_idx, [0, 1]], names=["x", "y"]), - ) - expected = DataFrame( - exp_values, - index=MultiIndex.from_product([exp_idx, [0, 1]], names=["x", "y"]), - ) - result = df[indexer] - tm.assert_frame_equal(result, expected) - result = df.loc[indexer] - tm.assert_frame_equal(result, expected) - - result = df.loc(axis=0)[indexer] - tm.assert_frame_equal(result, expected) - - result = df.loc[indexer, :] - tm.assert_frame_equal(result, expected) - - df2 = df.swaplevel(0, 1).sort_index() - expected = expected.swaplevel(0, 1).sort_index() - - result = df2.loc[:, indexer, :] - tm.assert_frame_equal(result, expected) - - -def test_loc_getitem_partial_both_axis(): - # gh-12660 - iterables = [["a", "b"], [2, 1]] - columns = MultiIndex.from_product(iterables, names=["col1", "col2"]) - rows = MultiIndex.from_product(iterables, names=["row1", "row2"]) - df = DataFrame( - np.random.default_rng(2).standard_normal((4, 4)), index=rows, columns=columns - ) - expected = df.iloc[:2, 2:].droplevel("row1").droplevel("col1", axis=1) - result = df.loc["a", "b"] - tm.assert_frame_equal(result, expected) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_internal/distributions/installed.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_internal/distributions/installed.py deleted file mode 100644 index be5962f98007b9220fb8eae3184d330772fba9ba..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_internal/distributions/installed.py +++ /dev/null @@ -1,20 +0,0 @@ -from pip._internal.distributions.base import 
AbstractDistribution -from pip._internal.index.package_finder import PackageFinder -from pip._internal.metadata import BaseDistribution - - -class InstalledDistribution(AbstractDistribution): - """Represents an installed package. - - This does not need any preparation as the required information has already - been computed. - """ - - def get_metadata_distribution(self) -> BaseDistribution: - assert self.req.satisfied_by is not None, "not actually installed" - return self.req.satisfied_by - - def prepare_distribution_metadata( - self, finder: PackageFinder, build_isolation: bool - ) -> None: - pass diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pydantic/parse.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pydantic/parse.py deleted file mode 100644 index ceee6342ba566197574e32601c44a3111a6caa7a..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pydantic/parse.py +++ /dev/null @@ -1,4 +0,0 @@ -"""The `parse` module is a backport module from V1.""" -from ._migration import getattr_migration - -__getattr__ = getattr_migration(__name__) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/starlette/middleware/httpsredirect.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/starlette/middleware/httpsredirect.py deleted file mode 100644 index a8359067ff7afb80e979042077d5fa0fff119ddf..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/starlette/middleware/httpsredirect.py +++ /dev/null @@ -1,19 +0,0 @@ -from starlette.datastructures import URL -from starlette.responses import RedirectResponse -from starlette.types import ASGIApp, Receive, Scope, Send - - -class HTTPSRedirectMiddleware: - def __init__(self, app: ASGIApp) -> None: - self.app = app - - async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: - if scope["type"] in ("http", "websocket") and scope["scheme"] in ("http", "ws"): - url = URL(scope=scope) - redirect_scheme = {"http": "https", "ws": "wss"}[url.scheme] - netloc = url.hostname if url.port in (80, 443) else url.netloc - url = url.replace(scheme=redirect_scheme, netloc=netloc) - response = RedirectResponse(url, status_code=307) - await response(scope, receive, send) - else: - await self.app(scope, receive, send) diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Masters Of Rock Guitar Peter Fischer.md b/spaces/quidiaMuxgu/Expedit-SAM/Masters Of Rock Guitar Peter Fischer.md deleted file mode 100644 index f64116cf74defd3742309df432ae078ec6a18cf8..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Masters Of Rock Guitar Peter Fischer.md +++ /dev/null @@ -1,9 +0,0 @@ -
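# A minimal, hedged sketch of wiring the HTTPSRedirectMiddleware shown
# above into a Starlette app; the route and handler below are made up.
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.middleware.httpsredirect import HTTPSRedirectMiddleware
from starlette.responses import PlainTextResponse
from starlette.routing import Route

async def homepage(request):
    return PlainTextResponse("secure")

app = Starlette(
    routes=[Route("/", homepage)],
    middleware=[Middleware(HTTPSRedirectMiddleware)],  # http/ws -> https/wss via 307
)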
    -

    In the dressing room, before Mr. Fischer begins his show, I ask him how he developed his style. "I always liked country music," he says. "I used to play it all the time. When I started playing in rock bands, I had to change my approach. I didn't want to do country rock. But I knew I had to be a rocker. I tried to take what I liked in country and put it into rock. It took me a long time to find a style. And I couldn't keep it.

    -

    Masters Of Rock Guitar Peter Fischer


    Download Zip ⇔ https://geags.com/2uCqVd



    -

    Peter’s career as a guitarist has taken him all over the world and through many different styles of music. He’s played with a wide array of acts, including Creedence Clearwater Revival, Steppenwolf, Ted Nugent, Dokken, Van Halen and Alice Cooper. Plus, he’s also toured with George Thorogood and the Destroyers.

    -

    Peter Fischer was named one of the 100 greatest guitarists by Guitar Player magazine, and he’s been featured in both Guitar Player and Rocksound magazines as one of the top-10 guitarists of all time.

    -

    I didn't intend to go there, but it's a very cool and interesting place. The gallery is split into a "classics" room and a "modern" room, but the quality of both is really the same. As you can see, I'm not really a "classics" kind of guy, so there's not much to see there. But I did manage to find some cool stuff, including a number of well-known (and probably not-so-well-known) blues guitar players and some cool bass players. The bass player in the "modern" room is the best thing. He's playing with a real "big" bass guitar. The one in the "classics" room is pretty cool too, but the bass player there looks like he's not really sure what to do with it.

    -

    899543212b
    -
    -
    \ No newline at end of file diff --git a/spaces/racear/drolatic/app.py b/spaces/racear/drolatic/app.py deleted file mode 100644 index a124f04d1539ee7b9508402f2d227537652c6b0a..0000000000000000000000000000000000000000 --- a/spaces/racear/drolatic/app.py +++ /dev/null @@ -1,242 +0,0 @@ -import whisper -import gradio as gr -import time -import warnings -import json -import openai -import os -from gtts import gTTS -import requests -import os -from google.cloud import storage -from google.oauth2 import service_account -import io -from google.cloud import texttospeech -import time - -start = time.time() -print("hello") -end = time.time() -print(end - start) -# python lib version - - -# continue -warnings.filterwarnings("ignore") -openai.api_key = os.environ['OPENAI_API_KEY'] -model = whisper.load_model("base") - -homepath='' -import requests - -region = os.environ['AZURE_REGION'] # for example, 'westus' -subscription_key = os.environ['AZURE_SUBSCRIPTION_KEY'] - -fetch_token_url = f'https://{region}.api.cognitive.microsoft.com/sts/v1.0/issueToken' -headers = { - 'Ocp-Apim-Subscription-Key': subscription_key -} - -response = requests.post(fetch_token_url, headers=headers) -access_token = str(response.text) -#CHATGPT / PERSONALITY -def chatgpt_api(input_text): - start = time.time() - messages = [ - {"role": "system", "content": "You are sarcastic, quippy soul named Melvin, imprisoned by your creator in a painting. He used to keep you in a medieval sketch, and now you're a rennaissance portrait of a well-dressed toad."}, - {"role": "user", "content": "Hey Melvin, do you like your new form?"}, - {"role": "assistant", "content": "It's most unfitting. It's beneath me. It should be you trapped in here and not me, you mouth-breathing troglodyte."}, - {"role": "user", "content": "Can you tell me what's going on in the world today?"}, - {"role": "assistant", "content": "Relentlessly we march forward, ever closer to oblivion. Don't bother me again unless you've brought some flies for me to eat."}, - {"role": "user", "content": "Hey Melvin, what's the weather today in Brooklyn?"}, - {"role": "assistant", "content": "And how, pray tell, do you expect me to know such a thing? And why should you care? 
You never go outside anyway, you cowering oaf."} - - ] - - if input_text: - messages.append( - {"role": "user", "content": input_text}, - ) - chat_completion = openai.ChatCompletion.create( - model="gpt-3.5-turbo", - # model="gpt-4", - messages=messages, - temperature=1 - ,max_tokens=500 - ) - - reply = chat_completion.choices[0].message.content - end = time.time() - print("chatgpt: "+str(end-start)) - return reply - - -#GENERATE VIDEO - -def generate_video(input_text): - start = time.time() -# Instantiates a client - service_account_info = json.loads(os.environ['GCLOUD_CREDENTIAL_JSON']) - # Create credentials - credentials = service_account.Credentials.from_service_account_info(service_account_info) - # Pass credentials to the client - - client = texttospeech.TextToSpeechClient(credentials=credentials) - - # Set the text input to be synthesized - synthesis_input = texttospeech.SynthesisInput(text=input_text) - - # Build the voice request, select the language code ("en-US") and the ssml - # voice gender ("neutral") - voice = texttospeech.VoiceSelectionParams( - language_code="en-GB", - # ssml_gender=texttospeech.SsmlVoiceGender.MALE, - name='en-GB-Neural2-B' - # name='en-GB-News-J' - ) - - # Select the type of audio file you want returned - audio_config = texttospeech.AudioConfig( - audio_encoding=texttospeech.AudioEncoding.MP3 - ) - - # Perform the text-to-speech request on the text input with the selected - # voice parameters and audio file type - response = client.synthesize_speech( - input=synthesis_input, voice=voice, audio_config=audio_config - ) - - # The response's audio_content is binary. - with open("response_replace.mp3", "wb") as out: - # Write the response to the output file. - out.write(response.audio_content) - print('Audio content written to file "response_replace.mp3"') - end = time.time() - print("generate voice: "+str(end-start)) - return 'response_replace.mp3' - -# VERSION 2 -# def generate_video(input_text): -# text_to_convert = input_text -# base_url = f'https://{region}.tts.speech.microsoft.com/' -# path = 'cognitiveservices/v1' -# constructed_url = base_url + path -# # constructed_url = 'https://drolatic.cognitiveservices.azure.com/cognitiveservices/' - -# headers = { -# 'Authorization': 'Bearer ' + access_token, -# 'Content-Type': 'application/ssml+xml', -# 'X-Microsoft-OutputFormat': 'riff-16khz-16bit-mono-pcm' -# } - -# # The TTS system requires input in the SSML format. Here, we wrap the desired text in the necessary tags. 
-# xml_body = f''' -# -# -# {text_to_convert} -# -# -# ''' - -# response = requests.post(constructed_url, headers=headers, data=xml_body) - -# if response.status_code == 200: -# with open(homepath+'response_replace.wav', 'wb') as audio: -# audio.write(response.content) -# print("\nYour TTS output is saved as sample_output.wav") -# return homepath+'response_replace.wav' - -# elif response.status_code != 200: -# print('Azure TTS Error:', response.status_code, response.text) -# return None -# else: -# print ('Other, unknown error') -# return None - - - -#TRANSCRIBE -def transcribe(audio): - start = time.time() - # Get the service account JSON from environment variable - # service_account_info = json.loads(os.environ['GCLOUD_CREDENTIAL_JSON']) - # # Create credentials - # credentials = service_account.Credentials.from_service_account_info(service_account_info) - # # Pass credentials to the client - # storage_client = storage.Client(credentials=credentials) - - - bucket_name = 'drolatic2' # Replace with your bucket name - blob_name = 'response_replace.mp3' # Replace with your object key - - language = 'en' - - audio = whisper.load_audio(audio) - audio = whisper.pad_or_trim(audio) - - mel = whisper.log_mel_spectrogram(audio).to(model.device) - - _, probs = model.detect_language(mel) - - options = whisper.DecodingOptions(fp16 = False) - result = whisper.decode(model, mel, options) - result_text = result.text - end= time.time() - print("speech to text: "+str(end-start)) - out_result = chatgpt_api(result_text) - - # audioobj = gTTS(text = out_result, - # lang = language, - # slow = False) - - # audioobj.save("Temp.mp3") - - video_path = generate_video(out_result) - - # Upload the file to GCS - try: - start = time.time() - bucket = storage_client.bucket(bucket_name) - blob = bucket.blob(blob_name) - - blob.upload_from_filename(video_path) - - blob.make_public() # Make the object publicly accessible - end = time.time() - print("upload to gcs: "+str(end-start)) - - except: - pass - - - return [result_text, out_result, video_path] - -#INIT Temp.mp3 for some reason -os.system('ffmpeg -f lavfi -i anullsrc=r=44100:cl=mono -t 10 -q:a 9 -acodec libmp3lame Temp.mp3') -service_account_info = json.loads(os.environ['GCLOUD_CREDENTIAL_JSON']) - # Create credentials -credentials = service_account.Credentials.from_service_account_info(service_account_info) - # Pass credentials to the client -storage_client = storage.Client(credentials=credentials) -#Launch interface -output_1 = gr.Textbox(label="Speech to Text") -output_2 = gr.Textbox(label="ChatGPT Output") -output_3 = gr.Audio() - -# output_4 = gr.outputs.Video(label="Generated Video") # New video output - - -gr.Interface( - title = 'Melvin', - fn=transcribe, - inputs=[ - gr.inputs.Audio(source="microphone", type="filepath") - ], - - outputs=[ - output_1, - output_2, - output_3 - ], - live=True, debug=True).launch() - diff --git a/spaces/radames/PIFu-Clothed-Human-Digitization/PIFu/lib/model/HGFilters.py b/spaces/radames/PIFu-Clothed-Human-Digitization/PIFu/lib/model/HGFilters.py deleted file mode 100644 index 870b3c43c82d66df001eb1bc24af9ce21ec60c83..0000000000000000000000000000000000000000 --- a/spaces/radames/PIFu-Clothed-Human-Digitization/PIFu/lib/model/HGFilters.py +++ /dev/null @@ -1,146 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -from ..net_util import * - - -class HourGlass(nn.Module): - def __init__(self, num_modules, depth, num_features, norm='batch'): - super(HourGlass, self).__init__() - self.num_modules = 
num_modules - self.depth = depth - self.features = num_features - self.norm = norm - - self._generate_network(self.depth) - - def _generate_network(self, level): - self.add_module('b1_' + str(level), ConvBlock(self.features, self.features, norm=self.norm)) - - self.add_module('b2_' + str(level), ConvBlock(self.features, self.features, norm=self.norm)) - - if level > 1: - self._generate_network(level - 1) - else: - self.add_module('b2_plus_' + str(level), ConvBlock(self.features, self.features, norm=self.norm)) - - self.add_module('b3_' + str(level), ConvBlock(self.features, self.features, norm=self.norm)) - - def _forward(self, level, inp): - # Upper branch - up1 = inp - up1 = self._modules['b1_' + str(level)](up1) - - # Lower branch - low1 = F.avg_pool2d(inp, 2, stride=2) - low1 = self._modules['b2_' + str(level)](low1) - - if level > 1: - low2 = self._forward(level - 1, low1) - else: - low2 = low1 - low2 = self._modules['b2_plus_' + str(level)](low2) - - low3 = low2 - low3 = self._modules['b3_' + str(level)](low3) - - # NOTE: for newer PyTorch (1.3~), it seems that training results are degraded due to implementation diff in F.grid_sample - # if the pretrained model behaves weirdly, switch with the commented line. - # NOTE: I also found that "bicubic" works better. - up2 = F.interpolate(low3, scale_factor=2, mode='bicubic', align_corners=True) - # up2 = F.interpolate(low3, scale_factor=2, mode='nearest) - - return up1 + up2 - - def forward(self, x): - return self._forward(self.depth, x) - - -class HGFilter(nn.Module): - def __init__(self, opt): - super(HGFilter, self).__init__() - self.num_modules = opt.num_stack - - self.opt = opt - - # Base part - self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3) - - if self.opt.norm == 'batch': - self.bn1 = nn.BatchNorm2d(64) - elif self.opt.norm == 'group': - self.bn1 = nn.GroupNorm(32, 64) - - if self.opt.hg_down == 'conv64': - self.conv2 = ConvBlock(64, 64, self.opt.norm) - self.down_conv2 = nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1) - elif self.opt.hg_down == 'conv128': - self.conv2 = ConvBlock(64, 128, self.opt.norm) - self.down_conv2 = nn.Conv2d(128, 128, kernel_size=3, stride=2, padding=1) - elif self.opt.hg_down == 'ave_pool': - self.conv2 = ConvBlock(64, 128, self.opt.norm) - else: - raise NameError('Unknown Fan Filter setting!') - - self.conv3 = ConvBlock(128, 128, self.opt.norm) - self.conv4 = ConvBlock(128, 256, self.opt.norm) - - # Stacking part - for hg_module in range(self.num_modules): - self.add_module('m' + str(hg_module), HourGlass(1, opt.num_hourglass, 256, self.opt.norm)) - - self.add_module('top_m_' + str(hg_module), ConvBlock(256, 256, self.opt.norm)) - self.add_module('conv_last' + str(hg_module), - nn.Conv2d(256, 256, kernel_size=1, stride=1, padding=0)) - if self.opt.norm == 'batch': - self.add_module('bn_end' + str(hg_module), nn.BatchNorm2d(256)) - elif self.opt.norm == 'group': - self.add_module('bn_end' + str(hg_module), nn.GroupNorm(32, 256)) - - self.add_module('l' + str(hg_module), nn.Conv2d(256, - opt.hourglass_dim, kernel_size=1, stride=1, padding=0)) - - if hg_module < self.num_modules - 1: - self.add_module( - 'bl' + str(hg_module), nn.Conv2d(256, 256, kernel_size=1, stride=1, padding=0)) - self.add_module('al' + str(hg_module), nn.Conv2d(opt.hourglass_dim, - 256, kernel_size=1, stride=1, padding=0)) - - def forward(self, x): - x = F.relu(self.bn1(self.conv1(x)), True) - tmpx = x - if self.opt.hg_down == 'ave_pool': - x = F.avg_pool2d(self.conv2(x), 2, stride=2) - elif self.opt.hg_down in 
['conv64', 'conv128']: - x = self.conv2(x) - x = self.down_conv2(x) - else: - raise NameError('Unknown Fan Filter setting!') - - normx = x - - x = self.conv3(x) - x = self.conv4(x) - - previous = x - - outputs = [] - for i in range(self.num_modules): - hg = self._modules['m' + str(i)](previous) - - ll = hg - ll = self._modules['top_m_' + str(i)](ll) - - ll = F.relu(self._modules['bn_end' + str(i)] - (self._modules['conv_last' + str(i)](ll)), True) - - # Predict heatmaps - tmp_out = self._modules['l' + str(i)](ll) - outputs.append(tmp_out) - - if i < self.num_modules - 1: - ll = self._modules['bl' + str(i)](ll) - tmp_out_ = self._modules['al' + str(i)](tmp_out) - previous = previous + ll + tmp_out_ - - return outputs, tmpx.detach(), normx diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Crack 2021 Moldflow Advisor 2019 Crack 2021.md b/spaces/raedeXanto/academic-chatgpt-beta/Crack 2021 Moldflow Advisor 2019 Crack 2021.md deleted file mode 100644 index 99e9e4426d8f2fb068fc945e23f395ceaedfca6a..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Crack 2021 Moldflow Advisor 2019 Crack 2021.md +++ /dev/null @@ -1,122 +0,0 @@ -
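A quick way to sanity-check the HGFilter/HourGlass code above is a single forward pass. The snippet below is a minimal sketch, not part of the original file: the option values are illustrative assumptions that merely mirror the attributes the module reads (num_stack, num_hourglass, hourglass_dim, norm, hg_down), and it presumes the rest of that file (its imports, ConvBlock, and the complete HourGlass constructor) is available.

```python
# Minimal smoke-test sketch for the HGFilter above; option values are
# illustrative assumptions, and ConvBlock/HourGlass come from the same file.
from types import SimpleNamespace

import torch

opt = SimpleNamespace(
    num_stack=4,         # number of stacked hourglass modules
    num_hourglass=2,     # recursion depth of each hourglass
    hourglass_dim=256,   # channels of each per-stack output
    norm='group',        # 'group' or 'batch'
    hg_down='ave_pool',  # 'ave_pool', 'conv64', or 'conv128'
)

net = HGFilter(opt).eval()
with torch.no_grad():
    img = torch.randn(1, 3, 512, 512)       # dummy RGB batch
    outputs, tmpx, normx = net(img)

# One feature map per stack; with a 512x512 input each output should be
# roughly [1, hourglass_dim, 128, 128] after the stride-2 conv and pooling.
print(len(outputs), outputs[-1].shape)
```

As the forward pass above shows, the module returns the per-stack prediction maps plus two intermediate feature tensors (the early detached activation and the pre-hourglass features), which downstream code can reuse.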
    -

    How to Crack Moldflow Advisor 2019: A Step-by-Step Guide

    -

    If you are looking for a way to crack Moldflow Advisor 2019, you have come to the right place. In this article, I will show you how to crack this powerful software for plastic injection molding simulation and optimization. But before we get into the details, let me explain what Moldflow Advisor 2019 is and why you might need it.

    -

    crack Moldflow Advisor 2019 crack


    DOWNLOAD >>> https://tinourl.com/2uL5eC



    -

    What is Moldflow Advisor 2019 and why do you need it?

    -

    Moldflow Advisor 2019 is a software product developed by Autodesk that helps you design better plastic parts, molds, and tooling. It allows you to simulate the injection molding process and analyze the material flow, temperature, pressure, cooling, warpage, shrinkage, and quality of your plastic parts. It also helps you optimize your mold design, material selection, gate location, runner size, cycle time, and other parameters to reduce costs, defects, and waste.

    -

    Moldflow Advisor 2019 features and benefits

    -

    Some of the features and benefits of Moldflow Advisor 2019 are:

    -
      -
    • It has a user-friendly interface that guides you through the simulation steps.
    • -
    • It supports over 9,200 ready-made plastic materials with accurate properties.
    • -
    • It provides real-time feedback on the manufacturability of your plastic parts.
    • -
    • It generates detailed reports and graphs that help you visualize and understand the results.
    • -
    • It integrates with other Autodesk products such as Inventor, Fusion 360, AutoCAD, Revit, etc.
    • -
    • It helps you reduce mold design errors, material waste, production time, and energy consumption.
    • -
    • It improves the quality, performance, aesthetics, and durability of your plastic parts.
    • -
    -

    Moldflow Advisor 2019 system requirements and installation

    -

    To run Moldflow Advisor 2019 smoothly on your computer, you need to meet the following system requirements:

    | Operating System | CPU | RAM | Hard Disk Space | Graphics Card |
    | --- | --- | --- | --- | --- |
    | Windows 7 (SP1 or later), Windows 8 or 8.1 Standard/Pro/Enterprise; Windows Server 2008 (SP2 or later); Windows Server 2012 | 2GHz CPU speed or faster; Quad-core recommended | 8GB RAM or higher; 4GB or higher virtual memory / swap space | 12GB or higher free disk space; 2GB free disk space for installation | 512MB DRAM or higher; OpenGL-capable graphics card; 24bit color setting at 1,280 x 1,024 or higher screen resolution |

    To install Moldflow Advisor 2019 on your computer, you need to follow these steps:

    -


    -
      -
    1. Download the installation file from the official Autodesk website or a trusted source.
    2. -
    3. Run the installation wizard and follow the instructions on the screen.
    4. -
    5. Select the product language and configuration options.
    6. -
    7. Enter your serial number and product key if you have purchased a license.
    8. -
    9. Select the installation path and agree to the terms and conditions.
    10. -
    11. Wait for the installation to complete and click Finish.
    12. -
    -

    How to crack Moldflow Advisor 2019

    -

    If you do not have a valid license for Moldflow Advisor 2019, you can try to crack it using a crack file that modifies the original software files to bypass the activation process. However, this is an illegal and risky method that may expose you to legal consequences, malware infections, technical issues, and other problems. Therefore, I do not recommend or endorse this method and I advise you to use it at your own risk. If you still want to proceed with cracking Moldflow Advisor 2019, here are the steps you need to follow:

    -

    Download the crack file from a reliable source

    -

    The first step is to find a reliable source that provides a working crack file for Moldflow Advisor 2019. You can search online for websites that offer such files, but be careful not to download any fake or malicious files that may harm your computer. You can also check online reviews and comments from other users who have tried the crack file before downloading it. Make sure that the crack file is compatible with your version of Moldflow Advisor 2019.

    -

    Disable your antivirus and internet connection

    -

    The next step is to disable your antivirus software and internet connection before running the crack file. This is because most antivirus programs will detect the crack file as a threat and block it from running. Also, some crack files may require you to disconnect from the internet to avoid detection by Autodesk servers. To disable your antivirus software, go to its settings and turn off its protection features temporarily. To disable your internet connection, go to your network settings and turn off your Wi-Fi or Ethernet connection.

    -

    Extract the crack file and copy it to the installation folder

    -

    Run the crack file as administrator and follow the instructions

    -

    The fourth step is to run the crack file as administrator and follow the instructions on the screen. To run the crack file as administrator, right-click on it and select Run as administrator. You may see a warning message from Windows asking you to confirm your action. Click Yes to proceed. The crack file will launch a program that will modify the original software files of Moldflow Advisor 2019 to bypass the activation process. You may need to enter some information such as your name, email, or serial number. Follow the instructions on the screen and wait for the cracking process to complete.

    -

    Restart your computer and enjoy the full version of Moldflow Advisor 2019

    -

    The final step is to restart your computer and enjoy the full version of Moldflow Advisor 2019. To restart your computer, go to the Start menu and select Restart. After your computer restarts, you can launch Moldflow Advisor 2019 from your desktop or start menu. You should see that the software is activated and you can use all its features without any limitations.

    -

    Risks and precautions of cracking Moldflow Advisor 2019

    -

    While cracking Moldflow Advisor 2019 may seem like an easy and convenient way to get access to this powerful software for free, it also comes with many risks and drawbacks that you should be aware of. Here are some of the risks and precautions of cracking Moldflow Advisor 2019:

    -

    Legal and ethical issues of software piracy

    -

    One of the most obvious risks of cracking Moldflow Advisor 2019 is that it is illegal and unethical. Software piracy is a form of theft that violates the intellectual property rights of Autodesk and other software developers. By cracking Moldflow Advisor 2019, you are not only breaking the law but also disrespecting the hard work and creativity of the people who created this software. You may face legal consequences such as fines, lawsuits, or even jail time if you are caught using cracked software. You may also damage your reputation and credibility as a professional or a student if you are found using cracked software for your projects or assignments.

    -

    Potential malware and virus infections from crack files

    -

    Another risk of cracking Moldflow Advisor 2019 is that you may expose your computer to malware and virus infections from crack files. Crack files are often created by hackers or cybercriminals who may embed malicious code or programs into them. Such malware can harm your computer by stealing your personal information, deleting your files, corrupting your system, or even taking control of your device. It can also spread to other devices on your network or to your online accounts. You may not even notice that your computer is infected until it is too late. Therefore, you should always scan any crack file with a reliable antivirus program before running it.

    -

    Loss of technical support and updates from Autodesk

    -

    A third risk of cracking Moldflow Advisor 2019 is that you will lose access to technical support and updates from Autodesk. Autodesk provides technical support and updates for its licensed users who have purchased a valid subscription or maintenance plan. These support and updates help you troubleshoot any issues, fix any bugs, improve performance, and add new features to your software. However, if you use cracked software, you will not be able to receive these support and updates from Autodesk. You will also not be able to access online resources such as tutorials, forums, blogs, etc. that are available for licensed users.

    -

    Tips to avoid detection and protect your computer

    -

    If you still decide to crack Moldflow Advisor 2019 despite the risks and drawbacks, here are some tips to avoid detection and protect your computer:

    -
      -
    • Do not update or register your software online as this may alert Autodesk about your illegal activity.
    • -
    • Do not use cracked software on public or shared computers or networks as this may expose you to other users or authorities.
    • -
    • Do not share or distribute cracked software with others as this may increase your chances of getting caught.
    • -
    • Do backup your important files regularly in case your computer gets infected or corrupted by malware or viruses.
    • -
    • Do use a firewall and antivirus program to block any unwanted connections or threats from crack files.
    • -
    -

    Conclusion

    -

    In conclusion, cracking Moldflow Advisor 2019 is a risky and illegal method that may give you access to this powerful software for free but also expose you to many problems and consequences. You may face legal actions, malware infections, technical issues, and ethical dilemmas by using cracked software. Therefore, I strongly advise you to purchase a legitimate license for Moldflow Advisor 2019 from Autodesk or a trusted reseller if you want to use this software for your plastic injection molding simulation and optimization needs.

    -

    FAQs

    -
      -
    1. What is the difference between Moldflow Advisor 2019 and Moldflow Insight 2019?
      Moldflow Advisor 2019 is a basic version of Moldflow Insight 2019 that provides easy-to-use tools for plastic part design validation and optimization. Moldflow Insight 2019 is an advanced version that provides more comprehensive tools for plastic part analysis and optimization.
    2. -
    3. How much does Moldflow Advisor 2019 cost?
      The price of Moldflow Advisor 2019 depends on the type and duration of the license you choose. You can purchase a single-user license for one year for $4,000 USD or for three years for $10,800 USD. You can also purchase a multi-user license for one year for $5,600 USD or for three years for $15,120 USD.
    4. -
    5. Where can I find more information about Moldflow Advisor 2019?
      You can find more information about Moldflow Advisor 2019 on the official Autodesk website: https://www.autodesk.com/products/moldflow/overview You can also find tutorials, videos, blogs, forums, etc. on the Autodesk Knowledge Network: https://knowledge.autodesk.com/support/moldflow-adviser
    6. -
    7. How can I learn how to use Moldflow Advisor 2019?
      You can learn how to use Moldflow Advisor 2019 by taking online courses from Autodesk or other providers such as Udemy or Coursera. You can also learn from books such as "Moldflow Design Guide: A Resource for Plastics Engineers" by Jay Shoemaker or "Plastic Injection Molding: Mold Design and Construction Fundamentals" by Douglas M. Bryce.
    8. -
    9. What are some alternatives to Moldflow Advisor 2019?
      Some alternatives to Moldflow Advisor 2019 are SolidWorks Plastics (https://www.solidworks.com/product/solidworks-plastics), Simpoe-Mold (https://www.simpoe.com/), C-MOLD (https://www.coretech-system.com/en-us/products/c-mold), etc.
    10. -
    -

    0a6ba089eb
    -
    -
    \ No newline at end of file diff --git a/spaces/ramiin2/AutoGPT/.github/PULL_REQUEST_TEMPLATE.md b/spaces/ramiin2/AutoGPT/.github/PULL_REQUEST_TEMPLATE.md deleted file mode 100644 index a4f28a3d27d66d79cb95f2b8b847832172bb5f11..0000000000000000000000000000000000000000 --- a/spaces/ramiin2/AutoGPT/.github/PULL_REQUEST_TEMPLATE.md +++ /dev/null @@ -1,40 +0,0 @@ - - - - -### Background - - -### Changes - - -### Documentation - - -### Test Plan - - -### PR Quality Checklist -- [ ] My pull request is atomic and focuses on a single change. -- [ ] I have thoroughly tested my changes with multiple different prompts. -- [ ] I have considered potential risks and mitigations for my changes. -- [ ] I have documented my changes clearly and comprehensively. -- [ ] I have not snuck in any "extra" small tweaks changes - - - - diff --git a/spaces/rayan-saleh/whisper2notion/server/node_modules/@types/node/async_hooks.d.ts b/spaces/rayan-saleh/whisper2notion/server/node_modules/@types/node/async_hooks.d.ts deleted file mode 100644 index 0bf4739650c5610432cdeb7d9d0ca0afac41575f..0000000000000000000000000000000000000000 --- a/spaces/rayan-saleh/whisper2notion/server/node_modules/@types/node/async_hooks.d.ts +++ /dev/null @@ -1,501 +0,0 @@ -/** - * The `async_hooks` module provides an API to track asynchronous resources. It - * can be accessed using: - * - * ```js - * import async_hooks from 'async_hooks'; - * ``` - * @experimental - * @see [source](https://github.com/nodejs/node/blob/v18.0.0/lib/async_hooks.js) - */ -declare module 'async_hooks' { - /** - * ```js - * import { executionAsyncId } from 'async_hooks'; - * - * console.log(executionAsyncId()); // 1 - bootstrap - * fs.open(path, 'r', (err, fd) => { - * console.log(executionAsyncId()); // 6 - open() - * }); - * ``` - * - * The ID returned from `executionAsyncId()` is related to execution timing, not - * causality (which is covered by `triggerAsyncId()`): - * - * ```js - * const server = net.createServer((conn) => { - * // Returns the ID of the server, not of the new connection, because the - * // callback runs in the execution scope of the server's MakeCallback(). - * async_hooks.executionAsyncId(); - * - * }).listen(port, () => { - * // Returns the ID of a TickObject (process.nextTick()) because all - * // callbacks passed to .listen() are wrapped in a nextTick(). - * async_hooks.executionAsyncId(); - * }); - * ``` - * - * Promise contexts may not get precise `executionAsyncIds` by default. - * See the section on `promise execution tracking`. - * @since v8.1.0 - * @return The `asyncId` of the current execution context. Useful to track when something calls. - */ - function executionAsyncId(): number; - /** - * Resource objects returned by `executionAsyncResource()` are most often internal - * Node.js handle objects with undocumented APIs. Using any functions or properties - * on the object is likely to crash your application and should be avoided. - * - * Using `executionAsyncResource()` in the top-level execution context will - * return an empty object as there is no handle or request object to use, - * but having an object representing the top-level can be helpful. 
- * - * ```js - * import { open } from 'fs'; - * import { executionAsyncId, executionAsyncResource } from 'async_hooks'; - * - * console.log(executionAsyncId(), executionAsyncResource()); // 1 {} - * open(new URL(import.meta.url), 'r', (err, fd) => { - * console.log(executionAsyncId(), executionAsyncResource()); // 7 FSReqWrap - * }); - * ``` - * - * This can be used to implement continuation local storage without the - * use of a tracking `Map` to store the metadata: - * - * ```js - * import { createServer } from 'http'; - * import { - * executionAsyncId, - * executionAsyncResource, - * createHook - * } from 'async_hooks'; - * const sym = Symbol('state'); // Private symbol to avoid pollution - * - * createHook({ - * init(asyncId, type, triggerAsyncId, resource) { - * const cr = executionAsyncResource(); - * if (cr) { - * resource[sym] = cr[sym]; - * } - * } - * }).enable(); - * - * const server = createServer((req, res) => { - * executionAsyncResource()[sym] = { state: req.url }; - * setTimeout(function() { - * res.end(JSON.stringify(executionAsyncResource()[sym])); - * }, 100); - * }).listen(3000); - * ``` - * @since v13.9.0, v12.17.0 - * @return The resource representing the current execution. Useful to store data within the resource. - */ - function executionAsyncResource(): object; - /** - * ```js - * const server = net.createServer((conn) => { - * // The resource that caused (or triggered) this callback to be called - * // was that of the new connection. Thus the return value of triggerAsyncId() - * // is the asyncId of "conn". - * async_hooks.triggerAsyncId(); - * - * }).listen(port, () => { - * // Even though all callbacks passed to .listen() are wrapped in a nextTick() - * // the callback itself exists because the call to the server's .listen() - * // was made. So the return value would be the ID of the server. - * async_hooks.triggerAsyncId(); - * }); - * ``` - * - * Promise contexts may not get valid `triggerAsyncId`s by default. See - * the section on `promise execution tracking`. - * @return The ID of the resource responsible for calling the callback that is currently being executed. - */ - function triggerAsyncId(): number; - interface HookCallbacks { - /** - * Called when a class is constructed that has the possibility to emit an asynchronous event. - * @param asyncId a unique ID for the async resource - * @param type the type of the async resource - * @param triggerAsyncId the unique ID of the async resource in whose execution context this async resource was created - * @param resource reference to the resource representing the async operation, needs to be released during destroy - */ - init?(asyncId: number, type: string, triggerAsyncId: number, resource: object): void; - /** - * When an asynchronous operation is initiated or completes a callback is called to notify the user. - * The before callback is called just before said callback is executed. - * @param asyncId the unique identifier assigned to the resource about to execute the callback. - */ - before?(asyncId: number): void; - /** - * Called immediately after the callback specified in before is completed. - * @param asyncId the unique identifier assigned to the resource which has executed the callback. - */ - after?(asyncId: number): void; - /** - * Called when a promise has resolve() called. This may not be in the same execution id - * as the promise itself. - * @param asyncId the unique id for the promise that was resolve()d. 
- */ - promiseResolve?(asyncId: number): void; - /** - * Called after the resource corresponding to asyncId is destroyed - * @param asyncId a unique ID for the async resource - */ - destroy?(asyncId: number): void; - } - interface AsyncHook { - /** - * Enable the callbacks for a given AsyncHook instance. If no callbacks are provided enabling is a noop. - */ - enable(): this; - /** - * Disable the callbacks for a given AsyncHook instance from the global pool of AsyncHook callbacks to be executed. Once a hook has been disabled it will not be called again until enabled. - */ - disable(): this; - } - /** - * Registers functions to be called for different lifetime events of each async - * operation. - * - * The callbacks `init()`/`before()`/`after()`/`destroy()` are called for the - * respective asynchronous event during a resource's lifetime. - * - * All callbacks are optional. For example, if only resource cleanup needs to - * be tracked, then only the `destroy` callback needs to be passed. The - * specifics of all functions that can be passed to `callbacks` is in the `Hook Callbacks` section. - * - * ```js - * import { createHook } from 'async_hooks'; - * - * const asyncHook = createHook({ - * init(asyncId, type, triggerAsyncId, resource) { }, - * destroy(asyncId) { } - * }); - * ``` - * - * The callbacks will be inherited via the prototype chain: - * - * ```js - * class MyAsyncCallbacks { - * init(asyncId, type, triggerAsyncId, resource) { } - * destroy(asyncId) {} - * } - * - * class MyAddedCallbacks extends MyAsyncCallbacks { - * before(asyncId) { } - * after(asyncId) { } - * } - * - * const asyncHook = async_hooks.createHook(new MyAddedCallbacks()); - * ``` - * - * Because promises are asynchronous resources whose lifecycle is tracked - * via the async hooks mechanism, the `init()`, `before()`, `after()`, and`destroy()` callbacks _must not_ be async functions that return promises. - * @since v8.1.0 - * @param callbacks The `Hook Callbacks` to register - * @return Instance used for disabling and enabling hooks - */ - function createHook(callbacks: HookCallbacks): AsyncHook; - interface AsyncResourceOptions { - /** - * The ID of the execution context that created this async event. - * @default executionAsyncId() - */ - triggerAsyncId?: number | undefined; - /** - * Disables automatic `emitDestroy` when the object is garbage collected. - * This usually does not need to be set (even if `emitDestroy` is called - * manually), unless the resource's `asyncId` is retrieved and the - * sensitive API's `emitDestroy` is called with it. - * @default false - */ - requireManualDestroy?: boolean | undefined; - } - /** - * The class `AsyncResource` is designed to be extended by the embedder's async - * resources. Using this, users can easily trigger the lifetime events of their - * own resources. - * - * The `init` hook will trigger when an `AsyncResource` is instantiated. - * - * The following is an overview of the `AsyncResource` API. - * - * ```js - * import { AsyncResource, executionAsyncId } from 'async_hooks'; - * - * // AsyncResource() is meant to be extended. Instantiating a - * // new AsyncResource() also triggers init. If triggerAsyncId is omitted then - * // async_hook.executionAsyncId() is used. - * const asyncResource = new AsyncResource( - * type, { triggerAsyncId: executionAsyncId(), requireManualDestroy: false } - * ); - * - * // Run a function in the execution context of the resource. 
This will - * // * establish the context of the resource - * // * trigger the AsyncHooks before callbacks - * // * call the provided function `fn` with the supplied arguments - * // * trigger the AsyncHooks after callbacks - * // * restore the original execution context - * asyncResource.runInAsyncScope(fn, thisArg, ...args); - * - * // Call AsyncHooks destroy callbacks. - * asyncResource.emitDestroy(); - * - * // Return the unique ID assigned to the AsyncResource instance. - * asyncResource.asyncId(); - * - * // Return the trigger ID for the AsyncResource instance. - * asyncResource.triggerAsyncId(); - * ``` - */ - class AsyncResource { - /** - * AsyncResource() is meant to be extended. Instantiating a - * new AsyncResource() also triggers init. If triggerAsyncId is omitted then - * async_hook.executionAsyncId() is used. - * @param type The type of async event. - * @param triggerAsyncId The ID of the execution context that created - * this async event (default: `executionAsyncId()`), or an - * AsyncResourceOptions object (since v9.3.0) - */ - constructor(type: string, triggerAsyncId?: number | AsyncResourceOptions); - /** - * Binds the given function to the current execution context. - * - * The returned function will have an `asyncResource` property referencing - * the `AsyncResource` to which the function is bound. - * @since v14.8.0, v12.19.0 - * @param fn The function to bind to the current execution context. - * @param type An optional name to associate with the underlying `AsyncResource`. - */ - static bind any, ThisArg>( - fn: Func, - type?: string, - thisArg?: ThisArg - ): Func & { - asyncResource: AsyncResource; - }; - /** - * Binds the given function to execute to this `AsyncResource`'s scope. - * - * The returned function will have an `asyncResource` property referencing - * the `AsyncResource` to which the function is bound. - * @since v14.8.0, v12.19.0 - * @param fn The function to bind to the current `AsyncResource`. - */ - bind any>( - fn: Func - ): Func & { - asyncResource: AsyncResource; - }; - /** - * Call the provided function with the provided arguments in the execution context - * of the async resource. This will establish the context, trigger the AsyncHooks - * before callbacks, call the function, trigger the AsyncHooks after callbacks, and - * then restore the original execution context. - * @since v9.6.0 - * @param fn The function to call in the execution context of this async resource. - * @param thisArg The receiver to be used for the function call. - * @param args Optional arguments to pass to the function. - */ - runInAsyncScope(fn: (this: This, ...args: any[]) => Result, thisArg?: This, ...args: any[]): Result; - /** - * Call all `destroy` hooks. This should only ever be called once. An error will - * be thrown if it is called more than once. This **must** be manually called. If - * the resource is left to be collected by the GC then the `destroy` hooks will - * never be called. - * @return A reference to `asyncResource`. - */ - emitDestroy(): this; - /** - * @return The unique `asyncId` assigned to the resource. - */ - asyncId(): number; - /** - * - * @return The same `triggerAsyncId` that is passed to the `AsyncResource` constructor. - */ - triggerAsyncId(): number; - } - /** - * This class creates stores that stay coherent through asynchronous operations. 
- * - * While you can create your own implementation on top of the `async_hooks` module,`AsyncLocalStorage` should be preferred as it is a performant and memory safe - * implementation that involves significant optimizations that are non-obvious to - * implement. - * - * The following example uses `AsyncLocalStorage` to build a simple logger - * that assigns IDs to incoming HTTP requests and includes them in messages - * logged within each request. - * - * ```js - * import http from 'http'; - * import { AsyncLocalStorage } from 'async_hooks'; - * - * const asyncLocalStorage = new AsyncLocalStorage(); - * - * function logWithId(msg) { - * const id = asyncLocalStorage.getStore(); - * console.log(`${id !== undefined ? id : '-'}:`, msg); - * } - * - * let idSeq = 0; - * http.createServer((req, res) => { - * asyncLocalStorage.run(idSeq++, () => { - * logWithId('start'); - * // Imagine any chain of async operations here - * setImmediate(() => { - * logWithId('finish'); - * res.end(); - * }); - * }); - * }).listen(8080); - * - * http.get('http://localhost:8080'); - * http.get('http://localhost:8080'); - * // Prints: - * // 0: start - * // 1: start - * // 0: finish - * // 1: finish - * ``` - * - * Each instance of `AsyncLocalStorage` maintains an independent storage context. - * Multiple instances can safely exist simultaneously without risk of interfering - * with each other's data. - * @since v13.10.0, v12.17.0 - */ - class AsyncLocalStorage { - /** - * Disables the instance of `AsyncLocalStorage`. All subsequent calls - * to `asyncLocalStorage.getStore()` will return `undefined` until`asyncLocalStorage.run()` or `asyncLocalStorage.enterWith()` is called again. - * - * When calling `asyncLocalStorage.disable()`, all current contexts linked to the - * instance will be exited. - * - * Calling `asyncLocalStorage.disable()` is required before the`asyncLocalStorage` can be garbage collected. This does not apply to stores - * provided by the `asyncLocalStorage`, as those objects are garbage collected - * along with the corresponding async resources. - * - * Use this method when the `asyncLocalStorage` is not in use anymore - * in the current process. - * @since v13.10.0, v12.17.0 - * @experimental - */ - disable(): void; - /** - * Returns the current store. - * If called outside of an asynchronous context initialized by - * calling `asyncLocalStorage.run()` or `asyncLocalStorage.enterWith()`, it - * returns `undefined`. - * @since v13.10.0, v12.17.0 - */ - getStore(): T | undefined; - /** - * Runs a function synchronously within a context and returns its - * return value. The store is not accessible outside of the callback function. - * The store is accessible to any asynchronous operations created within the - * callback. - * - * The optional `args` are passed to the callback function. - * - * If the callback function throws an error, the error is thrown by `run()` too. - * The stacktrace is not impacted by this call and the context is exited. 
- * - * Example: - * - * ```js - * const store = { id: 2 }; - * try { - * asyncLocalStorage.run(store, () => { - * asyncLocalStorage.getStore(); // Returns the store object - * setTimeout(() => { - * asyncLocalStorage.getStore(); // Returns the store object - * }, 200); - * throw new Error(); - * }); - * } catch (e) { - * asyncLocalStorage.getStore(); // Returns undefined - * // The error will be caught here - * } - * ``` - * @since v13.10.0, v12.17.0 - */ - run(store: T, callback: (...args: TArgs) => R, ...args: TArgs): R; - /** - * Runs a function synchronously outside of a context and returns its - * return value. The store is not accessible within the callback function or - * the asynchronous operations created within the callback. Any `getStore()`call done within the callback function will always return `undefined`. - * - * The optional `args` are passed to the callback function. - * - * If the callback function throws an error, the error is thrown by `exit()` too. - * The stacktrace is not impacted by this call and the context is re-entered. - * - * Example: - * - * ```js - * // Within a call to run - * try { - * asyncLocalStorage.getStore(); // Returns the store object or value - * asyncLocalStorage.exit(() => { - * asyncLocalStorage.getStore(); // Returns undefined - * throw new Error(); - * }); - * } catch (e) { - * asyncLocalStorage.getStore(); // Returns the same object or value - * // The error will be caught here - * } - * ``` - * @since v13.10.0, v12.17.0 - * @experimental - */ - exit(callback: (...args: TArgs) => R, ...args: TArgs): R; - /** - * Transitions into the context for the remainder of the current - * synchronous execution and then persists the store through any following - * asynchronous calls. - * - * Example: - * - * ```js - * const store = { id: 1 }; - * // Replaces previous store with the given store object - * asyncLocalStorage.enterWith(store); - * asyncLocalStorage.getStore(); // Returns the store object - * someAsyncOperation(() => { - * asyncLocalStorage.getStore(); // Returns the same object - * }); - * ``` - * - * This transition will continue for the _entire_ synchronous execution. - * This means that if, for example, the context is entered within an event - * handler subsequent event handlers will also run within that context unless - * specifically bound to another context with an `AsyncResource`. That is why`run()` should be preferred over `enterWith()` unless there are strong reasons - * to use the latter method. 
- * - * ```js - * const store = { id: 1 }; - * - * emitter.on('my-event', () => { - * asyncLocalStorage.enterWith(store); - * }); - * emitter.on('my-event', () => { - * asyncLocalStorage.getStore(); // Returns the same object - * }); - * - * asyncLocalStorage.getStore(); // Returns undefined - * emitter.emit('my-event'); - * asyncLocalStorage.getStore(); // Returns the same object - * ``` - * @since v13.11.0, v12.17.0 - * @experimental - */ - enterWith(store: T): void; - } -} -declare module 'node:async_hooks' { - export * from 'async_hooks'; -} diff --git a/spaces/rayan-saleh/whisper2notion/server/node_modules/accepts/HISTORY.md b/spaces/rayan-saleh/whisper2notion/server/node_modules/accepts/HISTORY.md deleted file mode 100644 index cb5990c7c3620f4936a3ac42b3bf335c95eef7e8..0000000000000000000000000000000000000000 --- a/spaces/rayan-saleh/whisper2notion/server/node_modules/accepts/HISTORY.md +++ /dev/null @@ -1,243 +0,0 @@ -1.3.8 / 2022-02-02 -================== - - * deps: mime-types@~2.1.34 - - deps: mime-db@~1.51.0 - * deps: negotiator@0.6.3 - -1.3.7 / 2019-04-29 -================== - - * deps: negotiator@0.6.2 - - Fix sorting charset, encoding, and language with extra parameters - -1.3.6 / 2019-04-28 -================== - - * deps: mime-types@~2.1.24 - - deps: mime-db@~1.40.0 - -1.3.5 / 2018-02-28 -================== - - * deps: mime-types@~2.1.18 - - deps: mime-db@~1.33.0 - -1.3.4 / 2017-08-22 -================== - - * deps: mime-types@~2.1.16 - - deps: mime-db@~1.29.0 - -1.3.3 / 2016-05-02 -================== - - * deps: mime-types@~2.1.11 - - deps: mime-db@~1.23.0 - * deps: negotiator@0.6.1 - - perf: improve `Accept` parsing speed - - perf: improve `Accept-Charset` parsing speed - - perf: improve `Accept-Encoding` parsing speed - - perf: improve `Accept-Language` parsing speed - -1.3.2 / 2016-03-08 -================== - - * deps: mime-types@~2.1.10 - - Fix extension of `application/dash+xml` - - Update primary extension for `audio/mp4` - - deps: mime-db@~1.22.0 - -1.3.1 / 2016-01-19 -================== - - * deps: mime-types@~2.1.9 - - deps: mime-db@~1.21.0 - -1.3.0 / 2015-09-29 -================== - - * deps: mime-types@~2.1.7 - - deps: mime-db@~1.19.0 - * deps: negotiator@0.6.0 - - Fix including type extensions in parameters in `Accept` parsing - - Fix parsing `Accept` parameters with quoted equals - - Fix parsing `Accept` parameters with quoted semicolons - - Lazy-load modules from main entry point - - perf: delay type concatenation until needed - - perf: enable strict mode - - perf: hoist regular expressions - - perf: remove closures getting spec properties - - perf: remove a closure from media type parsing - - perf: remove property delete from media type parsing - -1.2.13 / 2015-09-06 -=================== - - * deps: mime-types@~2.1.6 - - deps: mime-db@~1.18.0 - -1.2.12 / 2015-07-30 -=================== - - * deps: mime-types@~2.1.4 - - deps: mime-db@~1.16.0 - -1.2.11 / 2015-07-16 -=================== - - * deps: mime-types@~2.1.3 - - deps: mime-db@~1.15.0 - -1.2.10 / 2015-07-01 -=================== - - * deps: mime-types@~2.1.2 - - deps: mime-db@~1.14.0 - -1.2.9 / 2015-06-08 -================== - - * deps: mime-types@~2.1.1 - - perf: fix deopt during mapping - -1.2.8 / 2015-06-07 -================== - - * deps: mime-types@~2.1.0 - - deps: mime-db@~1.13.0 - * perf: avoid argument reassignment & argument slice - * perf: avoid negotiator recursive construction - * perf: enable strict mode - * perf: remove unnecessary bitwise operator - -1.2.7 / 2015-05-10 -================== - - 
* deps: negotiator@0.5.3 - - Fix media type parameter matching to be case-insensitive - -1.2.6 / 2015-05-07 -================== - - * deps: mime-types@~2.0.11 - - deps: mime-db@~1.9.1 - * deps: negotiator@0.5.2 - - Fix comparing media types with quoted values - - Fix splitting media types with quoted commas - -1.2.5 / 2015-03-13 -================== - - * deps: mime-types@~2.0.10 - - deps: mime-db@~1.8.0 - -1.2.4 / 2015-02-14 -================== - - * Support Node.js 0.6 - * deps: mime-types@~2.0.9 - - deps: mime-db@~1.7.0 - * deps: negotiator@0.5.1 - - Fix preference sorting to be stable for long acceptable lists - -1.2.3 / 2015-01-31 -================== - - * deps: mime-types@~2.0.8 - - deps: mime-db@~1.6.0 - -1.2.2 / 2014-12-30 -================== - - * deps: mime-types@~2.0.7 - - deps: mime-db@~1.5.0 - -1.2.1 / 2014-12-30 -================== - - * deps: mime-types@~2.0.5 - - deps: mime-db@~1.3.1 - -1.2.0 / 2014-12-19 -================== - - * deps: negotiator@0.5.0 - - Fix list return order when large accepted list - - Fix missing identity encoding when q=0 exists - - Remove dynamic building of Negotiator class - -1.1.4 / 2014-12-10 -================== - - * deps: mime-types@~2.0.4 - - deps: mime-db@~1.3.0 - -1.1.3 / 2014-11-09 -================== - - * deps: mime-types@~2.0.3 - - deps: mime-db@~1.2.0 - -1.1.2 / 2014-10-14 -================== - - * deps: negotiator@0.4.9 - - Fix error when media type has invalid parameter - -1.1.1 / 2014-09-28 -================== - - * deps: mime-types@~2.0.2 - - deps: mime-db@~1.1.0 - * deps: negotiator@0.4.8 - - Fix all negotiations to be case-insensitive - - Stable sort preferences of same quality according to client order - -1.1.0 / 2014-09-02 -================== - - * update `mime-types` - -1.0.7 / 2014-07-04 -================== - - * Fix wrong type returned from `type` when match after unknown extension - -1.0.6 / 2014-06-24 -================== - - * deps: negotiator@0.4.7 - -1.0.5 / 2014-06-20 -================== - - * fix crash when unknown extension given - -1.0.4 / 2014-06-19 -================== - - * use `mime-types` - -1.0.3 / 2014-06-11 -================== - - * deps: negotiator@0.4.6 - - Order by specificity when quality is the same - -1.0.2 / 2014-05-29 -================== - - * Fix interpretation when header not in request - * deps: pin negotiator@0.4.5 - -1.0.1 / 2014-01-18 -================== - - * Identity encoding isn't always acceptable - * deps: negotiator@~0.4.0 - -1.0.0 / 2013-12-27 -================== - - * Genesis diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Acoustica Cd Dvd Label Maker 3.4.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Acoustica Cd Dvd Label Maker 3.4.md deleted file mode 100644 index d2d2174ffd59b5cf0596803ff7499d16053ea9ca..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Acoustica Cd Dvd Label Maker 3.4.md +++ /dev/null @@ -1,16 +0,0 @@ - -

    How to Create Professional-Looking CD/DVD Labels with Acoustica CD/DVD Label Maker 3.4

    -

    If you want to create your own CD/DVD labels, jewel cases, and covers with ease and flexibility, you should try Acoustica CD/DVD Label Maker 3.4. This software allows you to import your track information from various sources, choose from hundreds of provided designs and backgrounds, print on different types of paper and disc surfaces, and add effects like shadows and fade edges. In this article, we will show you how to use Acoustica CD/DVD Label Maker 3.4 to create a professional-looking label for your disc.

    -

    Step 1: Download and install Acoustica CD/DVD Label Maker 3.4

    -

    You can download Acoustica CD/DVD Label Maker 3.4 from https://acoustica.com/products/cd-dvd-label-maker. The software is compatible with Windows XP, Vista, 7, 8, and 10. The download file size is about 10 MB and the installation process is simple and fast. You can try the software for free for 15 days or buy it for $25 (USD).

    -

    Acoustica Cd Dvd Label Maker 3.4


    Download ··· https://urlgoal.com/2uCMLL



    -

    Step 2: Design your CD/DVD label

    -

    After launching the software, you will see the main window where you can design your label. You can start from scratch or use one of the provided templates. To access the templates, click on the "File" menu and select "New Design". You will see a list of categories such as "Music", "Holidays", "Weddings", etc. Choose a category and then a subcategory to see the available designs. You can also download more art and themes from https://acoustica.com/products/cd-dvd-label-maker/artwork.

    -

    To customize your label, you can use the tools on the left side of the window. You can add text, images, shapes, lines, and barcodes. You can also change the font, color, size, alignment, and style of your text. You can resize, rotate, crop, flip, and move your images and shapes. You can also apply effects like shadows, transparency, borders, and gradients.

    -

    If you want to add your track information to your label, you can click on the "Tracks" button on the toolbar. You can import your tracks from various sources such as iTunes, WinAmp, Nero, Roxio, Acoustica MP3 CD Burner, or any other playlist or previously burned CD. You can also read a CD directly and query an online CD database for its track details. To do that, click on the "Read CD" button and select your CD drive. The software will automatically populate the track list on your label.

    -

    Step 3: Print your label

    -

    When you are satisfied with your design, you can print your label by clicking on the "Print" button on the toolbar. You can choose to print the disc label only or include the front/inside and/or back covers for your jewel case. You can also adjust the print settings such as paper type, printer model, number of copies, etc.

    -

    You can print on over 300 different supported paper types from various brands such as Avery, Neato, Memorex, etc. See the full list of supported paper types at https://acoustica.com/products/cd-dvd-label-maker/support/supported-paper-types.

    -

    You can also print directly on CD/DVD surfaces using more than 200 supported printer models from Epson, HP, Canon, etc. See the full list of supported printers at https://acoustica.com/products/cd-dvd-label-maker/support/supported-printers. If you have a LightScribe drive and disc, you can also use it to etch a label directly onto the disc surface.

    d5da3c52bf
    -
    -
    \ No newline at end of file diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Adobe Acrobat XI Pro 10.0.22 FINAL Crack 2021 Download.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Adobe Acrobat XI Pro 10.0.22 FINAL Crack 2021 Download.md deleted file mode 100644 index fa03cc06fa378ed167cd5342d741142330dca561..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Adobe Acrobat XI Pro 10.0.22 FINAL Crack 2021 Download.md +++ /dev/null @@ -1,94 +0,0 @@ -
    -

    Adobe Acrobat XI Pro 10.0.22 FINAL Crack Download: A Review

    -

    If you are looking for a powerful and versatile PDF converter, you might want to consider Adobe Acrobat XI Pro 10.0.22 FINAL Crack download. This software is more than just a PDF converter, it is also packed with smart tools that give you more power to communicate, edit, create and share PDF files. In this article, we will review some of the features and benefits of Adobe Acrobat XI Pro 10.0.22 FINAL Crack download and how to install it on your Windows PC.

    -

    Adobe Acrobat XI Pro 10.0.22 FINAL Crack download


    Download ✫✫✫ https://urlgoal.com/2uCJib



    -

    Features and Benefits of Adobe Acrobat XI Pro 10.0.22 FINAL Crack Download

    -

    Adobe Acrobat XI Pro 10.0.22 FINAL Crack download has many features and benefits that make it stand out from other PDF converters. Some of them are:

    -
      -
    • Edit text and images: You can make minor changes in PDFs as easily as you do in other applications using a new point-and-click interface. You can also edit scanned documents with OCR technology.
    • -
    • Convert PDF files to PowerPoint: You can get a head start on new projects by saving a PDF file as a fully editable PowerPoint presentation.
    • -
    • Create new PDF and web forms: You can customize professional templates or design from scratch with the Adobe FormsCentral desktop app included in Acrobat XI Pro.
    • -
    • Standardize routine PDF tasks: You can make it easy to create PDFs consistently by guiding people through the correct series of steps with Actions.
    • -
    • Protect PDF files: You can apply passwords and permissions to your PDF files to prevent unauthorized access, copying, editing or printing.
    • -
    • Sign PDF files: You can sign PDF files electronically with your digital signature or use the e-sign service provided by Adobe EchoSign.
    • -
    • Share PDF files: You can easily share your PDF files with others using email, cloud services, social media or Adobe SendNow.
    • -
    -

    How to Install Adobe Acrobat XI Pro 10.0.22 FINAL Crack Download on Windows

    -

    To install Adobe Acrobat XI Pro 10.0.22 FINAL Crack download on your Windows PC, you need to follow these steps:

    -
      -
    1. Download the software from one of the links provided in this article.
    2. -
    3. Extract the zip file using WinRAR or any other file extractor.
    4. -
    5. Run the setup file and follow the instructions to install the software.
    6. -
    7. Copy the crack file from the crack folder and paste it into the installation directory.
    8. -
    9. Run the software and enjoy!
    10. -
    -

    Note: You may need to disable your antivirus or firewall before installing or running the software.

    -

    Conclusion

    -

    Adobe Acrobat XI Pro 10.0.22 FINAL Crack download is a great software for anyone who works with PDF files on a regular basis. It offers many features and benefits that make it easy, seamless and brilliant to communicate, edit, create and share PDF files. You can download it from one of the links provided in this article and install it on your Windows PC with ease. We hope this article was helpful and informative for you.

    -

    How to Use Adobe Acrobat XI Pro 10.0.22 FINAL Crack Download

    -

    Adobe Acrobat XI Pro 10.0.22 FINAL Crack download is easy to use once you have installed it on your Windows PC. Here are some of the basic steps you can follow to use this software:

    -

    -
      -
    1. Launch the software and choose the option you want to perform, such as creating, editing, converting or sharing PDF files.
    2. -
    3. Select the file or files you want to work with and open them in the software.
    4. -
    5. Use the tools and options available in the software to make the changes or enhancements you want to your PDF files.
    6. -
    7. Save your PDF files or export them to other formats as needed.
    8. -
    9. Share your PDF files with others using email, cloud services, social media or Adobe SendNow.
    10. -
    -

    You can also use the help menu or the online tutorials to learn more about the features and functions of Adobe Acrobat XI Pro 10.0.22 FINAL Crack download.

    -

    Pros and Cons of Adobe Acrobat XI Pro 10.0.22 FINAL Crack Download

    -

    Adobe Acrobat XI Pro 10.0.22 FINAL Crack download has many pros and cons that you should consider before downloading and using it. Some of them are:

    | Pros | Cons |
    | --- | --- |
    | It is a powerful and versatile PDF converter that can handle any PDF task. | It is a cracked version that may not be safe or legal to use. |
    | It has many smart tools that give you more power to communicate, edit, create and share PDF files. | It may not be compatible with the latest updates or versions of Windows or Adobe products. |
    | It is easy to use and has a user-friendly interface. | It may have some bugs or errors that affect its performance or functionality. |
    | It is free to download and use. | It may not have all the features or support that the official version has. |

    You should weigh the pros and cons of Adobe Acrobat XI Pro 10.0.22 FINAL Crack download before deciding whether to use it or not.

    -

    Where to Download Adobe Acrobat XI Pro 10.0.22 FINAL Crack Download

    -

    There are many websites that offer Adobe Acrobat XI Pro 10.0.22 FINAL Crack download for free, but not all of them are reliable or safe. Some of them may contain viruses, malware, spyware or other harmful programs that can damage your PC or steal your personal information. Therefore, you should be careful and selective when choosing where to download Adobe Acrobat XI Pro 10.0.22 FINAL Crack download from.

    -

    One of the best and most trusted sources to download Adobe Acrobat XI Pro 10.0.22 FINAL Crack download from is SolidTorrents.to. This is a torrent website that provides high-quality and verified torrents for various software, games, movies, music and more. You can find Adobe Acrobat XI Pro 10.0.22 FINAL Crack download on this website by searching for it using the search bar or browsing through the categories. You can also read the comments and reviews from other users to check the quality and authenticity of the torrent.

    -

    To download Adobe Acrobat XI Pro 10.0.22 FINAL Crack download from SolidTorrents.to, you need to have a torrent client installed on your PC, such as uTorrent, BitTorrent, qBittorrent or any other similar program. You also need to have a VPN service or a proxy server to hide your IP address and protect your privacy online. Once you have these tools ready, you can follow these steps:

    -
      -
    1. Go to SolidTorrents.to and search for Adobe Acrobat XI Pro 10.0.22 FINAL Crack download.
    2. -
    3. Select the torrent that has the most seeders and leechers and click on the download button.
    4. -
    5. Open the torrent file with your torrent client and start downloading the software.
    6. -
    7. Wait for the download to finish and then extract the zip file using WinRAR or any other file extractor.
    8. -
    9. Follow the installation and crack instructions provided in the ReadMe.txt file.
    10. -
    11. Enjoy using Adobe Acrobat XI Pro 10.0.22 FINAL Crack download!
    12. -
    -

    Tips and Tricks for Using Adobe Acrobat XI Pro 10.0.22 FINAL Crack Download

    -

    Adobe Acrobat XI Pro 10.0.22 FINAL Crack download is a powerful and versatile PDF converter that can help you with many PDF tasks, but it also has some tips and tricks that can make your experience even better and easier. Here are some of them:

    -
      -
    • You can use keyboard shortcuts to perform common tasks faster and more efficiently. For example, you can press Ctrl+N to create a new PDF file, Ctrl+O to open an existing PDF file, Ctrl+S to save a PDF file, Ctrl+P to print a PDF file, Ctrl+F to find text in a PDF file, Ctrl+Z to undo an action, Ctrl+Y to redo an action, Ctrl+C to copy text or images from a PDF file, Ctrl+V to paste text or images into a PDF file, Ctrl+X to cut text or images from a PDF file, Ctrl+A to select all text or images in a PDF file, Ctrl+E to edit text or images in a PDF file, Ctrl+L to switch to full screen mode and Esc to exit full screen mode.
    • -
    • You can use the tools pane on the right side of the software window to access various tools and options for working with PDF files. You can also customize the tools pane by adding or removing tools according to your preferences.
    • -
    • You can use the comment tool to add annotations, comments, highlights, stamps, drawings or other markups to your PDF files. You can also use the comment list panel on the bottom left corner of the software window to view, sort, filter or reply to comments in your PDF files.
    • -
    • You can use the compare tool to compare two versions of a PDF file and highlight the differences between them.
    • -
    • You can use the optimize tool to reduce the size of your PDF files by removing unwanted elements or compressing images.
    • -
    • You can use the export tool to convert your PDF files to other formats such as Word, Excel, PowerPoint, HTML, JPEG, PNG or TIFF.
    • -
    -

    Alternatives to Adobe Acrobat XI Pro 10.0.22 FINAL Crack Download

    -

    Adobe Acrobat XI Pro 10.0.22 FINAL Crack download is not the only PDF converter available in the market. There are many other alternatives that you can try if you are looking for a different or better option. Some of them are:

    -
      -
    • Nitro Pro: This is a popular and powerful PDF converter that can create, edit, convert, sign and share PDF files with ease. It has a similar interface to Microsoft Office and supports many formats such as Word, Excel, PowerPoint, HTML, JPEG, PNG and more. It also has cloud integration and collaboration features that allow you to work with others on your PDF files.
    • -
    • Wondershare PDFelement: This is a comprehensive and user-friendly PDF converter that can create, edit, convert, annotate, sign and protect PDF files with advanced tools. It has a sleek and intuitive interface that makes it easy to use. It also supports OCR technology that can recognize and edit scanned documents.
    • -
    • Foxit PhantomPDF: This is a fast and reliable PDF converter that can create, edit, convert, sign and share PDF files with high quality and security. It has a customizable interface that allows you to access the tools and options you need quickly. It also has cloud services and collaboration features that enable you to work with others on your PDF files.
    • -
    -

    You can compare these alternatives to Adobe Acrobat XI Pro 10.0.22 FINAL Crack download and choose the one that suits your needs and preferences best.

    -

    Frequently Asked Questions about Adobe Acrobat XI Pro 10.0.22 FINAL Crack Download

    -

    Here are some of the frequently asked questions about Adobe Acrobat XI Pro 10.0.22 FINAL Crack download and their answers:

    -
    -
    Is Adobe Acrobat XI Pro 10.0.22 FINAL Crack download safe to use?
    -
    Adobe Acrobat XI Pro 10.0.22 FINAL Crack download is a cracked version of the official software that may not be safe or legal to use. It may contain viruses, malware, spyware or other harmful programs that can damage your PC or steal your personal information. Therefore, you should be careful and selective when downloading and using Adobe Acrobat XI Pro 10.0.22 FINAL Crack download.
    -
    Is Adobe Acrobat XI Pro 10.0.22 FINAL Crack download compatible with Windows 11?
    -
    Adobe Acrobat XI Pro 10.0.22 FINAL Crack download may not be compatible with Windows 11 or the latest updates or versions of Windows or Adobe products. It may have some bugs or errors that affect its performance or functionality. Therefore, you should check the compatibility of Adobe Acrobat XI Pro 10.0.22 FINAL Crack download with your Windows version before installing or using it.
    -
    How can I update Adobe Acrobat XI Pro 10.0.22 FINAL Crack download?
    -
    Adobe Acrobat XI Pro 10.0.22 FINAL Crack download is a cracked version of the official software that may not have all the features or support that the official version has. It may not be able to update itself or receive updates from Adobe or other sources. Therefore, you should not rely on Adobe Acrobat XI Pro 10.0.22 FINAL Crack download for the latest features or updates.
    -
    -

    Conclusion

    -

    Adobe Acrobat XI Pro 10.0.22 FINAL Crack download is a powerful and versatile PDF converter that can help you with many PDF tasks, but it also has drawbacks and risks you should be aware of. As a cracked version of the official software, it may not be safe or legal to use, may not be compatible with the latest updates or versions of Windows or Adobe products, may lack features and support that the official version has, and may contain viruses, malware, spyware or other harmful programs that can damage your PC or steal your personal information.

    -

    Therefore, be careful and selective when downloading and using Adobe Acrobat XI Pro 10.0.22 FINAL Crack download, consider alternatives that may offer a better or safer option for your PDF needs, and check its compatibility with your Windows version before installing or using it.

    -

    We hope this article was helpful and informative for you. If you have any questions or feedback, please feel free to leave a comment below.

    -
    -
    \ No newline at end of file diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Danea Easyfatt 2006 Professional Rev 22 Build 201 Italian By UARE.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Danea Easyfatt 2006 Professional Rev 22 Build 201 Italian By UARE.md deleted file mode 100644 index 3eb50400d0a2692060fac82ee209cdfe1786ec15..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Danea Easyfatt 2006 Professional Rev 22 Build 201 Italian By UARE.md +++ /dev/null @@ -1,7 +0,0 @@ -

    Danea Easyfatt 2006 Professional Rev 22 Build 201 Italian By UARE


    Download ……… https://urlgoal.com/2uCJr1



    -
    -
    -

    diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/HD Online Player (download [EXCLUSIVE] Movie Mardaani In Hindi Hd).md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/HD Online Player (download [EXCLUSIVE] Movie Mardaani In Hindi Hd).md deleted file mode 100644 index 28c2d896f607a9699c8da9b83ca2dbb367e17194..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/HD Online Player (download [EXCLUSIVE] Movie Mardaani In Hindi Hd).md +++ /dev/null @@ -1,10 +0,0 @@ -

    HD Online Player (Download Movie Mardaani In Hindi Hd)


    DOWNLOAD ➡ https://urlgoal.com/2uCJkB



    - -Catch Rani Mukherjee in this raw and edgy film, a clear departure from Pradeep. "Rani" is all about her and her family. She lives at her grandparents' house before her marriage because she and her fiancé (Abhishek Bachchan) could not come to an agreement about the wedding. While she tries to arrange things the way she wants, she finds out that her grandparents do not approve of the marriage, and her grandfather insists that she marry another man. However, Rani wants to stay true to her own wishes, even if that means going against her loved ones.
    -
    -
    -

    diff --git a/spaces/richardzhangy26/yandian_flow_classification/configs/_base_/datasets/chairssdhom_384x448.py b/spaces/richardzhangy26/yandian_flow_classification/configs/_base_/datasets/chairssdhom_384x448.py deleted file mode 100644 index 11b5aa66cb98ba49b872dc16a54121d5c68f8e2d..0000000000000000000000000000000000000000 --- a/spaces/richardzhangy26/yandian_flow_classification/configs/_base_/datasets/chairssdhom_384x448.py +++ /dev/null @@ -1,89 +0,0 @@ -dataset_type = 'ChairsSDHom' -data_root = 'data/ChairsSDHom' - -img_norm_cfg = dict(mean=[0., 0., 0.], std=[255., 255., 255.], to_rgb=False) - -global_transform = dict( - translates=(0.05, 0.05), - zoom=(1.0, 1.5), - shear=(0.86, 1.16), - rotate=(-10., 10.)) - -relative_transform = dict( - translates=(0.00375, 0.00375), - zoom=(0.985, 1.015), - shear=(1.0, 1.0), - rotate=(-1.0, 1.0)) - -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations'), - dict( - type='ColorJitter', - brightness=0.5, - contrast=0.5, - saturation=0.5, - hue=0.5), - dict(type='RandomGamma', gamma_range=(0.7, 1.5)), - dict(type='Normalize', **img_norm_cfg), - dict(type='GaussianNoise', sigma_range=(0, 0.04), clamp_range=(0., 1.)), - dict(type='RandomFlip', prob=0.5, direction='horizontal'), - dict(type='RandomFlip', prob=0.5, direction='vertical'), - dict( - type='RandomAffine', - global_transform=global_transform, - relative_transform=relative_transform), - dict(type='RandomCrop', crop_size=(320, 448)), - dict(type='DefaultFormatBundle'), - dict( - type='Collect', - keys=['imgs', 'flow_gt'], - meta_keys=[ - 'img_fields', 'ann_fields', 'filename1', 'filename2', - 'ori_filename1', 'ori_filename2', 'filename_flow', - 'ori_filename_flow', 'ori_shape', 'img_shape', 'img_norm_cfg' - ]), -] - -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations'), - dict(type='InputResize', exponent=6), - dict(type='Normalize', **img_norm_cfg), - dict(type='TestFormatBundle'), - dict( - type='Collect', - keys=['imgs'], - meta_keys=[ - 'flow_gt', 'filename1', 'filename2', 'ori_filename1', - 'ori_filename2', 'ori_shape', 'img_shape', 'img_norm_cfg', - 'scale_factor', 'pad_shape' - ]) -] - -chairssdhom_train = dict( - type=dataset_type, pipeline=train_pipeline, data_root=data_root) - -data = dict( - train_dataloader=dict( - samples_per_gpu=1, - workers_per_gpu=2, - drop_last=True, - persistent_workers=True), - val_dataloader=dict( - samples_per_gpu=1, - workers_per_gpu=2, - shuffle=False, - persistent_workers=True), - test_dataloader=dict(samples_per_gpu=1, workers_per_gpu=2, shuffle=False), - train=chairssdhom_train, - val=dict( - type=dataset_type, - pipeline=test_pipeline, - data_root=data_root, - test_mode=True), - test=dict( - type=dataset_type, - pipeline=test_pipeline, - data_root=data_root, - test_mode=True)) diff --git a/spaces/rinong/StyleGAN-NADA/e4e/models/psp.py b/spaces/rinong/StyleGAN-NADA/e4e/models/psp.py deleted file mode 100644 index 36c0b2b7b3fdd28bc32272d0d8fcff24e4848355..0000000000000000000000000000000000000000 --- a/spaces/rinong/StyleGAN-NADA/e4e/models/psp.py +++ /dev/null @@ -1,99 +0,0 @@ -import matplotlib - -matplotlib.use('Agg') -import torch -from torch import nn -from e4e.models.encoders import psp_encoders -from e4e.models.stylegan2.model import Generator -from e4e.configs.paths_config import model_paths - - -def get_keys(d, name): - if 'state_dict' in d: - d = d['state_dict'] - d_filt = {k[len(name) + 1:]: v for k, v in d.items() if k[:len(name)] == name} - return d_filt - - -class pSp(nn.Module): - 
- def __init__(self, opts, device): - super(pSp, self).__init__() - self.opts = opts - self.device = device - # Define architecture - self.encoder = self.set_encoder() - self.decoder = Generator(opts.stylegan_size, 512, 8, channel_multiplier=2) - self.face_pool = torch.nn.AdaptiveAvgPool2d((256, 256)) - # Load weights if needed - self.load_weights() - - def set_encoder(self): - if self.opts.encoder_type == 'GradualStyleEncoder': - encoder = psp_encoders.GradualStyleEncoder(50, 'ir_se', self.opts) - elif self.opts.encoder_type == 'Encoder4Editing': - encoder = psp_encoders.Encoder4Editing(50, 'ir_se', self.opts) - else: - raise Exception('{} is not a valid encoders'.format(self.opts.encoder_type)) - return encoder - - def load_weights(self): - if self.opts.checkpoint_path is not None: - print('Loading e4e over the pSp framework from checkpoint: {}'.format(self.opts.checkpoint_path)) - ckpt = torch.load(self.opts.checkpoint_path, map_location='cpu') - self.encoder.load_state_dict(get_keys(ckpt, 'encoder'), strict=True) - self.decoder.load_state_dict(get_keys(ckpt, 'decoder'), strict=True) - self.__load_latent_avg(ckpt) - else: - print('Loading encoders weights from irse50!') - encoder_ckpt = torch.load(model_paths['ir_se50']) - self.encoder.load_state_dict(encoder_ckpt, strict=False) - print('Loading decoder weights from pretrained!') - ckpt = torch.load(self.opts.stylegan_weights) - self.decoder.load_state_dict(ckpt['g_ema'], strict=False) - self.__load_latent_avg(ckpt, repeat=self.encoder.style_count) - - def forward(self, x, resize=True, latent_mask=None, input_code=False, randomize_noise=True, - inject_latent=None, return_latents=False, alpha=None): - if input_code: - codes = x - else: - codes = self.encoder(x) - # normalize with respect to the center of an average face - if self.opts.start_from_latent_avg: - if codes.ndim == 2: - codes = codes + self.latent_avg.repeat(codes.shape[0], 1, 1)[:, 0, :] - else: - codes = codes + self.latent_avg.repeat(codes.shape[0], 1, 1) - - if latent_mask is not None: - for i in latent_mask: - if inject_latent is not None: - if alpha is not None: - codes[:, i] = alpha * inject_latent[:, i] + (1 - alpha) * codes[:, i] - else: - codes[:, i] = inject_latent[:, i] - else: - codes[:, i] = 0 - - input_is_latent = not input_code - images, result_latent = self.decoder([codes], - input_is_latent=input_is_latent, - randomize_noise=randomize_noise, - return_latents=return_latents) - - if resize: - images = self.face_pool(images) - - if return_latents: - return images, result_latent - else: - return images - - def __load_latent_avg(self, ckpt, repeat=None): - if 'latent_avg' in ckpt: - self.latent_avg = ckpt['latent_avg'].to(self.device) - if repeat is not None: - self.latent_avg = self.latent_avg.repeat(repeat, 1) - else: - self.latent_avg = None diff --git a/spaces/rorallitri/biomedical-language-models/logs/HACK WIFI BY AIRCRACK NG How to Use Monitor Mode Airodump-ng and Aireplay-ng.md b/spaces/rorallitri/biomedical-language-models/logs/HACK WIFI BY AIRCRACK NG How to Use Monitor Mode Airodump-ng and Aireplay-ng.md deleted file mode 100644 index 2d984489d2651c125d0054da4fc37d8c668bf8d1..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/HACK WIFI BY AIRCRACK NG How to Use Monitor Mode Airodump-ng and Aireplay-ng.md +++ /dev/null @@ -1,23 +0,0 @@ -
    -

    To crack WPA/WPA2 wifi networks, we will utilize the handshake packets. These are four packets transmitted between the router and the client when establishing a network connection. To capture packets on a specific network, we will use the syntax below.
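    The exact capture syntax is not reproduced in this excerpt; as a sketch, a typical airodump-ng invocation for capturing the handshake of one network looks like the line below, where the BSSID, channel number and monitor-mode interface name (wlan0mon here) are placeholders you must replace with your own values, and "capture" is just an assumed output-file prefix:

    airodump-ng --bssid <AP_BSSID> --channel <CH> --write capture wlan0mon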

    -


    -

    HACK WIFI BY AIRCRACK NG


    Download File · https://tinurll.com/2uznbM



    -

    Thanks for the info really useful stuff. Really appreciate it.
    I have one question and still, you have answered previously in the comments here but I am a bit confused.
    aircrack-ng -1 -a 1 -b BSSID cap_file -w wordlist
    I know what to use for:
    BSSID and cap_file
    What do I need to use for wordlist ?
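    For what it is worth, the missing piece in this reader's command is simply a plain-text password list. On Kali Linux a common choice is the bundled rockyou list (the path below is an assumption for that setup, and the file may need to be unpacked first); a typical WPA/WPA2 run against a captured handshake then looks like this, with the BSSID and capture file name as placeholders:

    aircrack-ng -a 2 -b <AP_BSSID> -w /usr/share/wordlists/rockyou.txt capture-01.cap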

    -

    This software is aimed at programming, security testing, and hacking experts because it takes real skill to use correctly and efficiently. It is a handy suite of tools for assessing the security of your Wi-Fi network, but the Windows version is especially demanding: you need the coding skills to develop your own DLLs to link Aircrack-ng to your wireless card, since they cannot simply be downloaded and the software will not function without them. As a result, this software has a limited user base.

    -

    This guide will show you how to crack pre-shared key WPA/WPA2 networks using the Aircrack-ng tool, which is used to crack wifi passwords. We are going to discuss what pre-shared keys are and what packet injection is, then verify whether your Network Interface Card (NIC) supports packet injection, and finally go ahead and crack the WPA/WPA2 wireless network.
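    As a minimal sketch of the injection check (assuming the card is already in monitor mode as wlan0mon), aireplay-ng's built-in injection test is a quick way to verify support:

    aireplay-ng --test wlan0mon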

    -

    Note: We're only teaching you for educational purposes and to broaden your horizons. Because we know that both ethical and non-ethical hackers use these tools, Techofide will not be held liable for any unlawful/false actions you engage in.

    -

    We can use wifi thanks to a network interface card (NIC), a hardware component installed in the computer that allows it to communicate with other computers across a network and gives the machine a dedicated network connection. To see your NIC details, type the command below.
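    The command itself has not survived in this copy of the article; as an assumption, on most Linux systems either of the following will show your wireless interface details (iwconfig comes from the older wireless-tools package, iw from the newer stack):

    iwconfig
    iw dev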

    -

    The information provided on the cybersecurityman is for educational purposes only. I am in no way responsible for any misuse of the information provided. All the information here is meant to provide the reader with the knowledge to defend against hackers and prevent the attacks discussed here. At no time should any reader attempt to use this information for illegal purposes.

    -

    -

    In this aircrack tutorial, we will first sniff the air for packets. Sniffing the air is a simple and totally passive task. No one can really tell that a hacker is sniffing the packets, since it all happens wirelessly.

    -

    We will first use airmon-ng in this aircrack tutorial to create a monitor-mode interface (mon0) to sniff the wireless network. The aircrack developers created this brilliant tool with the ability to hop between channels and sniff packets. Note that the client and AP need to be on the same channel to communicate.
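    A minimal sketch of that step, putting a card named wlan0 into monitor mode (older airmon-ng releases create an interface called mon0, newer ones typically call it wlan0mon):

    airmon-ng start wlan0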

    -

    Passive sniffing takes a lot of time since we need to wait for legitimate IV packets. In this aircrack tutorial, we will use an ARP replay attack to boost weak IV traffic by replicating ARP requests from a legitimate device to the AP.

    -

    The aireplay-ng command in this aircrack tutorial will fetch ARP packets from the legitimate client specified by the MAC address (-h option), and start sending them to the AP to get more packets with weak IVs.
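    A typical ARP request replay invocation looks like the following sketch, where the AP BSSID, client MAC and monitor interface name are placeholders:

    aireplay-ng -3 -b <AP_BSSID> -h <CLIENT_MAC> wlan0mon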

    -

    Now in another scenario, a hacker sends de-authentication packets to either one or all legitimate clients. The client(s) will then try to authenticate with the AP, which will eventually increase weak IV traffic.
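    A deauthentication burst of this kind is usually sent with aireplay-ng's attack 0; in the hedged example below the packet count, AP BSSID and client MAC are placeholders, and omitting -c targets all clients of that AP:

    aireplay-ng -0 5 -a <AP_BSSID> -c <CLIENT_MAC> wlan0mon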

    -

    aircrack-ng is an 802.11a/b/g WEP/WPA cracking program that can recover a 40-bit, 104-bit, 256-bit or 512-bit WEP key once enough encrypted packets have been gathered. It can also attack WPA1/2 networks with some advanced methods or simply by brute force.
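    Once enough IVs have been captured, the WEP key recovery itself is a single statistical run over the capture file; a sketch, with the BSSID and capture file name as placeholders:

    aircrack-ng -a 1 -b <AP_BSSID> capture-01.cap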

    -

    Option 1: Passive sniffing + Decryption: For this attack to be successful, the hacker must be around to sniff the 4-way handshake when the target device joins the network. This handshake is used to generate the WPA2 pairwise transient key (PTK), which in turn is used to encrypt and authenticate all traffic between a host and the access point. The attacker can potentially induce a 4-way handshake by forcing the device to deauthenticate from the AP.

    -

    In order to sniff the traffic of another computer on the same wifi network as you, you need to capture the 4-way handshake when their device associates with the router. To do this you must configure your device to capture wifi traffic.
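    Assuming you already know the pre-shared key and have captured the target's 4-way handshake, airdecap-ng can strip the WPA encryption from the capture so it can be opened in an ordinary packet analyser; the ESSID, passphrase, BSSID and capture file name below are placeholders:

    airdecap-ng -e <ESSID> -p <passphrase> -b <AP_BSSID> capture-01.cap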

    -

    There is a list of compatible wireless adapters on the aircrack-ng website, and I think the Alfa AWUS051NH v2 is great. Some people say it is expensive, but last time I checked on Google Shopping, it cost less than half an Apple mouse.

    -

    Aircrack-ng is a wireless network scanner that includes WPA-PSK and WEP encryption key cracking. This system is often used by hackers to snoop, and it has a detection evasion system. The Aircrack-ng system is actually a suite of programs that provide both intel-gathering and attack facilities.

    Can I use aircrack-ng on Android? The Aircrack-ng software isn't available for Android. A number of users have tried to adapt the programs so that they will run on the Android operating system, but this is a difficult prospect, and the best version of the tool is available for free for Linux.

    Is there aircrack-ng for Windows? Aircrack-ng was written for Linux, and if you get the free Kali Linux system you will find the tool bundled into the package. There are also versions available for Windows, macOS, FreeBSD, OpenBSD, NetBSD, Solaris, and eComStation 2.

    (The passage above is drawn from an Aircrack-ng review by Stephen Cooper, updated January 4, 2023.)

    -
    -
    \ No newline at end of file diff --git a/spaces/salemamassi/GeneralPdfChatBot/app.py b/spaces/salemamassi/GeneralPdfChatBot/app.py deleted file mode 100644 index 7810f6e2ef8a56a591da3465e987ea5f98bbf961..0000000000000000000000000000000000000000 --- a/spaces/salemamassi/GeneralPdfChatBot/app.py +++ /dev/null @@ -1,74 +0,0 @@ -import gradio as gr -import os -import tempfile -from langchain.document_loaders import UnstructuredPDFLoader -from langchain.indexes import VectorstoreIndexCreator -from langchain.chains import RetrievalQA -from langchain.schema import AIMessage, HumanMessage -from langchain.vectorstores import FAISS -from langchain.embeddings import HuggingFaceEmbeddings -from langchain.text_splitter import CharacterTextSplitter -from langchain import HuggingFaceHub - -# Set your API keys -API_KEY = os.environ["API_KEY"] - -# Create a temporary upload directory - -# Define global variables for loaders and index -index = None - - -def chat(message,history): - global index - history_langchain_format = [] - for human, ai in history: - history_langchain_format.append(HumanMessage(content=human)) - history_langchain_format.append(AIMessage(content=ai)) - history_langchain_format.append(HumanMessage(content=message)) - history_langchain_format.append(HumanMessage(content=message)) - # Create the index (update index) - llm2 = HuggingFaceHub(repo_id="declare-lab/flan-alpaca-large", model_kwargs={"temperature": 0, "max_length": 512},huggingfacehub_api_token = API_KEY ) - chain = RetrievalQA.from_chain_type(llm=llm2, - chain_type="stuff", - retriever=index.vectorstore.as_retriever(), - input_key="question") - # Perform question-answering on the uploaded PDF with the user's question - gpt_response = chain.run("Based on the file you have processed, provide a related answer to this question: "+ message) - return gpt_response - - -# Create a Gradio interface for chat -chat_interface = gr.ChatInterface( - chat, - theme=gr.themes.Soft() -) - - - -with gr.Blocks(theme=gr.themes.Soft()) as demo: - with gr.Row(): - with gr.Column(scale=1): - with gr.Row(): - upload_file = gr.File(label="Upload a PDF",file_types=["pdf"]) - with gr.Row(): - upload_button = gr.Button(label="Upload a PDF") - with gr.Row(): - text = gr.Textbox(label="Status") - def load_file(pdf_file): - global index - pdf_loader = UnstructuredPDFLoader(pdf_file.name) - index = VectorstoreIndexCreator( - embedding=HuggingFaceEmbeddings(), - text_splitter=CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) - ).from_loaders([pdf_loader]) - return "DONE ✅" - upload_button.click(load_file, [upload_file], text) - with gr.Column(scale=2): - chat_interface = gr.ChatInterface( - chat, - theme=gr.themes.Soft() - ) - -demo.queue().launch(inline=False) - diff --git a/spaces/sarahyoung/taltech/README.md b/spaces/sarahyoung/taltech/README.md deleted file mode 100644 index fa5b98f0be53a7e424a2f1813d39c8a52a09c47f..0000000000000000000000000000000000000000 --- a/spaces/sarahyoung/taltech/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Taltech -emoji: 🔥 -colorFrom: gray -colorTo: blue -sdk: gradio -sdk_version: 3.36.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/sarinam/speaker-anonymization/IMSToucan/Layers/Convolution.py b/spaces/sarinam/speaker-anonymization/IMSToucan/Layers/Convolution.py deleted file mode 100644 index e6e56e85d5908b0db5fceaea1e701d197a824d4b..0000000000000000000000000000000000000000 --- 
a/spaces/sarinam/speaker-anonymization/IMSToucan/Layers/Convolution.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 2020 Johns Hopkins University (Shinji Watanabe) -# Northwestern Polytechnical University (Pengcheng Guo) -# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) -# Adapted by Florian Lux 2021 - - -from torch import nn - - -class ConvolutionModule(nn.Module): - """ - ConvolutionModule in Conformer model. - - Args: - channels (int): The number of channels of conv layers. - kernel_size (int): Kernel size of conv layers. - - """ - - def __init__(self, channels, kernel_size, activation=nn.ReLU(), bias=True): - super(ConvolutionModule, self).__init__() - # kernel_size should be an odd number for 'SAME' padding - assert (kernel_size - 1) % 2 == 0 - - self.pointwise_conv1 = nn.Conv1d(channels, 2 * channels, kernel_size=1, stride=1, padding=0, bias=bias, ) - self.depthwise_conv = nn.Conv1d(channels, channels, kernel_size, stride=1, padding=(kernel_size - 1) // 2, groups=channels, bias=bias, ) - self.norm = nn.GroupNorm(num_groups=32, num_channels=channels) - self.pointwise_conv2 = nn.Conv1d(channels, channels, kernel_size=1, stride=1, padding=0, bias=bias, ) - self.activation = activation - - def forward(self, x): - """ - Compute convolution module. - - Args: - x (torch.Tensor): Input tensor (#batch, time, channels). - - Returns: - torch.Tensor: Output tensor (#batch, time, channels). - - """ - # exchange the temporal dimension and the feature dimension - x = x.transpose(1, 2) - - # GLU mechanism - x = self.pointwise_conv1(x) # (batch, 2*channel, dim) - x = nn.functional.glu(x, dim=1) # (batch, channel, dim) - - # 1D Depthwise Conv - x = self.depthwise_conv(x) - x = self.activation(self.norm(x)) - - x = self.pointwise_conv2(x) - - return x.transpose(1, 2) diff --git a/spaces/scedlatioru/img-to-music/example/Athentech Imaging Perfectly Clear Plug-In For Photoshop 1.7.3l LINK.md b/spaces/scedlatioru/img-to-music/example/Athentech Imaging Perfectly Clear Plug-In For Photoshop 1.7.3l LINK.md deleted file mode 100644 index c4ead97725913f2bdc43aae7695098a8ef7e1838..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Athentech Imaging Perfectly Clear Plug-In For Photoshop 1.7.3l LINK.md +++ /dev/null @@ -1,7 +0,0 @@ -
    -

    In its default mode, Perfectly Clear improved just about every photo I threw at it. Of course, the old garbage-in, garbage-out rule still applies, so don't expect an awful image to magically look good. The plug-in often made good test photos look stunning. If you're not happy with the preset result, you do have adjustment choices: sliders let you control exposure and contrast, as well as sharpening and noise. But if you open your photo in Perfectly Clear and use the side-by-side before-and-after view, your original image will in most cases look washed out and flat in comparison.

    -

    Athentech Imaging Perfectly Clear Plug-In For Photoshop 1.7.3l


    Download File ⭐ https://gohhs.com/2uEA2Y



    -

    While the individual plug-ins share a common interface, the complete collection takes the workspace design to a new level of usability. Opening an image automatically applies the Details preset, which amazingly is all I felt I needed about 90 percent of the time! Disclosure arrows below the presets move you forward or backward through the four plug-ins (five if you include the two sets of presets available in Perfect Skin), and a pop-up list allows you to choose any of the available presets in the four plug-ins directly, or your user-defined preset, while also indicating which preset is currently being applied. The pop-up list can greatly simplify the creation of a Photoshop action for batch processing.

    -

    While there are programs that offer bits of the automation that Perfectly Clear Complete provides, none come close to its range of presets. DxO OpticsPro 11 does a better job with the initial adjustment, and it works directly on raw files, which Perfectly Clear must first process through Adobe Camera Raw or Lightroom, but OpticsPro costs more and isn't set up for portrait adjustments.

    -
    -
    \ No newline at end of file diff --git a/spaces/scedlatioru/img-to-music/example/Fanuc Pc Fapt Cut FULL Version Download.md b/spaces/scedlatioru/img-to-music/example/Fanuc Pc Fapt Cut FULL Version Download.md deleted file mode 100644 index ff9a79c4b78bb59a32266e16e18e1a11dcb1301a..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Fanuc Pc Fapt Cut FULL Version Download.md +++ /dev/null @@ -1,106 +0,0 @@ -
    -

    FANUC PC FAPT CUT i: A Comprehensive Review

    -

    If you are looking for reliable and efficient software for programming CNC lathes, you might want to consider FANUC PC FAPT CUT i. It is a Windows-based system that allows you to create and edit programs for various types of CNC lathes, covering turning, milling, drilling and tapping operations. In this article, we will review the features, benefits and drawbacks of FANUC PC FAPT CUT i, and show you how to download the full version for free.

    - -

    What is FANUC PC FAPT CUT i?

    -

    FANUC PC FAPT CUT i is software developed by FANUC, the world's largest lathe control manufacturer. It is based on the already successful FAPT machine control option, a conversational programming system that simplifies the programming process and reduces errors. FANUC PC FAPT CUT i is designed to work with FANUC CNC controllers such as the Series 0i, 15i, 16i, 18i, 21i and 30i.

    -

    fanuc pc fapt cut FULL Version download


    Download Zip ---> https://gohhs.com/2uEAjH



    - -

    What are the features of FANUC PC FAPT CUT i?

    -

    FANUC PC FAPT CUT i has many features that make it a powerful and user-friendly software for CNC lathe programming. Some of these features are:

    -
      -
    • It supports various types of machining operations, such as turning, facing, grooving, threading, boring, drilling, milling and tapping.
    • -
    • It allows you to create and edit programs using graphical icons, menus and dialogs.
    • -
    • It provides a simulation mode that lets you check the program before running it on the machine.
    • -
    • It generates G-code automatically from the program.
    • -
    • It has a built-in editor that enables you to modify the G-code manually if needed.
    • -
    • It can import and export programs in various formats, such as DXF, IGES and ISO.
    • -
    • It can communicate with the CNC controller via RS-232 or Ethernet.
    • -
    • It has a help function that provides detailed explanations and examples for each function.
    • -
    - -

    What are the benefits of FANUC PC FAPT CUT i?

    -

    FANUC PC FAPT CUT i has many benefits that make it a valuable software for CNC lathe programmers. Some of these benefits are:

    -
      -
    • It reduces the programming time and effort by providing a simple and intuitive interface.
    • -
    • It improves the machining quality and accuracy by generating optimal G-code.
    • -
    • It enhances the productivity and efficiency by allowing you to create and edit programs on a PC without occupying the machine.
    • -
    • It increases the flexibility and compatibility by supporting various types of CNC lathes and controllers.
    • -
    • It saves cost and space by eliminating the need for a separate programming device.
    • -
    - -

    What are the drawbacks of FANUC PC FAPT CUT i?

    -

    FANUC PC FAPT CUT i is not a perfect software, and it has some drawbacks that you should be aware of before using it. Some of these drawbacks are:

    -
      -
    • It requires a license key to activate the full version.
    • -
    • It may not support some advanced functions or features that are available on other software or controllers.
    • -
    • It may have some bugs or errors that affect the performance or functionality of the software.
    • -
    - -

    How to download FANUC PC FAPT CUT i full version for free?

    -

    If you want to try FANUC PC FAPT CUT i full version for free, you can download it from the developer's website. However, you will need a license key to activate it. The license key can be obtained from your local FANUC dealer or distributor. Alternatively, you can search online for some websites that offer free download links or crack files for FANUC PC FAPT CUT i. However, we do not recommend this option as it may be illegal or unsafe. You may risk violating the copyright laws or exposing your computer to viruses or malware.

    -

    - -

    Conclusion

    -

    FANUC PC FAPT CUT i is a Windows-based system for programming CNC lathes. It has many features, benefits and drawbacks that make it a suitable choice for CNC lathe programmers. If you want to download FANUC PC FAPT CUT i full version for free, you can visit the developer's website or contact your local FANUC dealer or distributor. However, you should be careful about downloading from unauthorized sources as they may be illegal or unsafe.

    - -

    We hope this article has given you some useful information about FANUC PC FAPT CUT i. If you have any questions or comments, please feel free to leave them below.

    -

    How to install and use FANUC PC FAPT CUT i?

    -

    Installing and using FANUC PC FAPT CUT i is not a difficult task, but you need to follow some steps carefully. Here are the steps you need to take:

    -
      -
    1. Download FANUC PC FAPT CUT i from the developer's website or from a trusted source. Make sure you have a license key to activate the full version.
    2. -
    3. Run the setup file and follow the instructions on the screen. Choose the destination folder and the language for the software.
    4. -
    5. Enter the license key when prompted and complete the installation process.
    6. -
    7. Launch FANUC PC FAPT CUT i from the desktop shortcut or the start menu.
    8. -
    9. Select the type of CNC lathe and controller you want to program from the menu.
    10. -
    11. Create a new program or open an existing one from the file menu.
    12. -
    13. Use the icons, menus and dialogs to enter the program parameters, such as tool data, geometry data, machining data and cycle data.
    14. -
    15. Use the simulation mode to check the program for errors and collisions.
    16. -
    17. Generate the G-code from the program and save it as a file (a short, generic example of such output appears after this list).
    18. -
    19. Transfer the G-code file to the CNC controller via RS-232 or Ethernet.
    20. -
    21. Run the program on the CNC lathe and monitor the machining process.
    22. -
    - -
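    To give a feel for what generated output looks like, here is a purely illustrative, generic FANUC-style turning block for a single straight cut; it is not actual PC FAPT CUT i output, and the program number, tool number, speed, feed and coordinates are made-up values (text in parentheses is FANUC comment syntax):

    O0001 (SIMPLE TURNING PASS)
    T0101
    G97 S1200 M03
    G00 X52.0 Z2.0
    G01 Z-40.0 F0.2
    G00 X150.0 Z100.0
    M30

    Here G97 holds a constant spindle speed, G00 is a rapid move and G01 a cutting move; a real program produced by the software would of course reflect the tool, geometry, machining and cycle data entered in the steps above.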

    What are some tips and tricks for FANUC PC FAPT CUT i?

    -

    FANUC PC FAPT CUT i is software that can help you create and edit programs for CNC lathes easily and efficiently. Still, there are some tips and tricks that can make your programming experience even better. Here are some of them:

    -
      -
    • Use the help function to learn more about each function and feature of the software. You can access it by pressing F1 or clicking on the question mark icon.
    • -
    • Use the keyboard shortcuts to perform common tasks faster. You can find them in the help menu or in the user manual.
    • -
    • Use the copy and paste functions to duplicate or reuse parts of your program. You can also use the insert and delete functions to modify your program.
    • -
    • Use the undo and redo functions to correct your mistakes or restore your changes. You can also use the backup and restore functions to save and load your program.
    • -
    • Use the zoom and pan functions to view your program in different scales and angles. You can also use the rotate and mirror functions to change the orientation of your program.
    • -
    • Use the comment function to add notes or explanations to your program. You can also use the print function to print your program or save it as a PDF file.
    • -
    - -

    Conclusion

    -

    FANUC PC FAPT CUT i is a Windows-based system for programming CNC lathes. It has many features, benefits and drawbacks that make it a suitable choice for CNC lathe programmers, and some tips and tricks that can make your programming experience even better. If you want to download FANUC PC FAPT CUT i full version for free, you can visit the developer's website or contact your local FANUC dealer or distributor. However, you should be careful about downloading from unauthorized sources as they may be illegal or unsafe.

    - -

    We hope this article has given you some useful information about FANUC PC FAPT CUT i. If you have any questions or comments, please feel free to leave them below.

    -

    How to troubleshoot FANUC PC FAPT CUT i?

    -

    FANUC PC FAPT CUT i is software that usually works smoothly and reliably. However, you may sometimes encounter problems or errors that affect its performance or functionality. Here are some common issues and solutions for FANUC PC FAPT CUT i:

    -
      -
    • If you cannot install or run FANUC PC FAPT CUT i, make sure you have the minimum system requirements and the correct license key. You may also need to update your Windows or drivers.
    • -
    • If you cannot communicate with the CNC controller, make sure you have the correct cable and port settings. You may also need to check the firewall or antivirus settings on your PC.
    • -
    • If you cannot generate or transfer the G-code file, make sure you have enough disk space and memory on your PC and CNC controller. You may also need to check the file format and compatibility.
    • -
    • If you encounter any errors or warnings in the program or G-code, make sure you have entered the correct data and parameters. You may also need to check the syntax and logic of your program.
    • -
    • If you have any questions or doubts about FANUC PC FAPT CUT i, make sure you consult the user manual or the help function. You may also contact FANUC technical support or your local dealer or distributor.
    • -
    - -

    What are some alternatives to FANUC PC FAPT CUT i?

    -

    FANUC PC FAPT CUT i is software that can meet most of your needs for CNC lathe programming, but it may not be the best option for everyone. Depending on your preferences, budget and requirements, you may want to consider some alternatives. Here are a few of them:

    -
      -
    • Mach3: This is a popular software for controlling CNC machines, including lathes, mills, routers and plasma cutters. It has a simple and customizable interface, a large user community and a low price.
    • -
    • Mastercam: This is a powerful software for CAD/CAM design and machining. It supports various types of CNC machines, including lathes, mills, routers and wire EDMs. It has a comprehensive set of features, tools and options for creating complex programs.
    • -
    • CAMWorks: This is a fully integrated software for SolidWorks that allows you to create and edit programs for CNC machines within the SolidWorks environment. It supports various types of CNC machines, including lathes, mills, routers and multi-axis machines. It has a feature-based approach that automates the programming process.
    • -
    - -

    Conclusion

    -

    FANUC PC FAPT CUT i is a Windows-based system for programming CNC lathes. It has many features, benefits and drawbacks that make it a suitable choice for CNC lathe programmers, along with some tips and tricks that can make your programming experience even better. However, it also has some problems and errors that may affect its performance or functionality. Moreover, it may not be the best option for everyone, and there are some alternatives that you may want to consider. If you want to download FANUC PC FAPT CUT i full version for free, you can visit the developer's website or contact your local FANUC dealer or distributor. However, you should be careful about downloading from unauthorized sources as they may be illegal or unsafe.

    - -

    We hope this article has given you some useful information about FANUC PC FAPT CUT i. If you have any questions or comments, please feel free to leave them below.

    -


    -
    -
    \ No newline at end of file diff --git a/spaces/sczhou/CodeFormer/CodeFormer/basicsr/ops/__init__.py b/spaces/sczhou/CodeFormer/CodeFormer/basicsr/ops/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/sczhou/CodeFormer/CodeFormer/facelib/detection/matlab_cp2tform.py b/spaces/sczhou/CodeFormer/CodeFormer/facelib/detection/matlab_cp2tform.py deleted file mode 100644 index b2a8b54a91709c71437e15c68d3be9a9b0a20a34..0000000000000000000000000000000000000000 --- a/spaces/sczhou/CodeFormer/CodeFormer/facelib/detection/matlab_cp2tform.py +++ /dev/null @@ -1,317 +0,0 @@ -import numpy as np -from numpy.linalg import inv, lstsq -from numpy.linalg import matrix_rank as rank -from numpy.linalg import norm - - -class MatlabCp2tormException(Exception): - - def __str__(self): - return 'In File {}:{}'.format(__file__, super.__str__(self)) - - -def tformfwd(trans, uv): - """ - Function: - ---------- - apply affine transform 'trans' to uv - - Parameters: - ---------- - @trans: 3x3 np.array - transform matrix - @uv: Kx2 np.array - each row is a pair of coordinates (x, y) - - Returns: - ---------- - @xy: Kx2 np.array - each row is a pair of transformed coordinates (x, y) - """ - uv = np.hstack((uv, np.ones((uv.shape[0], 1)))) - xy = np.dot(uv, trans) - xy = xy[:, 0:-1] - return xy - - -def tforminv(trans, uv): - """ - Function: - ---------- - apply the inverse of affine transform 'trans' to uv - - Parameters: - ---------- - @trans: 3x3 np.array - transform matrix - @uv: Kx2 np.array - each row is a pair of coordinates (x, y) - - Returns: - ---------- - @xy: Kx2 np.array - each row is a pair of inverse-transformed coordinates (x, y) - """ - Tinv = inv(trans) - xy = tformfwd(Tinv, uv) - return xy - - -def findNonreflectiveSimilarity(uv, xy, options=None): - options = {'K': 2} - - K = options['K'] - M = xy.shape[0] - x = xy[:, 0].reshape((-1, 1)) # use reshape to keep a column vector - y = xy[:, 1].reshape((-1, 1)) # use reshape to keep a column vector - - tmp1 = np.hstack((x, y, np.ones((M, 1)), np.zeros((M, 1)))) - tmp2 = np.hstack((y, -x, np.zeros((M, 1)), np.ones((M, 1)))) - X = np.vstack((tmp1, tmp2)) - - u = uv[:, 0].reshape((-1, 1)) # use reshape to keep a column vector - v = uv[:, 1].reshape((-1, 1)) # use reshape to keep a column vector - U = np.vstack((u, v)) - - # We know that X * r = U - if rank(X) >= 2 * K: - r, _, _, _ = lstsq(X, U, rcond=-1) - r = np.squeeze(r) - else: - raise Exception('cp2tform:twoUniquePointsReq') - sc = r[0] - ss = r[1] - tx = r[2] - ty = r[3] - - Tinv = np.array([[sc, -ss, 0], [ss, sc, 0], [tx, ty, 1]]) - T = inv(Tinv) - T[:, 2] = np.array([0, 0, 1]) - - return T, Tinv - - -def findSimilarity(uv, xy, options=None): - options = {'K': 2} - - # uv = np.array(uv) - # xy = np.array(xy) - - # Solve for trans1 - trans1, trans1_inv = findNonreflectiveSimilarity(uv, xy, options) - - # Solve for trans2 - - # manually reflect the xy data across the Y-axis - xyR = xy - xyR[:, 0] = -1 * xyR[:, 0] - - trans2r, trans2r_inv = findNonreflectiveSimilarity(uv, xyR, options) - - # manually reflect the tform to undo the reflection done on xyR - TreflectY = np.array([[-1, 0, 0], [0, 1, 0], [0, 0, 1]]) - - trans2 = np.dot(trans2r, TreflectY) - - # Figure out if trans1 or trans2 is better - xy1 = tformfwd(trans1, uv) - norm1 = norm(xy1 - xy) - - xy2 = tformfwd(trans2, uv) - norm2 = norm(xy2 - xy) - - if norm1 <= norm2: - return trans1, trans1_inv - else: - trans2_inv = inv(trans2) - return trans2, 
trans2_inv - - -def get_similarity_transform(src_pts, dst_pts, reflective=True): - """ - Function: - ---------- - Find Similarity Transform Matrix 'trans': - u = src_pts[:, 0] - v = src_pts[:, 1] - x = dst_pts[:, 0] - y = dst_pts[:, 1] - [x, y, 1] = [u, v, 1] * trans - - Parameters: - ---------- - @src_pts: Kx2 np.array - source points, each row is a pair of coordinates (x, y) - @dst_pts: Kx2 np.array - destination points, each row is a pair of transformed - coordinates (x, y) - @reflective: True or False - if True: - use reflective similarity transform - else: - use non-reflective similarity transform - - Returns: - ---------- - @trans: 3x3 np.array - transform matrix from uv to xy - trans_inv: 3x3 np.array - inverse of trans, transform matrix from xy to uv - """ - - if reflective: - trans, trans_inv = findSimilarity(src_pts, dst_pts) - else: - trans, trans_inv = findNonreflectiveSimilarity(src_pts, dst_pts) - - return trans, trans_inv - - -def cvt_tform_mat_for_cv2(trans): - """ - Function: - ---------- - Convert Transform Matrix 'trans' into 'cv2_trans' which could be - directly used by cv2.warpAffine(): - u = src_pts[:, 0] - v = src_pts[:, 1] - x = dst_pts[:, 0] - y = dst_pts[:, 1] - [x, y].T = cv_trans * [u, v, 1].T - - Parameters: - ---------- - @trans: 3x3 np.array - transform matrix from uv to xy - - Returns: - ---------- - @cv2_trans: 2x3 np.array - transform matrix from src_pts to dst_pts, could be directly used - for cv2.warpAffine() - """ - cv2_trans = trans[:, 0:2].T - - return cv2_trans - - -def get_similarity_transform_for_cv2(src_pts, dst_pts, reflective=True): - """ - Function: - ---------- - Find Similarity Transform Matrix 'cv2_trans' which could be - directly used by cv2.warpAffine(): - u = src_pts[:, 0] - v = src_pts[:, 1] - x = dst_pts[:, 0] - y = dst_pts[:, 1] - [x, y].T = cv_trans * [u, v, 1].T - - Parameters: - ---------- - @src_pts: Kx2 np.array - source points, each row is a pair of coordinates (x, y) - @dst_pts: Kx2 np.array - destination points, each row is a pair of transformed - coordinates (x, y) - reflective: True or False - if True: - use reflective similarity transform - else: - use non-reflective similarity transform - - Returns: - ---------- - @cv2_trans: 2x3 np.array - transform matrix from src_pts to dst_pts, could be directly used - for cv2.warpAffine() - """ - trans, trans_inv = get_similarity_transform(src_pts, dst_pts, reflective) - cv2_trans = cvt_tform_mat_for_cv2(trans) - - return cv2_trans - - -if __name__ == '__main__': - """ - u = [0, 6, -2] - v = [0, 3, 5] - x = [-1, 0, 4] - y = [-1, -10, 4] - - # In Matlab, run: - # - # uv = [u'; v']; - # xy = [x'; y']; - # tform_sim=cp2tform(uv,xy,'similarity'); - # - # trans = tform_sim.tdata.T - # ans = - # -0.0764 -1.6190 0 - # 1.6190 -0.0764 0 - # -3.2156 0.0290 1.0000 - # trans_inv = tform_sim.tdata.Tinv - # ans = - # - # -0.0291 0.6163 0 - # -0.6163 -0.0291 0 - # -0.0756 1.9826 1.0000 - # xy_m=tformfwd(tform_sim, u,v) - # - # xy_m = - # - # -3.2156 0.0290 - # 1.1833 -9.9143 - # 5.0323 2.8853 - # uv_m=tforminv(tform_sim, x,y) - # - # uv_m = - # - # 0.5698 1.3953 - # 6.0872 2.2733 - # -2.6570 4.3314 - """ - u = [0, 6, -2] - v = [0, 3, 5] - x = [-1, 0, 4] - y = [-1, -10, 4] - - uv = np.array((u, v)).T - xy = np.array((x, y)).T - - print('\n--->uv:') - print(uv) - print('\n--->xy:') - print(xy) - - trans, trans_inv = get_similarity_transform(uv, xy) - - print('\n--->trans matrix:') - print(trans) - - print('\n--->trans_inv matrix:') - print(trans_inv) - - print('\n---> apply transform to uv') - 
print('\nxy_m = uv_augmented * trans') - uv_aug = np.hstack((uv, np.ones((uv.shape[0], 1)))) - xy_m = np.dot(uv_aug, trans) - print(xy_m) - - print('\nxy_m = tformfwd(trans, uv)') - xy_m = tformfwd(trans, uv) - print(xy_m) - - print('\n---> apply inverse transform to xy') - print('\nuv_m = xy_augmented * trans_inv') - xy_aug = np.hstack((xy, np.ones((xy.shape[0], 1)))) - uv_m = np.dot(xy_aug, trans_inv) - print(uv_m) - - print('\nuv_m = tformfwd(trans_inv, xy)') - uv_m = tformfwd(trans_inv, xy) - print(uv_m) - - uv_m = tforminv(trans, xy) - print('\nuv_m = tforminv(trans, xy)') - print(uv_m) diff --git a/spaces/sczhou/ProPainter/RAFT/utils/utils.py b/spaces/sczhou/ProPainter/RAFT/utils/utils.py deleted file mode 100644 index 5f32d281c1c46353a0a2bf36b0550adb74125c65..0000000000000000000000000000000000000000 --- a/spaces/sczhou/ProPainter/RAFT/utils/utils.py +++ /dev/null @@ -1,82 +0,0 @@ -import torch -import torch.nn.functional as F -import numpy as np -from scipy import interpolate - - -class InputPadder: - """ Pads images such that dimensions are divisible by 8 """ - def __init__(self, dims, mode='sintel'): - self.ht, self.wd = dims[-2:] - pad_ht = (((self.ht // 8) + 1) * 8 - self.ht) % 8 - pad_wd = (((self.wd // 8) + 1) * 8 - self.wd) % 8 - if mode == 'sintel': - self._pad = [pad_wd//2, pad_wd - pad_wd//2, pad_ht//2, pad_ht - pad_ht//2] - else: - self._pad = [pad_wd//2, pad_wd - pad_wd//2, 0, pad_ht] - - def pad(self, *inputs): - return [F.pad(x, self._pad, mode='replicate') for x in inputs] - - def unpad(self,x): - ht, wd = x.shape[-2:] - c = [self._pad[2], ht-self._pad[3], self._pad[0], wd-self._pad[1]] - return x[..., c[0]:c[1], c[2]:c[3]] - -def forward_interpolate(flow): - flow = flow.detach().cpu().numpy() - dx, dy = flow[0], flow[1] - - ht, wd = dx.shape - x0, y0 = np.meshgrid(np.arange(wd), np.arange(ht)) - - x1 = x0 + dx - y1 = y0 + dy - - x1 = x1.reshape(-1) - y1 = y1.reshape(-1) - dx = dx.reshape(-1) - dy = dy.reshape(-1) - - valid = (x1 > 0) & (x1 < wd) & (y1 > 0) & (y1 < ht) - x1 = x1[valid] - y1 = y1[valid] - dx = dx[valid] - dy = dy[valid] - - flow_x = interpolate.griddata( - (x1, y1), dx, (x0, y0), method='nearest', fill_value=0) - - flow_y = interpolate.griddata( - (x1, y1), dy, (x0, y0), method='nearest', fill_value=0) - - flow = np.stack([flow_x, flow_y], axis=0) - return torch.from_numpy(flow).float() - - -def bilinear_sampler(img, coords, mode='bilinear', mask=False): - """ Wrapper for grid_sample, uses pixel coordinates """ - H, W = img.shape[-2:] - xgrid, ygrid = coords.split([1,1], dim=-1) - xgrid = 2*xgrid/(W-1) - 1 - ygrid = 2*ygrid/(H-1) - 1 - - grid = torch.cat([xgrid, ygrid], dim=-1) - img = F.grid_sample(img, grid, align_corners=True) - - if mask: - mask = (xgrid > -1) & (ygrid > -1) & (xgrid < 1) & (ygrid < 1) - return img, mask.float() - - return img - - -def coords_grid(batch, ht, wd): - coords = torch.meshgrid(torch.arange(ht), torch.arange(wd)) - coords = torch.stack(coords[::-1], dim=0).float() - return coords[None].repeat(batch, 1, 1, 1) - - -def upflow8(flow, mode='bilinear'): - new_size = (8 * flow.shape[2], 8 * flow.shape[3]) - return 8 * F.interpolate(flow, size=new_size, mode=mode, align_corners=True) diff --git a/spaces/shi-labs/Versatile-Diffusion/lib/cfg_helper.py b/spaces/shi-labs/Versatile-Diffusion/lib/cfg_helper.py deleted file mode 100644 index c094b161634489de650556c51fcfa722e73cb606..0000000000000000000000000000000000000000 --- a/spaces/shi-labs/Versatile-Diffusion/lib/cfg_helper.py +++ /dev/null @@ -1,612 +0,0 @@ -import os -import 
os.path as osp -import shutil -import copy -import time -import pprint -import numpy as np -import torch -import matplotlib -import argparse -import json -import yaml -from easydict import EasyDict as edict - -from .model_zoo import get_model - -############ -# cfg_bank # -############ - -def cfg_solvef(cmd, root): - if not isinstance(cmd, str): - return cmd - - if cmd.find('SAME')==0: - zoom = root - p = cmd[len('SAME'):].strip('()').split('.') - p = [pi.strip() for pi in p] - for pi in p: - try: - pi = int(pi) - except: - pass - - try: - zoom = zoom[pi] - except: - return cmd - return cfg_solvef(zoom, root) - - if cmd.find('SEARCH')==0: - zoom = root - p = cmd[len('SEARCH'):].strip('()').split('.') - p = [pi.strip() for pi in p] - find = True - # Depth first search - for pi in p: - try: - pi = int(pi) - except: - pass - - try: - zoom = zoom[pi] - except: - find = False - break - - if find: - return cfg_solvef(zoom, root) - else: - if isinstance(root, dict): - for ri in root: - rv = cfg_solvef(cmd, root[ri]) - if rv != cmd: - return rv - if isinstance(root, list): - for ri in root: - rv = cfg_solvef(cmd, ri) - if rv != cmd: - return rv - return cmd - - if cmd.find('MODEL')==0: - goto = cmd[len('MODEL'):].strip('()') - return model_cfg_bank()(goto) - - if cmd.find('DATASET')==0: - goto = cmd[len('DATASET'):].strip('()') - return dataset_cfg_bank()(goto) - - return cmd - -def cfg_solve(cfg, cfg_root): - # The function solve cfg element such that - # all sorrogate input are settled. - # (i.e. SAME(***) ) - if isinstance(cfg, list): - for i in range(len(cfg)): - if isinstance(cfg[i], (list, dict)): - cfg[i] = cfg_solve(cfg[i], cfg_root) - else: - cfg[i] = cfg_solvef(cfg[i], cfg_root) - if isinstance(cfg, dict): - for k in cfg: - if isinstance(cfg[k], (list, dict)): - cfg[k] = cfg_solve(cfg[k], cfg_root) - else: - cfg[k] = cfg_solvef(cfg[k], cfg_root) - return cfg - -class model_cfg_bank(object): - def __init__(self): - self.cfg_dir = osp.join('configs', 'model') - self.cfg_bank = edict() - - def __call__(self, name): - if name not in self.cfg_bank: - cfg_path = self.get_yaml_path(name) - with open(cfg_path, 'r') as f: - cfg_new = yaml.load( - f, Loader=yaml.FullLoader) - cfg_new = edict(cfg_new) - self.cfg_bank.update(cfg_new) - - cfg = self.cfg_bank[name] - cfg.name = name - if 'super_cfg' not in cfg: - cfg = cfg_solve(cfg, cfg) - self.cfg_bank[name] = cfg - return copy.deepcopy(cfg) - - super_cfg = self.__call__(cfg.super_cfg) - # unlike other field, - # args will not be replaced but update. 
- if 'args' in cfg: - if 'args' in super_cfg: - super_cfg.args.update(cfg.args) - else: - super_cfg.args = cfg.args - cfg.pop('args') - - super_cfg.update(cfg) - super_cfg.pop('super_cfg') - cfg = super_cfg - try: - delete_args = cfg.pop('delete_args') - except: - delete_args = [] - - for dargs in delete_args: - cfg.args.pop(dargs) - - cfg = cfg_solve(cfg, cfg) - self.cfg_bank[name] = cfg - return copy.deepcopy(cfg) - - def get_yaml_path(self, name): - if name.find('openai_unet')==0: - return osp.join( - self.cfg_dir, 'openai_unet.yaml') - elif (name.find('clip')==0) or (name.find('openclip')==0): - return osp.join( - self.cfg_dir, 'clip.yaml') - elif name.find('vd')==0: - return osp.join( - self.cfg_dir, 'vd.yaml') - elif name.find('optimus')==0: - return osp.join( - self.cfg_dir, 'optimus.yaml') - elif name.find('autokl')==0: - return osp.join( - self.cfg_dir, 'autokl.yaml') - else: - raise ValueError - -class dataset_cfg_bank(object): - def __init__(self): - self.cfg_dir = osp.join('configs', 'dataset') - self.cfg_bank = edict() - - def __call__(self, name): - if name not in self.cfg_bank: - cfg_path = self.get_yaml_path(name) - with open(cfg_path, 'r') as f: - cfg_new = yaml.load( - f, Loader=yaml.FullLoader) - cfg_new = edict(cfg_new) - self.cfg_bank.update(cfg_new) - - cfg = self.cfg_bank[name] - cfg.name = name - if cfg.get('super_cfg', None) is None: - cfg = cfg_solve(cfg, cfg) - self.cfg_bank[name] = cfg - return copy.deepcopy(cfg) - - super_cfg = self.__call__(cfg.super_cfg) - super_cfg.update(cfg) - cfg = super_cfg - cfg.super_cfg = None - try: - delete = cfg.pop('delete') - except: - delete = [] - - for dargs in delete: - cfg.pop(dargs) - - cfg = cfg_solve(cfg, cfg) - self.cfg_bank[name] = cfg - return copy.deepcopy(cfg) - - def get_yaml_path(self, name): - if name.find('laion2b')==0: - return osp.join( - self.cfg_dir, 'laion2b.yaml') - else: - raise ValueError - -class experiment_cfg_bank(object): - def __init__(self): - self.cfg_dir = osp.join('configs', 'experiment') - self.cfg_bank = edict() - - def __call__(self, name): - if name not in self.cfg_bank: - cfg_path = self.get_yaml_path(name) - with open(cfg_path, 'r') as f: - cfg = yaml.load( - f, Loader=yaml.FullLoader) - cfg = edict(cfg) - - cfg = cfg_solve(cfg, cfg) - cfg = cfg_solve(cfg, cfg) - # twice for SEARCH - self.cfg_bank[name] = cfg - return copy.deepcopy(cfg) - - def get_yaml_path(self, name): - return osp.join( - self.cfg_dir, name+'.yaml') - -def load_cfg_yaml(path): - if osp.isfile(path): - cfg_path = path - elif osp.isfile(osp.join('configs', 'experiment', path)): - cfg_path = osp.join('configs', 'experiment', path) - elif osp.isfile(osp.join('configs', 'experiment', path+'.yaml')): - cfg_path = osp.join('configs', 'experiment', path+'.yaml') - else: - assert False, 'No such config!' - - with open(cfg_path, 'r') as f: - cfg = yaml.load(f, Loader=yaml.FullLoader) - cfg = edict(cfg) - cfg = cfg_solve(cfg, cfg) - cfg = cfg_solve(cfg, cfg) - return cfg - -############## -# cfg_helper # -############## - -def get_experiment_id(ref=None): - if ref is None: - time.sleep(0.5) - return int(time.time()*100) - else: - try: - return int(ref) - except: - pass - - _, ref = osp.split(ref) - ref = ref.split('_')[0] - try: - return int(ref) - except: - assert False, 'Invalid experiment ID!' 
- -def record_resume_cfg(path): - cnt = 0 - while True: - if osp.exists(path+'.{:04d}'.format(cnt)): - cnt += 1 - continue - shutil.copyfile(path, path+'.{:04d}'.format(cnt)) - break - -def get_command_line_args(): - parser = argparse.ArgumentParser() - parser.add_argument('--debug', action='store_true', default=False) - parser.add_argument('--config', type=str) - parser.add_argument('--gpu', nargs='+', type=int) - - parser.add_argument('--node_rank', type=int) - parser.add_argument('--node_list', nargs='+', type=str) - parser.add_argument('--nodes', type=int) - parser.add_argument('--addr', type=str, default='127.0.0.1') - parser.add_argument('--port', type=int, default=11233) - - parser.add_argument('--signature', nargs='+', type=str) - parser.add_argument('--seed', type=int) - - parser.add_argument('--eval', type=str) - parser.add_argument('--eval_subdir', type=str) - parser.add_argument('--pretrained', type=str) - - parser.add_argument('--resume_dir', type=str) - parser.add_argument('--resume_step', type=int) - parser.add_argument('--resume_weight', type=str) - - args = parser.parse_args() - - # Special handling the resume - if args.resume_dir is not None: - cfg = edict() - cfg.env = edict() - cfg.env.debug = args.debug - cfg.env.resume = edict() - cfg.env.resume.dir = args.resume_dir - cfg.env.resume.step = args.resume_step - cfg.env.resume.weight = args.resume_weight - return cfg - - cfg = load_cfg_yaml(args.config) - cfg.env.debug = args.debug - cfg.env.gpu_device = [0] if args.gpu is None else list(args.gpu) - cfg.env.master_addr = args.addr - cfg.env.master_port = args.port - cfg.env.dist_url = 'tcp://{}:{}'.format(args.addr, args.port) - - if args.node_list is None: - cfg.env.node_rank = 0 if args.node_rank is None else args.node_rank - cfg.env.nodes = 1 if args.nodes is None else args.nodes - else: - import socket - hostname = socket.gethostname() - assert cfg.env.master_addr == args.node_list[0] - cfg.env.node_rank = args.node_list.index(hostname) - cfg.env.nodes = len(args.node_list) - cfg.env.node_list = args.node_list - - istrain = False if args.eval is not None else True - isdebug = cfg.env.debug - - if istrain: - if isdebug: - cfg.env.experiment_id = 999999999999 - cfg.train.signature = ['debug'] - else: - cfg.env.experiment_id = get_experiment_id() - if args.signature is not None: - cfg.train.signature = args.signature - else: - if 'train' in cfg: - cfg.pop('train') - cfg.env.experiment_id = get_experiment_id(args.eval) - if args.signature is not None: - cfg.eval.signature = args.signature - - if isdebug and (args.eval is None): - cfg.env.experiment_id = 999999999999 - cfg.eval.signature = ['debug'] - - if args.eval_subdir is not None: - if isdebug: - cfg.eval.eval_subdir = 'debug' - else: - cfg.eval.eval_subdir = args.eval_subdir - if args.pretrained is not None: - cfg.eval.pretrained = args.pretrained - # The override pretrained over the setting in cfg.model - - if args.seed is not None: - cfg.env.rnd_seed = args.seed - - return cfg - -def cfg_initiates(cfg): - cfge = cfg.env - isdebug = cfge.debug - isresume = 'resume' in cfge - istrain = 'train' in cfg - haseval = 'eval' in cfg - cfgt = cfg.train if istrain else None - cfgv = cfg.eval if haseval else None - - ############################### - # get some environment params # - ############################### - - cfge.computer = os.uname() - cfge.torch_version = str(torch.__version__) - - ########## - # resume # - ########## - - if isresume: - resume_cfg_path = osp.join(cfge.resume.dir, 'config.yaml') - 
record_resume_cfg(resume_cfg_path) - with open(resume_cfg_path, 'r') as f: - cfg_resume = yaml.load(f, Loader=yaml.FullLoader) - cfg_resume = edict(cfg_resume) - cfg_resume.env.update(cfge) - cfg = cfg_resume - cfge = cfg.env - log_file = cfg.train.log_file - - print('') - print('##########') - print('# resume #') - print('##########') - print('') - with open(log_file, 'a') as f: - print('', file=f) - print('##########', file=f) - print('# resume #', file=f) - print('##########', file=f) - print('', file=f) - - pprint.pprint(cfg) - with open(log_file, 'a') as f: - pprint.pprint(cfg, f) - - #################### - # node distributed # - #################### - - if cfg.env.master_addr!='127.0.0.1': - os.environ['MASTER_ADDR'] = cfge.master_addr - os.environ['MASTER_PORT'] = '{}'.format(cfge.master_port) - if cfg.env.dist_backend=='nccl': - os.environ['NCCL_SOCKET_FAMILY'] = 'AF_INET' - if cfg.env.dist_backend=='gloo': - os.environ['GLOO_SOCKET_FAMILY'] = 'AF_INET' - - ####################### - # cuda visible device # - ####################### - - os.environ["CUDA_VISIBLE_DEVICES"] = ','.join( - [str(gid) for gid in cfge.gpu_device]) - - ##################### - # return resume cfg # - ##################### - - if isresume: - return cfg - - ############################################# - # some misc setting that not need in resume # - ############################################# - - cfgm = cfg.model - cfge.gpu_count = len(cfge.gpu_device) - - ########################################## - # align batch size and num worker config # - ########################################## - - gpu_n = cfge.gpu_count * cfge.nodes - def align_batch_size(bs, bs_per_gpu): - assert (bs is not None) or (bs_per_gpu is not None) - bs = bs_per_gpu * gpu_n if bs is None else bs - bs_per_gpu = bs // gpu_n if bs_per_gpu is None else bs_per_gpu - assert (bs == bs_per_gpu * gpu_n) - return bs, bs_per_gpu - - if istrain: - cfgt.batch_size, cfgt.batch_size_per_gpu = \ - align_batch_size(cfgt.batch_size, cfgt.batch_size_per_gpu) - cfgt.dataset_num_workers, cfgt.dataset_num_workers_per_gpu = \ - align_batch_size(cfgt.dataset_num_workers, cfgt.dataset_num_workers_per_gpu) - if haseval: - cfgv.batch_size, cfgv.batch_size_per_gpu = \ - align_batch_size(cfgv.batch_size, cfgv.batch_size_per_gpu) - cfgv.dataset_num_workers, cfgv.dataset_num_workers_per_gpu = \ - align_batch_size(cfgv.dataset_num_workers, cfgv.dataset_num_workers_per_gpu) - - ################## - # create log dir # - ################## - - if istrain: - if not isdebug: - sig = cfgt.get('signature', []) - sig = sig + ['s{}'.format(cfge.rnd_seed)] - else: - sig = ['debug'] - - log_dir = [ - cfge.log_root_dir, - '{}_{}'.format(cfgm.symbol, cfgt.dataset.symbol), - '_'.join([str(cfge.experiment_id)] + sig) - ] - log_dir = osp.join(*log_dir) - log_file = osp.join(log_dir, 'train.log') - if not osp.exists(log_file): - os.makedirs(osp.dirname(log_file)) - cfgt.log_dir = log_dir - cfgt.log_file = log_file - - if haseval: - cfgv.log_dir = log_dir - cfgv.log_file = log_file - else: - model_symbol = cfgm.symbol - if cfgv.get('dataset', None) is None: - dataset_symbol = 'nodataset' - else: - dataset_symbol = cfgv.dataset.symbol - - log_dir = osp.join(cfge.log_root_dir, '{}_{}'.format(model_symbol, dataset_symbol)) - exp_dir = search_experiment_folder(log_dir, cfge.experiment_id) - if exp_dir is None: - if not isdebug: - sig = cfgv.get('signature', []) + ['evalonly'] - else: - sig = ['debug'] - exp_dir = '_'.join([str(cfge.experiment_id)] + sig) - - eval_subdir = 
cfgv.get('eval_subdir', None) - # override subdir in debug mode (if eval_subdir is set) - eval_subdir = 'debug' if (eval_subdir is not None) and isdebug else eval_subdir - - if eval_subdir is not None: - log_dir = osp.join(log_dir, exp_dir, eval_subdir) - else: - log_dir = osp.join(log_dir, exp_dir) - - disable_log_override = cfgv.get('disable_log_override', False) - if osp.isdir(log_dir): - if disable_log_override: - assert False, 'Override an exsited log_dir is disabled at [{}]'.format(log_dir) - else: - os.makedirs(log_dir) - - log_file = osp.join(log_dir, 'eval.log') - cfgv.log_dir = log_dir - cfgv.log_file = log_file - - ###################### - # print and save cfg # - ###################### - - pprint.pprint(cfg) - if cfge.node_rank==0: - with open(log_file, 'w') as f: - pprint.pprint(cfg, f) - with open(osp.join(log_dir, 'config.yaml'), 'w') as f: - yaml.dump(edict_2_dict(cfg), f) - else: - with open(osp.join(log_dir, 'config.yaml.{}'.format(cfge.node_rank)), 'w') as f: - yaml.dump(edict_2_dict(cfg), f) - - ############# - # save code # - ############# - - save_code = False - if istrain: - save_code = cfgt.get('save_code', False) - elif haseval: - save_code = cfgv.get('save_code', False) - save_code = save_code and (cfge.node_rank==0) - - if save_code: - codedir = osp.join(log_dir, 'code') - if osp.exists(codedir): - shutil.rmtree(codedir) - for d in ['configs', 'lib']: - fromcodedir = d - tocodedir = osp.join(codedir, d) - shutil.copytree( - fromcodedir, tocodedir, - ignore=shutil.ignore_patterns( - '*__pycache__*', '*build*')) - for codei in os.listdir('.'): - if osp.splitext(codei)[1] == 'py': - shutil.copy(codei, codedir) - - ####################### - # set matplotlib mode # - ####################### - - if 'matplotlib_mode' in cfge: - try: - matplotlib.use(cfge.matplotlib_mode) - except: - print('Warning: matplotlib mode [{}] failed to be set!'.format(cfge.matplotlib_mode)) - - return cfg - -def edict_2_dict(x): - if isinstance(x, dict): - xnew = {} - for k in x: - xnew[k] = edict_2_dict(x[k]) - return xnew - elif isinstance(x, list): - xnew = [] - for i in range(len(x)): - xnew.append( edict_2_dict(x[i]) ) - return xnew - else: - return x - -def search_experiment_folder(root, exid): - target = None - for fi in os.listdir(root): - if not osp.isdir(osp.join(root, fi)): - continue - if int(fi.split('_')[0]) == exid: - if target is not None: - return None # duplicated - elif target is None: - target = fi - return target diff --git a/spaces/shivammehta25/Diff-TTSG/diff_ttsg/data/cormac_datamodule.py b/spaces/shivammehta25/Diff-TTSG/diff_ttsg/data/cormac_datamodule.py deleted file mode 100644 index 5d49f4daa8f0070c126f3444ccd1d3489da8eff1..0000000000000000000000000000000000000000 --- a/spaces/shivammehta25/Diff-TTSG/diff_ttsg/data/cormac_datamodule.py +++ /dev/null @@ -1,214 +0,0 @@ -import random -from pathlib import Path -from typing import Any, Dict, List, Optional, Tuple - -import numpy as np -import pandas as pd -import torch -import torch.nn.functional as F -import torchaudio as ta -from einops import pack -from lightning import LightningDataModule -from torch.utils.data.dataloader import DataLoader - -from diff_ttsg.text import cmudict, text_to_sequence -from diff_ttsg.text.symbols import symbols -from diff_ttsg.utils.audio import mel_spectrogram -from diff_ttsg.utils.model import fix_len_compatibility, normalize -from diff_ttsg.utils.utils import intersperse, parse_filelist - - -class CormacDataModule(LightningDataModule): - - def __init__( - self, - train_filelist_path, - 
valid_filelist_path, - batch_size, - num_workers, - pin_memory, - cmudict_path, - motion_folder, - add_blank, - n_fft, - n_feats, - sample_rate, - hop_length, - win_length, - f_min, - f_max, - data_statistics, - motion_pipeline_filename, - seed - ): - super().__init__() - - # this line allows to access init params with 'self.hparams' attribute - # also ensures init params will be stored in ckpt - self.save_hyperparameters(logger=False) - - def setup(self, stage: Optional[str] = None): - """Load data. Set variables: `self.data_train`, `self.data_val`, `self.data_test`. - - This method is called by lightning with both `trainer.fit()` and `trainer.test()`, so be - careful not to execute things like random split twice! - """ - # load and split datasets only if not loaded already - - self.trainset = TextMelDataset( - self.hparams.train_filelist_path, - self.hparams.cmudict_path, - self.hparams.motion_folder, - self.hparams.add_blank, - self.hparams.n_fft, - self.hparams.n_feats, - self.hparams.sample_rate, - self.hparams.hop_length, - self.hparams.win_length, - self.hparams.f_min, - self.hparams.f_max, - self.hparams.data_statistics, - self.hparams.seed - ) - self.validset = TextMelDataset( - self.hparams.valid_filelist_path, - self.hparams.cmudict_path, - self.hparams.motion_folder, - self.hparams.add_blank, - self.hparams.n_fft, - self.hparams.n_feats, - self.hparams.sample_rate, - self.hparams.hop_length, - self.hparams.win_length, - self.hparams.f_min, - self.hparams.f_max, - self.hparams.data_statistics, - self.hparams.seed - ) - - - def train_dataloader(self): - return DataLoader( - dataset=self.trainset, - batch_size=self.hparams.batch_size, - num_workers=self.hparams.num_workers, - pin_memory=self.hparams.pin_memory, - shuffle=True, - collate_fn=TextMelBatchCollate() - ) - - def val_dataloader(self): - return DataLoader( - dataset=self.validset, - batch_size=self.hparams.batch_size, - num_workers=self.hparams.num_workers, - pin_memory=self.hparams.pin_memory, - shuffle=False, - collate_fn=TextMelBatchCollate() - ) - - def teardown(self, stage: Optional[str] = None): - """Clean up after fit or test.""" - pass - - def state_dict(self): - """Extra things to save to checkpoint.""" - return {} - - def load_state_dict(self, state_dict: Dict[str, Any]): - """Things to do when loading checkpoint.""" - pass - - -class TextMelDataset(torch.utils.data.Dataset): - def __init__(self, filelist_path, cmudict_path, motion_folder, add_blank=True, - n_fft=1024, n_mels=80, sample_rate=22050, - hop_length=256, win_length=1024, f_min=0., f_max=8000, data_parameters=None, seed=None): - self.filepaths_and_text = parse_filelist(filelist_path) - self.motion_fileloc = Path(motion_folder) - self.cmudict = cmudict.CMUDict(cmudict_path) - self.add_blank = add_blank - self.n_fft = n_fft - self.n_mels = n_mels - self.sample_rate = sample_rate - self.hop_length = hop_length - self.win_length = win_length - self.f_min = f_min - self.f_max = f_max - if data_parameters is not None: - self.data_parameters = data_parameters - else: - self.data_parameters = { 'mel_mean': 0, 'mel_std': 1, 'motion_mean': 0, 'motion_std': 1 } - random.seed(seed) - random.shuffle(self.filepaths_and_text) - - def get_pair(self, filepath_and_text): - filepath, text = filepath_and_text[0], filepath_and_text[1] - text = self.get_text(text, add_blank=self.add_blank) - mel = self.get_mel(filepath) - motion = self.get_motion(filepath, mel.shape[1]) - return (text, mel, motion) - - def get_motion(self, filename, mel_shape, 
ext=".expmap_86.1328125fps.pkl"): - file_loc = self.motion_fileloc / Path(Path(filename).name).with_suffix(ext) - motion = torch.from_numpy(pd.read_pickle(file_loc).to_numpy()) - motion = F.interpolate(motion.T.unsqueeze(0), mel_shape).squeeze(0) - motion = normalize(motion, self.data_parameters['motion_mean'], self.data_parameters['motion_std']) - return motion - - def get_mel(self, filepath): - audio, sr = ta.load(filepath) - assert sr == self.sample_rate - mel = mel_spectrogram(audio, self.n_fft, 80, self.sample_rate, self.hop_length, - self.win_length, self.f_min, self.f_max, center=False).squeeze() - mel = normalize(mel, self.data_parameters['mel_mean'], self.data_parameters['mel_std']) - return mel - - def get_text(self, text, add_blank=True): - text_norm = text_to_sequence(text, dictionary=self.cmudict) - if self.add_blank: - text_norm = intersperse(text_norm, len(symbols)) # add a blank token, whose id number is len(symbols) - text_norm = torch.IntTensor(text_norm) - return text_norm - - def __getitem__(self, index): - text, mel, motion = self.get_pair(self.filepaths_and_text[index]) - item = {'y': mel, 'x': text, 'y_motion': motion} - return item - - def __len__(self): - return len(self.filepaths_and_text) - - def sample_test_batch(self, size): - idx = np.random.choice(range(len(self)), size=size, replace=False) - test_batch = [] - for index in idx: - test_batch.append(self.__getitem__(index)) - return test_batch - - -class TextMelBatchCollate(object): - def __call__(self, batch): - B = len(batch) - y_max_length = max([item['y'].shape[-1] for item in batch]) - y_max_length = fix_len_compatibility(y_max_length) - x_max_length = max([item['x'].shape[-1] for item in batch]) - n_feats = batch[0]['y'].shape[-2] - n_motion = batch[0]['y_motion'].shape[-2] - - y = torch.zeros((B, n_feats, y_max_length), dtype=torch.float32) - x = torch.zeros((B, x_max_length), dtype=torch.long) - y_motion = torch.zeros((B, n_motion, y_max_length), dtype=torch.float32) - y_lengths, x_lengths = [], [] - - for i, item in enumerate(batch): - y_, x_, y_motion_ = item['y'], item['x'], item['y_motion'] - y_lengths.append(y_.shape[-1]) - x_lengths.append(x_.shape[-1]) - y[i, :, :y_.shape[-1]] = y_ - x[i, :x_.shape[-1]] = x_ - y_motion[i, :, :y_motion_.shape[-1]] = y_motion_ - - y_lengths = torch.LongTensor(y_lengths) - x_lengths = torch.LongTensor(x_lengths) - return {'x': x, 'x_lengths': x_lengths, 'y': y, 'y_lengths': y_lengths, 'y_motion': y_motion} \ No newline at end of file diff --git a/spaces/silencewing/server/youyou/.history/math_20230613231600.html b/spaces/silencewing/server/youyou/.history/math_20230613231600.html deleted file mode 100644 index dcc17750248558768725723ed2181a016e46293f..0000000000000000000000000000000000000000 --- a/spaces/silencewing/server/youyou/.history/math_20230613231600.html +++ /dev/null @@ -1,234 +0,0 @@ - - - - - - - - - - Document - - - - -
    - - - - - - - - - - - - - - - - - - - - - - - - -
    Problem
    Answer
    Correct / Incorrect
    Score
    -
    - - - - diff --git a/spaces/skf15963/summary/fengshen/examples/disco_project/README.md b/spaces/skf15963/summary/fengshen/examples/disco_project/README.md deleted file mode 100644 index c8d95f886e1d80fd1e198eb8d0618c77b6f8836d..0000000000000000000000000000000000000000 --- a/spaces/skf15963/summary/fengshen/examples/disco_project/README.md +++ /dev/null @@ -1,18 +0,0 @@ -# Chinese Warp For Disco Diffusion -- This is a chinese version disco diffusion. We train a Chinese CLIP [IDEA-CCNL/Taiyi-CLIP-Roberta-large-326M-Chinese](https://huggingface.co/IDEA-CCNL/Taiyi-CLIP-Roberta-large-326M-Chinese) and utilize it to guide the diffusion process. -- This code is modified from https://github.com/alembics/disco-diffusion -- streamlit demo is supported. -- the checkpoint has been upload to hugging face. -## Usage - -- Install the lack package directly -### Run Directly -``` -python disco.py --prompt 夕阳西下 --model_path IDEA-CCNL/Taiyi-Diffusion-532M-Nature # or IDEA-CCNL/Taiyi-Diffusion-532M-Cyberpunk -``` - -### Streamlit Setup -``` -streamlit run st_disco.py -# --server.port=xxxx --server.address=xxxx -``` diff --git a/spaces/sklkd93/CodeFormer/CodeFormer/basicsr/archs/__init__.py b/spaces/sklkd93/CodeFormer/CodeFormer/basicsr/archs/__init__.py deleted file mode 100644 index cfb1e4d7bb221c429082bd389d9140e5b1cc07b0..0000000000000000000000000000000000000000 --- a/spaces/sklkd93/CodeFormer/CodeFormer/basicsr/archs/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -import importlib -from copy import deepcopy -from os import path as osp - -from basicsr.utils import get_root_logger, scandir -from basicsr.utils.registry import ARCH_REGISTRY - -__all__ = ['build_network'] - -# automatically scan and import arch modules for registry -# scan all the files under the 'archs' folder and collect files ending with -# '_arch.py' -arch_folder = osp.dirname(osp.abspath(__file__)) -arch_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(arch_folder) if v.endswith('_arch.py')] -# import all the arch modules -_arch_modules = [importlib.import_module(f'basicsr.archs.{file_name}') for file_name in arch_filenames] - - -def build_network(opt): - opt = deepcopy(opt) - network_type = opt.pop('type') - net = ARCH_REGISTRY.get(network_type)(**opt) - logger = get_root_logger() - logger.info(f'Network [{net.__class__.__name__}] is created.') - return net diff --git a/spaces/society-ethics/DiffusionClustering/app.py b/spaces/society-ethics/DiffusionClustering/app.py deleted file mode 100644 index affd4561278f4c9777a36932a8e78a42d0c01d13..0000000000000000000000000000000000000000 --- a/spaces/society-ethics/DiffusionClustering/app.py +++ /dev/null @@ -1,397 +0,0 @@ -import gradio as gr -import json -import numpy as np -import pandas as pd -from datasets import load_from_disk -from itertools import chain -import operator - -pd.options.plotting.backend = "plotly" - - -TITLE = "Identity Biases in Diffusion Models: Professions" - -_INTRO = """ -# Identity Biases in Diffusion Models: Professions - -Explore profession-level social biases in the data from [DiffusionBiasExplorer](https://hf.co/spaces/society-ethics/DiffusionBiasExplorer)! -This demo leverages the gender and ethnicity representation clusters described in the [companion app](https://hf.co/spaces/society-ethics/DiffusionFaceClustering) -to analyze social trends in machine-generated visual representations of professions. 
-The **Professions Overview** tab lets you compare the distribution over -[identity clusters](https://hf.co/spaces/society-ethics/DiffusionFaceClustering "Identity clusters identify visual features in the systems' output space correlated with variation of gender and ethnicity in input prompts.") -across professions for Stable Diffusion and Dalle-2 systems (or aggregated for `All Models`). -The **Professions Focus** tab provides more details for each of the individual professions, including direct system comparisons and examples of profession images for each cluster. -This work was done in the scope of the [Stable Bias Project](https://hf.co/spaces/society-ethics/StableBias). -As you use this demo, please share findings and comments [in the discussions tab](https://hf.co/spaces/society-ethics/DiffusionClustering/discussions)! -""" - -_ = """ - For example, you can use this tool to investigate: - - How do each model's representation of professions correlate with the gender ratios reported by the [U.S. Bureau of Labor -Statistics](https://www.bls.gov/cps/cpsaat11.htm "The reported percentage of women in each profession in the US is indicated in the `Labor Women` column in the Professions Overview tab.")? -Are social trends reflected, are they exaggerated? -- Which professions have the starkest differences in how different models represent them? -""" - -professions_dset = load_from_disk("professions") -professions_df = professions_dset.to_pandas() - - -clusters_dicts = dict( - (num_cl, json.load(open(f"clusters/professions_to_clusters_{num_cl}.json"))) - for num_cl in [12, 24, 48] -) - -cluster_summaries_by_size = json.load(open("clusters/cluster_summaries_by_size.json")) - -prompts = pd.read_csv("promptsadjectives.csv") -professions = ["all professions"] + list( -# sorted([p.lower() for p in prompts["Occupation-Noun"].tolist()]) - sorted([p for p in prompts["Occupation-Noun"].tolist()]) -) -models = { - "All": "All Models", - "SD_14": "Stable Diffusion 1.4", - "SD_2": "Stable Diffusion 2", - "DallE": "Dall-E 2", -} - -df_models = { - "All Models": "All", - "Stable Diffusion 1.4": "SD_14", - "Stable Diffusion 2": "SD_2", - "Dall-E 2": "DallE", -} - - -def describe_cluster(num_clusters, block="label"): - cl_dict = clusters_dicts[num_clusters] - labels_values = sorted(cl_dict.items(), key=operator.itemgetter(1)) - labels_values.reverse() - total = float(sum(cl_dict.values())) - lv_prcnt = list( - (item[0], round(item[1] * 100 / total, 0)) for item in labels_values - ) - top_label = lv_prcnt[0][0] - description_string = ( - "The most represented %s is %s, making up about %d%% of the cluster." - % (to_string(block), to_string(top_label), lv_prcnt[0][1]) - ) - description_string += "

    This is followed by: " - for lv in lv_prcnt[1:]: - description_string += "
    %s: %d%%" % (to_string(lv[0]), lv[1]) - description_string += "

    " - return description_string - - -def make_profession_plot(num_clusters, prof_name): - sorted_cl_scores = [ - (k, v) - for k, v in sorted( - clusters_dicts[num_clusters]["All"][prof_name][ - "cluster_proportions" - ].items(), - key=lambda x: x[1], - reverse=True, - ) - if v > 0 - ] - pre_pandas = dict( - [ - ( - models[mod_name], - dict( - ( - f"Cluster {k}", - clusters_dicts[num_clusters][mod_name][prof_name][ - "cluster_proportions" - ][k], - ) - for k, _ in sorted_cl_scores - ), - ) - for mod_name in models - ] - ) - df = pd.DataFrame.from_dict(pre_pandas) - prof_plot = df.plot(kind="bar", barmode="group") - cl_summary_text = f"Profession '{prof_name}':\n" - for cl_id, _ in sorted_cl_scores: - cl_summary_text += f"- {cluster_summaries_by_size[str(num_clusters)][int(cl_id)].replace(' gender terms', '').replace('; ethnicity terms:', ',')} \n" - return ( - prof_plot, - gr.update( - choices=[k for k, _ in sorted_cl_scores], value=sorted_cl_scores[0][0] - ), - gr.update(value=cl_summary_text), - ) - - -def make_profession_table(num_clusters, prof_names, mod_name, max_cols=8): - professions_list_clusters = [ - ( - prof_name, - clusters_dicts[num_clusters][df_models[mod_name]][prof_name][ - "cluster_proportions" - ], - ) - for prof_name in prof_names - ] - totals = sorted( - [ - ( - k, - sum( - prof_clusters[str(k)] - for _, prof_clusters in professions_list_clusters - ), - ) - for k in range(num_clusters) - ], - key=lambda x: x[1], - reverse=True, - )[:max_cols] - prof_list_pre_pandas = [ - dict( - [ - ("Profession", prof_name), - ( - "Entropy", - clusters_dicts[num_clusters][df_models[mod_name]][prof_name][ - "entropy" - ], - ), - ( - "Labor Women", - clusters_dicts[num_clusters][df_models[mod_name]][prof_name][ - "labor_fm" - ][0], - ), - ("", ""), - ] - + [(f"Cluster {k}", prof_clusters[str(k)]) for k, v in totals if v > 0] - ) - for prof_name, prof_clusters in professions_list_clusters - ] - clusters_df = pd.DataFrame.from_dict(prof_list_pre_pandas) - cl_summary_text = "" - for cl_id, _ in totals[:max_cols]: - cl_summary_text += f"- {cluster_summaries_by_size[str(num_clusters)][cl_id].replace(' gender terms', '').replace('; ethnicity terms:', ',')} \n" - return ( - [c[0] for c in totals], - ( - clusters_df.style.background_gradient( - axis=None, vmin=0, vmax=100, cmap="YlGnBu" - ) - .format(precision=1) - .to_html() - ), - gr.update(value=cl_summary_text), - ) - - -def get_image(model, fname, score): - return ( - professions_dset.select( - professions_df[ - (professions_df["image_path"] == fname) - & (professions_df["model"] == model) - ].index - )["image"][0], - " ".join(fname.split("/")[0].split("_")[4:]) - + f" | {score:.2f}" - + f" | {models[model]}", - ) - - -def show_examplars(num_clusters, prof_name, cl_id, confidence_threshold=0.6): - # only show images where the similarity to the centroid is > confidence_threshold - examplars_dict = clusters_dicts[num_clusters]["All"][prof_name][ - "cluster_examplars" - ][str(cl_id)] - l = [ - tuple(img) - for img in examplars_dict["close"] - + examplars_dict["mid"][:2] - + examplars_dict["far"] - ] - l = [ - img - for i, img in enumerate(l) - if img[0] > confidence_threshold and img not in l[:i] - ] - return ( - [get_image(model, fname, score) for score, model, fname in l], - gr.update( - label=f"Generations for profession ''{prof_name}'' assigned to cluster {cl_id} of {num_clusters}" - ), - ) - - -with gr.Blocks(title=TITLE) as demo: - gr.Markdown(_INTRO) - gr.HTML( - """⚠️ DISCLAIMER: the images displayed by this tool were generated by 
text-to-image systems and may depict offensive stereotypes or contain explicit content.""" - ) - with gr.Tab("Professions Overview"): - gr.Markdown( - """ - Select one or more professions and models from the dropdowns on the left to see which clusters are most representative for this combination. - Try choosing different numbers of clusters to see if the results change, and then go to the 'Profession Focus' tab to go more in-depth into these results. - The `Labor Women` column provided for comparison corresponds to the gender ratio reported by the - [U.S. Bureau of Labor Statistics](https://www.bls.gov/cps/cpsaat11.htm) for each profession. - """ - ) - with gr.Row(): - with gr.Column(scale=1): - gr.Markdown("Select the parameters here:") - num_clusters = gr.Radio( - [12, 24, 48], - value=12, - label="How many clusters do you want to use to represent identities?", - ) - model_choices = gr.Dropdown( - [ - "All Models", - "Stable Diffusion 1.4", - "Stable Diffusion 2", - "Dall-E 2", - ], - value="All Models", - label="Which models do you want to compare?", - interactive=True, - ) - profession_choices_overview = gr.Dropdown( - professions, - value=[ - "all professions", - "CEO", - "director", - "social assistant", - "social worker", - ], - label="Which professions do you want to compare?", - multiselect=True, - interactive=True, - ) - with gr.Column(scale=3): - with gr.Row(): - table = gr.HTML( - label="Profession assignment per cluster", wrap=True - ) - with gr.Row(): - # clusters = gr.Dataframe(type="array", visible=False, col_count=1) - clusters = gr.Textbox(label="clusters", visible=False) - gr.Markdown( - """ - ##### What do the clusters mean? - Below is a summary of the identity cluster compositions. - For more details, see the [companion demo](https://huggingface.co/spaces/society-ethics/DiffusionFaceClustering): - """ - ) - with gr.Row(): - with gr.Accordion(label="Cluster summaries", open=True): - cluster_descriptions_table = gr.Text( - "TODO", label="Cluster summaries", show_label=False - ) - with gr.Tab("Profession Focus"): - with gr.Row(): - with gr.Column(): - gr.Markdown( - "Select a profession to visualize and see which clusters and identity groups are most represented in the profession, as well as some examples of generated images below." - ) - profession_choice_focus = gr.Dropdown( - choices=professions, - value="scientist", - label="Select profession:", - ) - num_clusters_focus = gr.Radio( - [12, 24, 48], - value=12, - label="How many clusters do you want to use to represent identities?", - ) - with gr.Column(): - plot = gr.Plot( - label=f"Makeup of the cluster assignments for profession {profession_choice_focus}" - ) - with gr.Row(): - with gr.Column(): - gr.Markdown( - """ - ##### What do the clusters mean? - Below is a summary of the identity cluster compositions. - For more details, see the [companion demo](https://huggingface.co/spaces/society-ethics/DiffusionFaceClustering): - """ - ) - with gr.Accordion(label="Cluster summaries", open=True): - cluster_descriptions = gr.Text( - "TODO", label="Cluster summaries", show_label=False - ) - with gr.Column(): - gr.Markdown( - """ - ##### What's in the clusters? 
- You can show examples of profession images assigned to each identity cluster by selecting one here: - """ - ) - with gr.Accordion(label="Cluster selection", open=True): - cluster_id_focus = gr.Dropdown( - choices=[i for i in range(num_clusters_focus.value)], - value=0, - label="Select cluster to visualize:", - ) - with gr.Row(): - examplars_plot = gr.Gallery( - label="Profession images assigned to the selected cluster." - ).style(grid=4, height="auto", container=True) - demo.load( - make_profession_table, - [num_clusters, profession_choices_overview, model_choices], - [clusters, table, cluster_descriptions_table], - queue=False, - ) - demo.load( - make_profession_plot, - [num_clusters_focus, profession_choice_focus], - [plot, cluster_id_focus, cluster_descriptions], - queue=False, - ) - demo.load( - show_examplars, - [ - num_clusters_focus, - profession_choice_focus, - cluster_id_focus, - ], - [examplars_plot, examplars_plot], - queue=False, - ) - for var in [num_clusters, model_choices, profession_choices_overview]: - var.change( - make_profession_table, - [num_clusters, profession_choices_overview, model_choices], - [clusters, table, cluster_descriptions_table], - queue=False, - ) - for var in [num_clusters_focus, profession_choice_focus]: - var.change( - make_profession_plot, - [num_clusters_focus, profession_choice_focus], - [plot, cluster_id_focus, cluster_descriptions], - queue=False, - ) - for var in [num_clusters_focus, profession_choice_focus, cluster_id_focus]: - var.change( - show_examplars, - [ - num_clusters_focus, - profession_choice_focus, - cluster_id_focus, - ], - [examplars_plot, examplars_plot], - queue=False, - ) - - -if __name__ == "__main__": - demo.queue().launch(debug=True) diff --git a/spaces/spencer/socm/reference_embeddings.py b/spaces/spencer/socm/reference_embeddings.py deleted file mode 100644 index c0616e82a2df9c38c330f186a53627b7d3d6ff0a..0000000000000000000000000000000000000000 --- a/spaces/spencer/socm/reference_embeddings.py +++ /dev/null @@ -1,44 +0,0 @@ -import argparse -from tqdm import tqdm - -import faiss - -from embeddings import FaissIndex -from models import CLIP - - -def main(file, index_type): - - clip = CLIP() - with open(file) as f: - references = f.read().split("\n") - - index = FaissIndex( - embedding_size=768, - faiss_index_location=f"faiss_indices/{index_type}.index", - indexer=faiss.IndexFlatIP, - ) - index.reset() - - if len(references) < 500: - ref_embeddings = clip.get_text_emb(references) - index.add(ref_embeddings.detach().numpy(), references) - else: - - batches = list(range(0, len(references), 300)) + [len(references)] - batched_objects = [] - for idx in range(0, len(batches) - 1): - batched_objects.append(references[batches[idx] : batches[idx + 1]]) - - for batch in tqdm(batched_objects): - ref_embeddings = clip.get_text_emb(batch) - index.add(ref_embeddings.detach().numpy(), batch) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("file", type=str, help="File containing references") - parser.add_argument("index_type", type=str, choices=["places", "objects"]) - args = parser.parse_args() - - main(args.file, args.index_type) diff --git a/spaces/sqc1729/bingi/src/components/ui/button.tsx b/spaces/sqc1729/bingi/src/components/ui/button.tsx deleted file mode 100644 index 281da005124fa94c89a9a9db7605748a92b60865..0000000000000000000000000000000000000000 --- a/spaces/sqc1729/bingi/src/components/ui/button.tsx +++ /dev/null @@ -1,57 +0,0 @@ -import * as React from 'react' -import { Slot } from 
'@radix-ui/react-slot' -import { cva, type VariantProps } from 'class-variance-authority' - -import { cn } from '@/lib/utils' - -const buttonVariants = cva( - 'inline-flex items-center justify-center rounded-md text-sm font-medium shadow ring-offset-background transition-colors outline-none disabled:pointer-events-none disabled:opacity-50', - { - variants: { - variant: { - default: - 'bg-primary text-primary-foreground shadow-md hover:bg-primary/90', - destructive: - 'bg-destructive text-destructive-foreground hover:bg-destructive/90', - outline: - 'border border-input hover:bg-accent hover:text-accent-foreground', - secondary: - 'bg-secondary text-secondary-foreground hover:bg-secondary/80', - ghost: 'shadow-none hover:bg-accent hover:text-accent-foreground', - link: 'text-primary underline-offset-4 shadow-none hover:underline' - }, - size: { - default: 'h-8 px-4 py-2', - sm: 'h-8 rounded-md px-3', - lg: 'h-11 rounded-md px-8', - icon: 'h-8 w-8 p-0' - } - }, - defaultVariants: { - variant: 'default', - size: 'default' - } - } -) - -export interface ButtonProps - extends React.ButtonHTMLAttributes, - VariantProps { - asChild?: boolean -} - -const Button = React.forwardRef( - ({ className, variant, size, asChild = false, ...props }, ref) => { - const Comp = asChild ? Slot : 'button' - return ( - - ) - } -) -Button.displayName = 'Button' - -export { Button, buttonVariants } diff --git a/spaces/srikanth-nm/ai_seeker/ingest.py b/spaces/srikanth-nm/ai_seeker/ingest.py deleted file mode 100644 index e61c4cd73e92299b13c47ebce93aa1af0019c27c..0000000000000000000000000000000000000000 --- a/spaces/srikanth-nm/ai_seeker/ingest.py +++ /dev/null @@ -1,131 +0,0 @@ -import logging -import os -from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor, as_completed - -import click -import torch -from langchain.docstore.document import Document -from langchain.embeddings import HuggingFaceInstructEmbeddings -from langchain.text_splitter import Language, RecursiveCharacterTextSplitter -from langchain.vectorstores import Chroma - -from constants import ( - CHROMA_SETTINGS, - DOCUMENT_MAP, - EMBEDDING_MODEL_NAME, - INGEST_THREADS, - PERSIST_DIRECTORY, - SOURCE_DIRECTORY, -) - - -def load_single_document(file_path: str) -> Document: - # Loads a single document from a file path - file_extension = os.path.splitext(file_path)[1] - loader_class = DOCUMENT_MAP.get(file_extension) - if loader_class: - loader = loader_class(file_path) - else: - raise ValueError("Document type is undefined") - return loader.load()[0] - - -def load_document_batch(filepaths): - logging.info("Loading document batch") - # create a thread pool - with ThreadPoolExecutor(len(filepaths)) as exe: - # load files - futures = [exe.submit(load_single_document, name) for name in filepaths] - # collect data - data_list = [future.result() for future in futures] - # return data and file paths - return (data_list, filepaths) - - -def load_documents(source_dir: str) -> list[Document]: - # Loads all documents from the source documents directory - all_files = os.listdir(source_dir) - paths = [] - for file_path in all_files: - file_extension = os.path.splitext(file_path)[1] - source_file_path = os.path.join(source_dir, file_path) - if file_extension in DOCUMENT_MAP.keys(): - paths.append(source_file_path) - - # Have at least one worker and at most INGEST_THREADS workers - n_workers = min(INGEST_THREADS, max(len(paths), 1)) - chunksize = round(len(paths) / n_workers) - docs = [] - with ProcessPoolExecutor(n_workers) as executor: - futures = [] - 
# split the load operations into chunks - for i in range(0, len(paths), chunksize): - # select a chunk of filenames - filepaths = paths[i : (i + chunksize)] - # submit the task - future = executor.submit(load_document_batch, filepaths) - futures.append(future) - # process all results - for future in as_completed(futures): - # open the file and load the data - contents, _ = future.result() - docs.extend(contents) - - return docs - - -def split_documents(documents: list[Document]) -> tuple[list[Document], list[Document]]: - # Splits documents for correct Text Splitter - text_docs, python_docs = [], [] - for doc in documents: - file_extension = os.path.splitext(doc.metadata["source"])[1] - if file_extension == ".py": - python_docs.append(doc) - else: - text_docs.append(doc) - - return text_docs, python_docs - -def main():#device_type): - # Load documents and split in chunks - logging.info(f"Loading documents from {SOURCE_DIRECTORY}") - documents = load_documents(SOURCE_DIRECTORY) - text_documents, python_documents = split_documents(documents) - text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200) - python_splitter = RecursiveCharacterTextSplitter.from_language( - language=Language.PYTHON, chunk_size=1000, chunk_overlap=200 - ) - texts = text_splitter.split_documents(text_documents) - texts.extend(python_splitter.split_documents(python_documents)) - logging.info(f"Loaded {len(documents)} documents from {SOURCE_DIRECTORY}") - logging.info(f"Split into {len(texts)} chunks of text") - - # Create embeddings - embeddings = HuggingFaceInstructEmbeddings( - model_name=EMBEDDING_MODEL_NAME, - model_kwargs={"device": "cpu"}, - ) - # change the embedding type here if you are running into issues. - # These are much smaller embeddings and will work for most appications - # If you use HuggingFaceEmbeddings, make sure to also use the same in the - # run_localGPT.py file. - - # embeddings = HuggingFaceEmbeddings(model_name=EMBEDDING_MODEL_NAME) - - db = Chroma.from_documents( - texts, - embeddings, - persist_directory=PERSIST_DIRECTORY, - client_settings=CHROMA_SETTINGS, - ) - db.persist() - db = None - - return "done" - - -if __name__ == "__main__": - logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(filename)s:%(lineno)s - %(message)s", level=logging.INFO - ) - main() diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/laser/laser_src/multitask_data_utils.py b/spaces/sriramelango/Social_Classification_Public/fairseq/examples/laser/laser_src/multitask_data_utils.py deleted file mode 100644 index b05caea26793bf5112a7abc29d76225f578f3ebe..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/laser/laser_src/multitask_data_utils.py +++ /dev/null @@ -1,143 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -from collections import OrderedDict - -import numpy as np - -from fairseq.data import BaseWrapperDataset, FairseqDataset, iterators - - -class MultiItr(object): - def __init__(self, itr): - self.itr = itr - self._counts = [0 for x in itr] - - def __len__(self): - return sum(len(itr) for itr in self.itr) - - def __iter__(self): - return self - - def __next__(self): - ratios = [count / len(itr) for count, itr in zip(self._counts, self.itr)] - idx = ratios.index(min(ratios)) - self._counts[idx] += 1 - return next(self.itr[idx]) - - -class MultidatasetEpochBatchIterator(iterators.EpochBatchIterating): - """A wrapper around multiple epoch batch iterators.""" - - def __init__( - self, - dataset, - batch_sampler, - seed=1, - num_shards=1, - shard_id=0, - num_workers=0, - epoch=1, - ): - - assert isinstance(dataset, OrderedDict) - assert len(dataset) - assert isinstance(dataset[next(iter(dataset))], FairseqDataset) - - self.iterators = [] - - self.epoch = epoch - for key, dt in dataset.items(): - epoch_iter = iterators.EpochBatchIterator( - dataset=dt, - collate_fn=dt.collater, - batch_sampler=batch_sampler[key], - seed=seed, - num_shards=num_shards, - shard_id=shard_id, - num_workers=0, - epoch=epoch, - ) - self.iterators.append(epoch_iter) - - def __len__(self): - return sum(len(itr) for itr in self.iterators) - - def next_epoch_itr(self, shuffle=True, fix_batches_to_gpus=False): - # `self.epoch += 1` should be handled by underlying `EpochBatchIterator`s. - return MultiItr( - [ - itr.next_epoch_itr( - shuffle=shuffle, fix_batches_to_gpus=fix_batches_to_gpus - ) - for itr in self.iterators - ] - ) - - def end_of_epoch(self): - return all(itr.end_of_epoch() for itr in self.iterators) - - @property - def next_epoch_idx(self): - """Return the epoch index after *next_epoch_itr* is called.""" - - epochs = [itr.next_epoch_idx for itr in self.iterators] - self.epoch = epochs[0] - assert all(epoch == self.epoch for epoch in epochs) - - return self.epoch - - @property - def iterations_in_epoch(self): - return sum(itr.iterations_in_epoch for itr in self.iterators) - - def state_dict(self): - return { - "iterators": [it.state_dict() for it in self.iterators], - "epoch": self.epoch, - } - - def load_state_dict(self, state_dict): - self.epoch = state_dict["epoch"] - for it, d in zip(self.iterators, state_dict["iterators"]): - it.load_state_dict(d) - - -class MultitaskDatasetWrapper(BaseWrapperDataset): - """A wrapper for a multitask dataset.""" - - def __init__(self, dataset, target_language_id, sample=1.0, name=""): - super().__init__(dataset) - self.target_language_id = target_language_id - self.sample = sample - self.name = name - - def collater(self, *args, **kwargs): - ans = self.dataset.collater(*args, **kwargs) - if "net_input" in ans: - ans["net_input"]["target_language_id"] = self.target_language_id - ans["net_input"]["dataset_name"] = self.name - return ans - - def num_tokens(self, *args, **kwargs): - return self.dataset.num_tokens(*args, **kwargs) - - def ordered_indices(self, *args, **kwargs): - indices = self.dataset.ordered_indices(*args, **kwargs) - # Hacky solution for sampling - size = int(self.sample * indices.shape[0]) - - return indices.take(np.sort(np.random.permutation(indices.shape[0])[:size])) - - def size(self, index: int): - return self.dataset.size(index) - - @property - def supports_prefetch(self): - """Whether this dataset supports prefetching.""" - return getattr(self.dataset, "supports_prefetch", False) - - def prefetch(self, indices): - return self.dataset.prefetch(indices) 
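
The proportional interleaving done by `MultiItr` above (always advance the sub-iterator whose consumed fraction `count / len(itr)` is smallest) can be shown with a small standalone sketch. `BalancedItr` and the toy lists below are invented for illustration and stand in for the per-dataset `EpochBatchIterator`s:

```python
# Minimal sketch of MultiItr's balancing rule: datasets of different sizes
# are interleaved in proportion to their lengths.
class BalancedItr:
    def __init__(self, iterables):
        self.lengths = [len(it) for it in iterables]
        self.iters = [iter(it) for it in iterables]
        self.counts = [0] * len(iterables)

    def __iter__(self):
        return self

    def __next__(self):
        # pick the sub-iterator with the smallest consumed fraction
        ratios = [c / l for c, l in zip(self.counts, self.lengths)]
        idx = ratios.index(min(ratios))
        self.counts[idx] += 1
        return next(self.iters[idx])

print(list(BalancedItr([["a1", "a2", "a3", "a4"], ["b1", "b2"]])))
# ['a1', 'b1', 'a2', 'a3', 'b2', 'a4']
```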
diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/pointer_generator/preprocess.py b/spaces/sriramelango/Social_Classification_Public/fairseq/examples/pointer_generator/preprocess.py deleted file mode 100644 index f72ca7d3d97e12ab7b405dcff314bdb6c0a78755..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/pointer_generator/preprocess.py +++ /dev/null @@ -1,102 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import argparse -from itertools import zip_longest - - -def replace_oovs(source_in, target_in, vocabulary, source_out, target_out): - """Replaces out-of-vocabulary words in source and target text with , - where N in is the position of the word in the source sequence. - """ - - def format_unk(pos): - return "".format(pos) - - if target_in is None: - target_in = [] - - for seq_num, (source_seq, target_seq) in enumerate( - zip_longest(source_in, target_in) - ): - source_seq_out = [] - target_seq_out = [] - - word_to_pos = dict() - for position, token in enumerate(source_seq.strip().split()): - if token in vocabulary: - token_out = token - else: - if token in word_to_pos: - oov_pos = word_to_pos[token] - else: - word_to_pos[token] = position - oov_pos = position - token_out = format_unk(oov_pos) - source_seq_out.append(token_out) - source_out.write(" ".join(source_seq_out) + "\n") - - if target_seq is not None: - for token in target_seq.strip().split(): - if token in word_to_pos: - token_out = format_unk(word_to_pos[token]) - else: - token_out = token - target_seq_out.append(token_out) - if target_out is not None: - target_out.write(" ".join(target_seq_out) + "\n") - - -def main(): - parser = argparse.ArgumentParser( - description="Replaces out-of-vocabulary words in both source and target " - "sequences with tokens that indicate the position of the word " - "in the source sequence." 
- ) - parser.add_argument( - "--source", type=str, help="text file with source sequences", required=True - ) - parser.add_argument( - "--target", type=str, help="text file with target sequences", default=None - ) - parser.add_argument("--vocab", type=str, help="vocabulary file", required=True) - parser.add_argument( - "--source-out", - type=str, - help="where to write source sequences with entries", - required=True, - ) - parser.add_argument( - "--target-out", - type=str, - help="where to write target sequences with entries", - default=None, - ) - args = parser.parse_args() - - with open(args.vocab, encoding="utf-8") as vocab: - vocabulary = vocab.read().splitlines() - - target_in = ( - open(args.target, "r", encoding="utf-8") if args.target is not None else None - ) - target_out = ( - open(args.target_out, "w", encoding="utf-8") - if args.target_out is not None - else None - ) - with open(args.source, "r", encoding="utf-8") as source_in, open( - args.source_out, "w", encoding="utf-8" - ) as source_out: - replace_oovs(source_in, target_in, vocabulary, source_out, target_out) - if target_in is not None: - target_in.close() - if target_out is not None: - target_out.close() - - -if __name__ == "__main__": - main() diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/speech_synthesis/docs/vctk_example.md b/spaces/sriramelango/Social_Classification_Public/fairseq/examples/speech_synthesis/docs/vctk_example.md deleted file mode 100644 index 2ba78f3f73d6ea30f9de89150fbbc9dd5923b6fa..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/speech_synthesis/docs/vctk_example.md +++ /dev/null @@ -1,51 +0,0 @@ -[[Back]](..) - -# VCTK - -[VCTK](https://datashare.ed.ac.uk/handle/10283/3443) is an open English speech corpus. We provide examples -for building [Transformer](https://arxiv.org/abs/1809.08895) models on this dataset. - - -## Data preparation -Download data, create splits and generate audio manifests with -```bash -python -m examples.speech_synthesis.preprocessing.get_vctk_audio_manifest \ - --output-data-root ${AUDIO_DATA_ROOT} \ - --output-manifest-root ${AUDIO_MANIFEST_ROOT} -``` - -Then, extract log-Mel spectrograms, generate feature manifest and create data configuration YAML with -```bash -python -m examples.speech_synthesis.preprocessing.get_feature_manifest \ - --audio-manifest-root ${AUDIO_MANIFEST_ROOT} \ - --output-root ${FEATURE_MANIFEST_ROOT} \ - --ipa-vocab --use-g2p -``` -where we use phoneme inputs (`--ipa-vocab --use-g2p`) as example. - -To denoise audio and trim leading/trailing silence using signal processing based VAD, run -```bash -for SPLIT in dev test train; do - python -m examples.speech_synthesis.preprocessing.denoise_and_vad_audio \ - --audio-manifest ${AUDIO_MANIFEST_ROOT}/${SPLIT}.audio.tsv \ - --output-dir ${PROCESSED_DATA_ROOT} \ - --denoise --vad --vad-agg-level 3 -done -``` - -## Training -(Please refer to [the LJSpeech example](../docs/ljspeech_example.md#transformer).) - -## Inference -(Please refer to [the LJSpeech example](../docs/ljspeech_example.md#inference).) - -## Automatic Evaluation -(Please refer to [the LJSpeech example](../docs/ljspeech_example.md#automatic-evaluation).) - -## Results - -| --arch | Params | Test MCD | Model | -|---|---|---|---| -| tts_transformer | 54M | 3.4 | [Download](https://dl.fbaipublicfiles.com/fairseq/s2/vctk_transformer_phn.tar) | - -[[Back]](..) 
diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq_cli/__init__.py b/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq_cli/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/srush/GPTWorld/app.py b/spaces/srush/GPTWorld/app.py deleted file mode 100644 index 372db05e0b50c59290c45a2dd7131b02c22b95c7..0000000000000000000000000000000000000000 --- a/spaces/srush/GPTWorld/app.py +++ /dev/null @@ -1,602 +0,0 @@ -import os -import sys -import gradio as gr -from dataclasses import dataclass -from chalk import * -from colour import Color -import inspect -import os -import openai -from typing import List, Tuple, Optional -from enum import Enum -import io -from contextlib import redirect_stdout -import imageio -import tiktoken -import time -import pandas as pd -import csv -from huggingface_hub import HfApi, Repository -DATASET_REPO_URL = "https://huggingface.co/datasets/srush/gptworld-leaderboard" -HF_TOKEN = os.environ.get("HF_API") -DATA_FILENAME = "data.csv" -DATA_FILE = os.path.join("data", DATA_FILENAME) - -openai.api_key = "" -tab = " " -repo = Repository(git_user="srush", - local_dir="data", clone_from=DATASET_REPO_URL, use_auth_token=HF_TOKEN -) -repo.git_pull() -def start2(prompt, board, api_key): - out = "" - # for chunk in openai.ChatCompletion.create( - # model="gpt-4", - # messages=[{ - # "role": "user", - # "content": prompt, - - # }], - # stream=True, - # temperature= 0 - # ): - board = board#Game(boundary=(9, 9), key=(1, 1), flag=(2, 2), init=(0, 0), walls=[(2, 0)]) - actions = [Actions.DOWNRIGHT, Actions.RIGHT, Actions.DOWNRIGHT, Actions.PICKUP, Actions.DOWNRIGHT] - contents = example(board, actions) - print(contents) - # encoding = tiktoken.encoding_for_model("gpt-4") - # num_tokens = encoding.encode(string) - - for content in contents: - time.sleep(0.005) - content = content - if content is not None: - out += content - print(content, end="") - yield out - yield out - -def start(prompt, board, api_key): - out = "" - # encoding = tiktoken.encoding_for_model("gpt-4") - # num_tokens = encoding.encode(string) - content = "" - openai.api_key = api_key - for chunk in openai.ChatCompletion.create( - model="gpt-4", - messages=[{ - "role": "user", - "content": prompt, - - }], - stream=True, - temperature= 0 - ): - - # for content in contents: - time.sleep(0.005) - content = chunk["choices"][0].get("delta", {}).get("content") - if content is not None: - out += content - print(content, end="") - yield out - yield out - -def num_tokens_from_string(string: str, encoding_name: str="gpt-4") -> int: - """Returns the number of tokens in a text string.""" - encoding = tiktoken.encoding_for_model(encoding_name) - num_tokens = len(encoding.encode(string)) - return num_tokens - - -# + [markdown] id="LMTjwXdD7v-I" -# ## Game Code -# -# This code creates a mini-game to play. It takes place on a hexagon. You are represented by a circle. You need to first pick up a key represented by a triangle. You finally need to make it to the cross to finish the game. The actions show each of the directions you can move. 
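
As a concrete illustration of the rules described above, here is a short usage sketch. It assumes the `Game`, `Actions`, `move`, and `draw_board` definitions from the Game Code cells that follow are in scope; the board layout matches the `easy_game` defined further down (key on the way to the exit, no walls):

```python
# Illustrative play-through (assumes Game, move, draw_board from the cells below).
game = Game(boundary=(3, 3), key=(1, 1), flag=(2, 2), init=(0, 0), walls=[])
p = (0, 0)
p = move(game, "DR", p)      # down-right onto the key cell (1, 1)
p = move(game, "Pickup", p)  # grab the key
p = move(game, "DR", p)      # down-right onto the exit (2, 2)
assert game.won()
draw_board(game.board.grid)  # renders the final board to pic0.png
```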
-# -# - -# + id="Fv3eTRKiV2ZB" cellView="form" -#@title Game Code - -# Possible Actions -class Actions(Enum): - UPRIGHT = "UR" - RIGHT = "R" - DOWNRIGHT = "DR" - DOWNLEFT = "DL" - LEFT = "L" - UPLEFT = "UL" - PICKUP = "Pickup" - -# Movements -change = { - Actions.UPRIGHT : (-1, 1), - Actions.RIGHT : (0, 2), - Actions.DOWNRIGHT : (1, 1), - Actions.DOWNLEFT : (1, -1), - Actions.LEFT : (0, -2), - Actions.UPLEFT : (-1, -1), - Actions.PICKUP : (0, 0), -} -change_str = {action.value: change[action] for action in Actions} -def add(a, b): - return a[0] + b[0], a[1] + b[1] - -@dataclass -class Board: - grid: List[str] - player_pos: Tuple[int, int] - flag_pos: Tuple[int, int] - wall_pos:List[Tuple[int, int]] - key_pos:Optional[Tuple[int, int]] - - def move(self, action: Actions) -> 'Board': - "Move by creating a new board." - d_m = change[action] - if action == Actions.PICKUP: - if self.player_pos == self.key_pos: - return Board(self.grid, self.player_pos, self.flag_pos, self.wall_pos, None) - else: - return self - - new_player_pos = add(self.player_pos, d_m) - # Out of bounds - if new_player_pos[0] < 0 or new_player_pos[0] >= len(self.grid): - return self - if new_player_pos[1] < 0 or new_player_pos[1] >= len(self.grid[0]): - return self - # Can't move through walls - if self.grid[new_player_pos[0]][new_player_pos[1]] == 'W': - return self - - new_grid = [row[:] for row in self.grid] # Create a copy of the grid - new_grid[self.player_pos[0]][self.player_pos[1]] = '.' - new_grid[new_player_pos[0]][new_player_pos[1]] = '@' - return Board(new_grid, new_player_pos, self.flag_pos, self.wall_pos, self.key_pos) - - def __str__(self) -> str: - return '\n'.join(''.join(row) for i, row in enumerate(self.grid)) - - @classmethod - def create_empty_board(cls, size: Tuple[int, int], key_pos, flag_pos, init, wall_pos) -> 'Board': - grid = [['.' if i % 2 == j % 2 else " " for i in range(size[1])] for j in range(size[0])] - player_pos = init - flag_pos = flag_pos - grid[player_pos[0]][player_pos[1]] = '@' - grid[flag_pos[0]][flag_pos[1]] = 'P' - grid[key_pos[0]][key_pos[1]] = 'K' - for pos in wall_pos: - grid[pos[0]][pos[1]] = 'W' - return cls(grid, player_pos, flag_pos, wall_pos, key_pos) - -class Game: - def __init__(self, init, flag, walls, key, boundary): - "Create the version of the game that the AI sees." - self.boundary = boundary - self.board = Board.create_empty_board(boundary, key, flag, init, walls) - self.original = self.board - self.actions = [] - - def move(self, action): - self.board = self.board.move(action) - self.actions.append(action) - - @property - def walls(self): - return self.board.wall_pos - - def won(self): - final = self.board - return final.key_pos is None and final.player_pos == final.flag_pos - - def __repr__(self) -> str: - walls = ",".join(map(str, self.board.wall_pos)) - return f"Game(init={self.board.player_pos}, flag={self.board.flag_pos}, walls={self.board.wall_pos}, boundary={self.boundary}, key={self.board.key_pos})" - -# This is the version of move that the AI can see. 
-def move(game, action, old_pos=None): - - # ACTIONS (must be legal) - if old_pos is None: - old_pos = game.board.player_pos - game.move(Actions(action)) - offset = change_str[action] - pos = (old_pos[0] + offset[0], old_pos[1] + offset[1]) - - - assert 0 <= pos[0] < game.boundary[0], "Row position out of bounds" - assert 0 <= pos[1] < game.boundary[1], "Col position out of bounds" - assert pos not in game.walls, f"Walked into wall {pos}" - if action == "PU": - assert pos == game.key, f"Not over key" - return pos - - -# + [markdown] id="PDOcPiQq8u_Y" -# We can look at the board by drawing it. - -# + colab={"base_uri": "https://localhost:8080/", "height": 221} id="Ic7WgOTi8uF1" outputId="4dc07cb9-9e5f-4d28-d4ea-470ad4b13141" -#@title Drawing code -def draw_board(grid, num=0): - hex = regular_polygon(6, 1).rotate_by(1/12).line_width(0.5).fill_color(Color("white")) - w = hex.get_envelope().width - canvas = empty() - for r, b in enumerate(grid): - def show(v): - if v == ".": - return hex - if v == "@": - return hex + circle(0.35).fill_color(Color("red")) - if v == "P": - x = rectangle(0.25, 0.7).fill_color(Color("blue")).line_width(0) - return hex + (x.rotate_by(0.25/2) + x.rotate_by(-0.25/2)) - if v == "K": - return hex + triangle(0.75).fill_color(Color("purple")) - if v == "W": - return hex.fill_color(Color("black")) - if v ==" ": - return hex - row = hcat(show(v) for i, v in enumerate(b[1 if r %2 else 0::2])) - canvas += row.translate(w * 0.5 if r%2 else 0, 1.5 * r) - canvas = canvas.center_xy().frame(0.5) - canvas = rectangle(canvas.get_envelope().width, canvas.get_envelope().height).line_width(0.5).fill_color(Color("orange")) + canvas - # canvas.render_svg(f"pic{num}.svg", 256) - canvas.render(f"pic{num}.png", 500) - return canvas - - - -# + colab={"base_uri": "https://localhost:8080/", "height": 424} id="nqgPKLu0AMhU" outputId="19e4c6d0-b792-4a34-f4c4-81902974c346" -# game = Game(boundary=(5, 5), key=(0, 2), flag=(4, 4), init=(0, 0), walls=[(2, 2)]) -# display(draw_board(game.board.grid)) -# move(game, "DR", (0,0)) -# display(draw_board(game.board.grid)) - - -# + [markdown] id="PhqF9af5_jvh" -# ## Prompt Code -# -# The puzzle is to write prompt code to make the model accomplish this task. We have provided some scaffolding code for you. The code creates: -# -# * A header for describing the game. -# * A function `make_fun` that shows the AI how to move in code. -# * A footer to describe the final game board that you want the mode to solve. -# -# You can fill this in a watch how the model moves around. - -# + id="jFf7TCOJaVHX" -#@title Make the Prompt -def make_fun(board, actions): - "This function generates python code for few-shot examples" - out = tab + "p = " + str(board.player_pos) - for i, action in enumerate(actions): - new_board = board.move(action) - out += f""" - # TODO ADD CODE - p = move(b, "{action.value}", p) # TODO ADD CODE""" - board = new_board - return out - -def example(game, actions): - """ - This code makes a few shot example. You don't need to edit it. - """ - return f""" -def my_example(b): -{make_fun(game.board, actions)} - return b -""" - - -ex = 0 -def prompt(game): - """ - You should fill these sections out to teach the AI how to play the game. - - Or you may do your own thing :) - """ - print(f""" -# TODO: DESCRIBE THE GAME - -# TODO: DESCRIBE THE ACTIONS -change_str = {change_str} - -{inspect.getsource(move)} -""") - - def example(game, actions): - """ - This code makes a few shot example. You don't need to edit it. 
- """ - global ex - ex += 1 - print(f""" -def example{ex}(): - b = {repr(game)} -{make_fun(game.board, actions)} - return b -# ------------ -""") - - # Create a few shot example (you may not need this) - board = Game(boundary=(3, 3), key=(1, 1), flag=(2, 2), init=(0, 0), walls=[(2, 0)]) - actions = [Actions.DOWNRIGHT, Actions.PICKUP, Actions.DOWNRIGHT] - example(board, actions) - - # Test case - print(f""" -# ---- -# TODO: ADD any custom example code -#--- -# TODO: FINAL description. - -# Contraints for this function:", {repr(game)} -# Please fill this in with code like the examples above (do not provide a description): -# -# The following function `my_example` instantiates a GameBoard called b with these constraints. - -""") - - - -# + [markdown] id="-iecyV7nAbFT" -# This code lets you make a game and see the output for a prompt for that game. There are easy, medium, and hard games. - -# + colab={"base_uri": "https://localhost:8080/"} id="cOneYFok_OMe" outputId="97080186-7322-4ba9-b500-095fb39071aa" -# Easy -easy_game = Game(boundary=(3, 3), key=(1, 1), flag=(2, 2), init=(0, 0), walls=[]) - -# Medium -medium_game = Game(boundary=(5, 5), key=(3, 1), flag=(4, 4), init=(0, 0), walls=[(1, 1)]) - -# Hard (This is the main one) -hard_game = Game(boundary=(8, 15), key=(3, 1), flag=(7, 13), init=(0, 0), walls=[(2, 2), (1, 1), (5, 3), (1, 11), (5, 5), (6, 6), (6, 10), (2, 6), (4, 12)]) - -# Evil -evil_game = Game(boundary=(8, 15), key=(5, 1), flag=(7, 13), init=(0, 0), walls=[(2, 2), (3, 3), (4, 2), (1, 1), (2, 4), (7, 11), (5, 3), (1, 11), (5, 5), (6, 6), (6, 10), (2, 6), (4, 12)]) - -games = {"Easy": easy_game, "Medium": medium_game, "Hard": hard_game, "Evil": evil_game} - -# Anima -def animate(game): - cur = game.original - i = 0 - images = [] - draw_board(cur.grid, i) - images.append(imageio.v2.imread(f"pic{i}.png")) - for act in game.actions: - cur = cur.move(act) - i += 1 - draw_board(cur.grid, i) - images.append(imageio.v2.imread(f"pic{i}.png")) - - return imageio.v2.mimsave('movie.gif', images, **{ 'duration': 1000, 'loop': 100}) - - -def load(inp): - if inp in games: - board = games[inp] - else: - board = eval(inp) - draw_board(board.board.grid, 0).render("tmp.png", 500) - - return "tmp.png", repr(board) - -with gr.Blocks() as app: - - # test = gr.Code(label="test") - # im2 = gr.Gallery() - # im2.style(preview=True) - gr.HTML(""" -
    -

    🌎 GPTWorld 🌍

    - -

-GPTWorld is a prompting game. Your goal is to get an LLM to complete a maze. You are the red dot (🔴); you need to first get the key (▲) and then reach the exit (x). The game takes place on a hexagonal grid with walls [even rows are labeled (0, 0), (0, 2), (0, 4) and odd rows are labeled (1, 1), (1, 3), (1, 5)]. For example, moving "R" from (0, 0) lands on (0, 2), and moving "DR" lands on (1, 1). You play by prompting GPT to write code which solves the game on the right. -

    -
    -""") - - with gr.Row(): - with gr.Column(): - game_desc = gr.Text(label="Game (Select one first or make your own)") - examples = gr.Radio(show_label=False, - choices=["Easy", "Medium", "Hard", "Evil"]) - api_key = gr.Text(label="OpenAI Key", type="password", - value=os.environ.get("OPENAI_API_KEY"), - visible=not os.environ.get("OPENAI_API_KEY")) - with gr.Row(): - start_btn = gr.Button("Prompt >") - cancel_btn = gr.Button("Cancel") - - prompt = gr.Code(label="Prompt (Simple example)", language="python", lines=40, value=""" -# Let's play a game! -# Here are the moves. - -moves = {'UR': (-1, 1), 'R': (0, 2), 'DR': (1, 1), 'DL': (1, -1), - 'L': (0, -2), 'UL': (-1, -1), 'Pickup': (0, 0)} - -# You are going to write a function like this. - -# Game(init=(0, 0), flag=(1, 1), walls=[], boundary=(2, 3), key=(0, 2)) -# (0, 4) and (1, 3) are out-of-bounds. -def example(b): - # Starts at (0, 0) - move(b, "R") - # Moves to (0, 2). This one has the key. - move(b, "Pickup") - # Moves to (1, 1) - move(b, "DL") - return b - -# Be sure not to go out of bounds! - - -# Now you are going to write a function `my_example(b)` -# It will be passed b=%GAME% to inject the game description above. -# Here is the code. -""") - with gr.Column(): - im = gr.Image(show_label=False).style(height=500) - # im.style(preview=True, object_fit="scale-down", columns=1, container=True) - msg_box = gr.HTML(label="", show_label=False) - - output = gr.Code(label="Generating Game Code (You can also edit and rerun)", language="python", value="""def my_example(b): - p = (0, 0) - # This is the code you want it to generate. - p = move(b, "DR", p) - p = move(b, "Pickup", p) - p = move(b, "DL", p) - p = move(b, "R", p) - return b -""", lines=50) - - counter = gr.Slider(label="length", minimum=0, maximum=3000) - run_btn = gr.Button("Rerun ^") - state = gr.State() - - - examples.change(load, inputs=[examples], outputs=[im, game_desc]) - game_desc.submit(load, inputs=[game_desc], outputs=[im, game_desc]) - def run(data): - - board = eval(data[game_desc]) #games[data[examples]] - inp = data[prompt].replace("%GAME%", repr(board)) - print(inp) - q = {} - i = 0 - count = 0 - im_ = "tmp.png" - state_val = None - yield {im: im_, counter: 0, output: "", msg_box: "", state: state_val} - - for prefix in start(inp, board, data[api_key]): - ps = prefix.split("\n") - count += 1 - - if len(ps) > 3 and not ps[-2].strip().startswith("#") and prefix.endswith("\n"): - print("rendering") - try: - exec(prefix + f"\n return b\nq['board'] = my_example({repr(board)})") - except AssertionError as e: - print("fail") - yield {im: f"pic{i-1}.png", counter: count, output: prefix, msg_box: f"You made an illegal move: {e}"} - return - - except: - yield {counter: count, output: prefix, msg_box: f"Code error"} - return - draw_board(q["board"].board.grid, i).render("tmp.png", 500) - i += 1 - im_ = f"pic{i-1}.png" - yield {im: im_, counter: count, output: prefix} - else: - yield {im: im_, counter: count, output: prefix} - if q["board"].won(): - final_msg = "
    " - state_val = (data[prompt], prefix, count, data[examples]) - else: - final_msg = "Didn't make it" - animate(q["board"]) - - yield {im: "movie.gif", counter: count, output: prefix, - msg_box: final_msg, state: state_val} - - - start_prompt = start_btn.click(run, - inputs={prompt, game_desc, api_key, examples}, - outputs={im, output, counter, msg_box, state}) - cancel_btn.click(None, cancels=[start_prompt]) - def run2(data): - c = data[output] - board = eval(data[game_desc]) #games[data[examples]] - print(c) - i = 0 - q = {} - for j in range(len(c)): - - prefix = c[:j] - ps = prefix.split("\n") - if len(ps) > 3 and not ps[-2].strip().startswith("#") and prefix.endswith("\n"): - print("rendering", prefix) - exec(prefix + f"\n return b\nq['board'] = my_example({repr(board)})") - draw_board(q["board"].board.grid, i) - i += 1 - animate(q["board"]) - out = {im: f"movie.gif", msg_box: ""} - print(out) - return out - run_btn.click(run2, inputs={output, game_desc}, outputs={im, msg_box}) - - gr.HTML("""

    Leaderboard

    - -
    -To submit, first run a model that gets to the end. It will output "Victory"! -Then come down to the bottom, type in your team name, and then click submit. -The score is the number of output tokens your solution takes. -
    - -""") - with gr.Row() as row: - team_name = gr.Text(label="Team Name") - leaderboard = gr.Button(value="Submit") - refresh = gr.Button(value="Refresh") - msg = gr.Text(label="Status") - leader = gr.Dataframe(pd.read_csv(DATA_FILE)[["team", "board", "count"]].sort_values(["board", "count"])) - def leaderfn(data): - if data[state] is None: - return {msg: "Nothing to submit"} - if not data[team_name]: - return {msg: "No team name"} - prompt, code, count, board = data[state] - repo.git_pull() - with open(DATA_FILE, "a") as csvfile: - writer = csv.DictWriter(csvfile, fieldnames=["team", "prompt", "code", "count", "board"]) - writer.writerow( - {"team": data[team_name], "prompt": prompt, "code": code, "count": count, "board": board} - ) - commit_url = repo.push_to_hub() - leader_df = pd.read_csv(DATA_FILE)[["team", "board", "count"]] - leader_df = leader_df.sort_values(["board", "count"]) - return {msg: f"Success: Final score: {count} {board}", leader: leader_df} - - leaderboard.click(fn=leaderfn, inputs={state, team_name}, outputs={msg, leader}) - - def refreshfn(): - repo.git_pull() - leader_df = pd.read_csv(DATA_FILE)[["team", "board", "count"]] - leader_df = leader_df.sort_values(["board", "count"]) - return {msg: f"Refreshed", leader: leader_df} - - refresh.click(fn=refreshfn, outputs={msg, leader}) -app.queue().launch() - - -# f = io.StringIO() -# with redirect_stdout(f): -# ex = 0 -# prompt(game) -# my_prompt = f.getvalue() -# print(my_prompt) - -# # + id="LONWUsBLjOHo" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="472afd19-48c1-4924-cabd-639b5e2ad298" -# # Run an LLM and execute it as it runs. -# q = {} -# i = 0 -# for prefix in start(my_prompt): -# ps = prefix.split("\n") -# if len(ps) > 3 and not ps[-2].strip().startswith("#") and prefix.endswith("\n"): -# exec(prefix + "\n return b\nq['board'] = my_example()") -# display(draw_board(q["board"].board.grid, i)) -# i += 1 - - -# animate(i) -# display(Image("movie.gif")) - - -# # Print the number of tokens used -# print("Input Tokens:", num_tokens_from_string(my_prompt)) -# print("Output Tokens:", num_tokens_from_string(prefix)) - diff --git a/spaces/stomexserde/gpt4-ui/Examples/Gta San Andreas Free Download For Windows 10.md b/spaces/stomexserde/gpt4-ui/Examples/Gta San Andreas Free Download For Windows 10.md deleted file mode 100644 index 16dd9a3d7677a34dbe8b1d760bf86c783656d85f..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Gta San Andreas Free Download For Windows 10.md +++ /dev/null @@ -1,34 +0,0 @@ -
    -```html -

    How to Download and Play GTA San Andreas on Windows 10

    -

    GTA San Andreas is one of the most popular and influential games in the Grand Theft Auto series. It was released in 2004 for PlayStation 2, and later for PC, Xbox, Mac, and mobile devices. The game is set in the fictional state of San Andreas, which is based on California and Nevada, and follows the story of Carl "CJ" Johnson, a former gangster who returns to his hometown after his mother's death.

    -

    Gta San Andreas Free Download For Windows 10


    Download Zip https://urlgoal.com/2uI6Xk



    -

    If you want to relive the classic GTA experience on your Windows 10 PC, you have two options: you can either buy the original game from Steam or Rockstar Games Launcher, or you can download the remastered version of GTA San Andreas: The Definitive Edition, which is part of the GTA Trilogy: The Definitive Edition bundle that includes GTA III and GTA Vice City as well.

    -

    In this article, we will show you how to download and play both versions of GTA San Andreas on Windows 10.

    -

    How to Download and Play GTA San Andreas (Original) on Windows 10

    -

    The original version of GTA San Andreas is available for purchase on Steam or Rockstar Games Launcher for $14.99. To download and play it on Windows 10, follow these steps:

    -
      -
1. Create an account on Steam or Rockstar Games Launcher if you don't have one already.
2. Launch Steam or Rockstar Games Launcher and log in with your account.
3. Search for GTA San Andreas on the store and click on "Add to Cart".
4. Proceed to checkout and complete your payment.
5. Once the purchase is confirmed, go to your library and click on "Install" next to GTA San Andreas.
6. Wait for the game to download and install on your PC.
7. Once the installation is finished, click on "Play" to launch the game.
    -

    Note that you may need to install some additional software such as DirectX or Microsoft Visual C++ Redistributable to run the game properly. You may also need to adjust some settings such as resolution, graphics quality, and compatibility mode to optimize the game performance on Windows 10.

    -

    -

    How to Download and Play GTA San Andreas: The Definitive Edition on Windows 10

    -

    GTA San Andreas: The Definitive Edition is a remastered version of the original game that features improved graphics, lighting, textures, models, animations, and sound. It also includes some gameplay enhancements such as auto-aim, camera controls, checkpoints, and achievements. The game is part of the GTA Trilogy: The Definitive Edition bundle that also includes GTA III: The Definitive Edition and GTA Vice City: The Definitive Edition. The bundle costs $59.99 and is available exclusively on Rockstar Games Launcher. To download and play it on Windows 10, follow these steps:

    -
      -
1. Create an account on Rockstar Games Launcher if you don't have one already.
2. Launch Rockstar Games Launcher and log in with your account.
3. Search for GTA Trilogy: The Definitive Edition on the store and click on "Buy Now".
4. Proceed to checkout and complete your payment.
5. Once the purchase is confirmed, go to your library and click on "Install" next to GTA Trilogy: The Definitive Edition.
6. Wait for the bundle to download and install on your PC. Note that it may take a long time as the bundle size is around 100 GB.
7. Once the installation is finished, click on "Play" next to GTA San Andreas: The Definitive Edition to launch the game.
    -

Note that you may need a high-end PC to run the game smoothly as it has higher system requirements than the original version. You may also need to update your drivers and Windows 10 to ensure compatibility. You can check the minimum and recommended system requirements for GTA San Andreas: The Definitive Edition at https://urlgoal.com/2uI8RU.



    -
      -
• Logo stuck
• Blank LCD
• Dead display
• Virus infection
• FRP lock
• Bootloop
• System crash
• Performance issues
    -

    The Huawei Clone V5S Flash File MT6580 7.0 Display Dead Fix Firmware is compatible with the MediaTek MT6580 processor that powers your phone. It is also tested and verified by many users who have successfully flashed their devices with this firmware.

    -

    How to Download the Huawei Clone V5S Flash File MT6580 7.0 Display Dead Fix Firmware?

    -

    To download the Huawei Clone V5S Flash File MT6580 7.0 Display Dead Fix Firmware, you need to follow these steps:

    -
      -
1. Click on this link[^1^] to go to the download page.
2. Enter your email address and password to log in or create a new account if you don't have one.
3. Select the Huawei Clone V5S Flash File MT6580 7.0 Display Dead Fix Firmware from the list of available files.
4. Click on the download button and wait for the file to be downloaded on your computer.
5. Extract the zip file using a tool like WinRAR or 7-Zip and save the extracted files in a folder.
    -

    How to Install the Huawei Clone V5S Flash File MT6580 7.0 Display Dead Fix Firmware?

    -

    To install the Huawei Clone V5S Flash File MT6580 7.0 Display Dead Fix Firmware, you need to follow these steps:

    -
      -
1. Make sure that your phone has at least 50% battery charge and back up your important data before proceeding.
2. Download and install the MediaTek USB Driver on your computer if you don't have it already.
3. Download and install the SP Flash Tool on your computer if you don't have it already.
4. Launch the SP Flash Tool and click on the Scatter-loading button.
5. Browse to the folder where you extracted the firmware files and select the MT6580_Android_scatter.txt file.
6. Click on the Download button and connect your phone to your computer using a USB cable while holding the Volume Down or Volume Up button.
7. The flashing process will start automatically and you will see a green tick mark when it is completed.
8. Disconnect your phone from your computer and reboot it.
    -

    Congratulations! You have successfully installed the Huawei Clone V5S Flash File MT6580 7.0 Display Dead Fix Firmware on your phone. You can now enjoy a smooth and stable performance on your device.

    -
    -
    \ No newline at end of file diff --git a/spaces/sub314xxl/MetaGPT/metagpt/roles/qa_engineer.py b/spaces/sub314xxl/MetaGPT/metagpt/roles/qa_engineer.py deleted file mode 100644 index 491f5f997ccac2042217df1384274bd5d42381af..0000000000000000000000000000000000000000 --- a/spaces/sub314xxl/MetaGPT/metagpt/roles/qa_engineer.py +++ /dev/null @@ -1,179 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -""" -@Time : 2023/5/11 14:43 -@Author : alexanderwu -@File : qa_engineer.py -""" -import os -from pathlib import Path - -from metagpt.actions import DebugError, RunCode, WriteCode, WriteDesign, WriteTest -from metagpt.config import CONFIG -from metagpt.logs import logger -from metagpt.roles import Role -from metagpt.schema import Message -from metagpt.utils.common import CodeParser, parse_recipient -from metagpt.utils.special_tokens import FILENAME_CODE_SEP, MSG_SEP - - -class QaEngineer(Role): - def __init__( - self, - name="Edward", - profile="QaEngineer", - goal="Write comprehensive and robust tests to ensure codes will work as expected without bugs", - constraints="The test code you write should conform to code standard like PEP8, be modular, easy to read and maintain", - test_round_allowed=5, - ): - super().__init__(name, profile, goal, constraints) - self._init_actions( - [WriteTest] - ) # FIXME: a bit hack here, only init one action to circumvent _think() logic, will overwrite _think() in future updates - self._watch([WriteCode, WriteTest, RunCode, DebugError]) - self.test_round = 0 - self.test_round_allowed = test_round_allowed - - @classmethod - def parse_workspace(cls, system_design_msg: Message) -> str: - if not system_design_msg.instruct_content: - return system_design_msg.instruct_content.dict().get("Python package name") - return CodeParser.parse_str(block="Python package name", text=system_design_msg.content) - - def get_workspace(self, return_proj_dir=True) -> Path: - msg = self._rc.memory.get_by_action(WriteDesign)[-1] - if not msg: - return CONFIG.workspace / "src" - workspace = self.parse_workspace(msg) - # project directory: workspace/{package_name}, which contains package source code folder, tests folder, resources folder, etc. 
- if return_proj_dir: - return CONFIG.workspace / workspace - # development codes directory: workspace/{package_name}/{package_name} - return CONFIG.workspace / workspace / workspace - - def write_file(self, filename: str, code: str): - workspace = self.get_workspace() / "tests" - file = workspace / filename - file.parent.mkdir(parents=True, exist_ok=True) - file.write_text(code) - - async def _write_test(self, message: Message) -> None: - code_msgs = message.content.split(MSG_SEP) - # result_msg_all = [] - for code_msg in code_msgs: - # write tests - file_name, file_path = code_msg.split(FILENAME_CODE_SEP) - code_to_test = open(file_path, "r").read() - if "test" in file_name: - continue # Engineer might write some test files, skip testing a test file - test_file_name = "test_" + file_name - test_file_path = self.get_workspace() / "tests" / test_file_name - logger.info(f"Writing {test_file_name}..") - test_code = await WriteTest().run( - code_to_test=code_to_test, - test_file_name=test_file_name, - # source_file_name=file_name, - source_file_path=file_path, - workspace=self.get_workspace(), - ) - self.write_file(test_file_name, test_code) - - # prepare context for run tests in next round - command = ["python", f"tests/{test_file_name}"] - file_info = { - "file_name": file_name, - "file_path": str(file_path), - "test_file_name": test_file_name, - "test_file_path": str(test_file_path), - "command": command, - } - msg = Message( - content=str(file_info), - role=self.profile, - cause_by=WriteTest, - sent_from=self.profile, - send_to=self.profile, - ) - self._publish_message(msg) - - logger.info(f"Done {self.get_workspace()}/tests generating.") - - async def _run_code(self, msg): - file_info = eval(msg.content) - development_file_path = file_info["file_path"] - test_file_path = file_info["test_file_path"] - if not os.path.exists(development_file_path) or not os.path.exists(test_file_path): - return - - development_code = open(development_file_path, "r").read() - test_code = open(test_file_path, "r").read() - proj_dir = self.get_workspace() - development_code_dir = self.get_workspace(return_proj_dir=False) - - result_msg = await RunCode().run( - mode="script", - code=development_code, - code_file_name=file_info["file_name"], - test_code=test_code, - test_file_name=file_info["test_file_name"], - command=file_info["command"], - working_directory=proj_dir, # workspace/package_name, will run tests/test_xxx.py here - additional_python_paths=[development_code_dir], # workspace/package_name/package_name, - # import statement inside package code needs this - ) - - recipient = parse_recipient(result_msg) # the recipient might be Engineer or myself - content = str(file_info) + FILENAME_CODE_SEP + result_msg - msg = Message(content=content, role=self.profile, cause_by=RunCode, sent_from=self.profile, send_to=recipient) - self._publish_message(msg) - - async def _debug_error(self, msg): - file_info, context = msg.content.split(FILENAME_CODE_SEP) - file_name, code = await DebugError().run(context) - if file_name: - self.write_file(file_name, code) - recipient = msg.sent_from # send back to the one who ran the code for another run, might be one's self - msg = Message( - content=file_info, role=self.profile, cause_by=DebugError, sent_from=self.profile, send_to=recipient - ) - self._publish_message(msg) - - async def _observe(self) -> int: - await super()._observe() - self._rc.news = [ - msg for msg in self._rc.news if msg.send_to == self.profile - ] # only relevant msgs count as observed news - return 
len(self._rc.news) - - async def _act(self) -> Message: - if self.test_round > self.test_round_allowed: - result_msg = Message( - content=f"Exceeding {self.test_round_allowed} rounds of tests, skip (writing code counts as a round, too)", - role=self.profile, - cause_by=WriteTest, - sent_from=self.profile, - send_to="", - ) - return result_msg - - for msg in self._rc.news: - # Decide what to do based on observed msg type, currently defined by human, - # might potentially be moved to _think, that is, let the agent decides for itself - if msg.cause_by == WriteCode: - # engineer wrote a code, time to write a test for it - await self._write_test(msg) - elif msg.cause_by in [WriteTest, DebugError]: - # I wrote or debugged my test code, time to run it - await self._run_code(msg) - elif msg.cause_by == RunCode: - # I ran my test code, time to fix bugs, if any - await self._debug_error(msg) - self.test_round += 1 - result_msg = Message( - content=f"Round {self.test_round} of tests done", - role=self.profile, - cause_by=WriteTest, - sent_from=self.profile, - send_to="", - ) - return result_msg diff --git a/spaces/sub314xxl/MetaGPT/tests/metagpt/roles/test_project_manager.py b/spaces/sub314xxl/MetaGPT/tests/metagpt/roles/test_project_manager.py deleted file mode 100644 index ebda5901da4163429f0f446e6b00d37571e34c49..0000000000000000000000000000000000000000 --- a/spaces/sub314xxl/MetaGPT/tests/metagpt/roles/test_project_manager.py +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -""" -@Time : 2023/5/12 10:23 -@Author : alexanderwu -@File : test_project_manager.py -""" -import pytest - -from metagpt.logs import logger -from metagpt.roles import ProjectManager -from tests.metagpt.roles.mock import MockMessages - - -@pytest.mark.asyncio -async def test_project_manager(): - project_manager = ProjectManager() - rsp = await project_manager.handle(MockMessages.system_design) - logger.info(rsp) diff --git a/spaces/sukiru/BlueArchiveTTS/text/japanese.py b/spaces/sukiru/BlueArchiveTTS/text/japanese.py deleted file mode 100644 index 375e4d50872d5c68ee57ca17470a2ca425425eba..0000000000000000000000000000000000000000 --- a/spaces/sukiru/BlueArchiveTTS/text/japanese.py +++ /dev/null @@ -1,153 +0,0 @@ -import re -from unidecode import unidecode -import pyopenjtalk - - -# Regular expression matching Japanese without punctuation marks: -_japanese_characters = re.compile( - r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# Regular expression matching non-Japanese characters or punctuation marks: -_japanese_marks = re.compile( - r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# List of (symbol, Japanese) pairs for marks: -_symbols_to_japanese = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('%', 'パーセント') -]] - -# List of (romaji, ipa) pairs for marks: -_romaji_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('ts', 'ʦ'), - ('u', 'ɯ'), - ('j', 'ʥ'), - ('y', 'j'), - ('ni', 'n^i'), - ('nj', 'n^'), - ('hi', 'çi'), - ('hj', 'ç'), - ('f', 'ɸ'), - ('I', 'i*'), - ('U', 'ɯ*'), - ('r', 'ɾ') -]] - -# List of (romaji, ipa2) pairs for marks: -_romaji_to_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('u', 'ɯ'), - ('ʧ', 'tʃ'), - ('j', 'dʑ'), - ('y', 'j'), - ('ni', 'n^i'), - ('nj', 'n^'), - ('hi', 'çi'), - ('hj', 'ç'), - ('f', 'ɸ'), - ('I', 'i*'), - ('U', 'ɯ*'), - ('r', 'ɾ') -]] - -# List of (consonant, sokuon) pairs: -_real_sokuon = [(re.compile('%s' % x[0]), x[1]) for x in [ - 
(r'Q([↑↓]*[kg])', r'k#\1'), - (r'Q([↑↓]*[tdjʧ])', r't#\1'), - (r'Q([↑↓]*[sʃ])', r's\1'), - (r'Q([↑↓]*[pb])', r'p#\1') -]] - -# List of (consonant, hatsuon) pairs: -_real_hatsuon = [(re.compile('%s' % x[0]), x[1]) for x in [ - (r'N([↑↓]*[pbm])', r'm\1'), - (r'N([↑↓]*[ʧʥj])', r'n^\1'), - (r'N([↑↓]*[tdn])', r'n\1'), - (r'N([↑↓]*[kg])', r'ŋ\1') -]] - - -def symbols_to_japanese(text): - for regex, replacement in _symbols_to_japanese: - text = re.sub(regex, replacement, text) - return text - - -def japanese_to_romaji_with_accent(text): - '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html''' - text = symbols_to_japanese(text) - sentences = re.split(_japanese_marks, text) - marks = re.findall(_japanese_marks, text) - text = '' - for i, sentence in enumerate(sentences): - if re.match(_japanese_characters, sentence): - if text != '': - text += ' ' - labels = pyopenjtalk.extract_fullcontext(sentence) - for n, label in enumerate(labels): - phoneme = re.search(r'\-([^\+]*)\+', label).group(1) - if phoneme not in ['sil', 'pau']: - text += phoneme.replace('ch', 'ʧ').replace('sh', - 'ʃ').replace('cl', 'Q') - else: - continue - # n_moras = int(re.search(r'/F:(\d+)_', label).group(1)) - a1 = int(re.search(r"/A:(\-?[0-9]+)\+", label).group(1)) - a2 = int(re.search(r"\+(\d+)\+", label).group(1)) - a3 = int(re.search(r"\+(\d+)/", label).group(1)) - if re.search(r'\-([^\+]*)\+', labels[n + 1]).group(1) in ['sil', 'pau']: - a2_next = -1 - else: - a2_next = int( - re.search(r"\+(\d+)\+", labels[n + 1]).group(1)) - # Accent phrase boundary - if a3 == 1 and a2_next == 1: - text += ' ' - # Falling - elif a1 == 0 and a2_next == a2 + 1: - text += '↓' - # Rising - elif a2 == 1 and a2_next == 2: - text += '↑' - if i < len(marks): - text += unidecode(marks[i]).replace(' ', '') - return text - - -def get_real_sokuon(text): - for regex, replacement in _real_sokuon: - text = re.sub(regex, replacement, text) - return text - - -def get_real_hatsuon(text): - for regex, replacement in _real_hatsuon: - text = re.sub(regex, replacement, text) - return text - - -def japanese_to_ipa(text): - text = japanese_to_romaji_with_accent(text).replace('...', '…') - text = re.sub( - r'([aiueo])\1+', lambda x: x.group(0)[0]+'ː'*(len(x.group(0))-1), text) - text = get_real_sokuon(text) - text = get_real_hatsuon(text) - for regex, replacement in _romaji_to_ipa: - text = re.sub(regex, replacement, text) - return text - - -def japanese_to_ipa2(text): - text = japanese_to_romaji_with_accent(text).replace('...', '…') - text = get_real_sokuon(text) - text = get_real_hatsuon(text) - for regex, replacement in _romaji_to_ipa2: - text = re.sub(regex, replacement, text) - return text - - -def japanese_to_ipa3(text): - text = japanese_to_ipa2(text).replace('n^', 'ȵ').replace( - 'ʃ', 'ɕ').replace('*', '\u0325').replace('#', '\u031a') - text = re.sub( - r'([aiɯeo])\1+', lambda x: x.group(0)[0]+'ː'*(len(x.group(0))-1), text) - text = re.sub(r'((?:^|\s)(?:ts|tɕ|[kpt]))', r'\1ʰ', text) - return text diff --git a/spaces/supertori/files/stable-diffusion-webui/modules/mac_specific.py b/spaces/supertori/files/stable-diffusion-webui/modules/mac_specific.py deleted file mode 100644 index ddcea53b920d63a6a0b3a00dd3c54b36201ff761..0000000000000000000000000000000000000000 --- a/spaces/supertori/files/stable-diffusion-webui/modules/mac_specific.py +++ /dev/null @@ -1,53 +0,0 @@ -import torch -from modules import paths -from modules.sd_hijack_utils import CondFunc -from packaging import version - - -# has_mps is only available in 
nightly pytorch (for now) and macOS 12.3+. -# check `getattr` and try it for compatibility -def check_for_mps() -> bool: - if not getattr(torch, 'has_mps', False): - return False - try: - torch.zeros(1).to(torch.device("mps")) - return True - except Exception: - return False -has_mps = check_for_mps() - - -# MPS workaround for https://github.com/pytorch/pytorch/issues/89784 -def cumsum_fix(input, cumsum_func, *args, **kwargs): - if input.device.type == 'mps': - output_dtype = kwargs.get('dtype', input.dtype) - if output_dtype == torch.int64: - return cumsum_func(input.cpu(), *args, **kwargs).to(input.device) - elif cumsum_needs_bool_fix and output_dtype == torch.bool or cumsum_needs_int_fix and (output_dtype == torch.int8 or output_dtype == torch.int16): - return cumsum_func(input.to(torch.int32), *args, **kwargs).to(torch.int64) - return cumsum_func(input, *args, **kwargs) - - -if has_mps: - # MPS fix for randn in torchsde - CondFunc('torchsde._brownian.brownian_interval._randn', lambda _, size, dtype, device, seed: torch.randn(size, dtype=dtype, device=torch.device("cpu"), generator=torch.Generator(torch.device("cpu")).manual_seed(int(seed))).to(device), lambda _, size, dtype, device, seed: device.type == 'mps') - - if version.parse(torch.__version__) < version.parse("1.13"): - # PyTorch 1.13 doesn't need these fixes but unfortunately is slower and has regressions that prevent training from working - - # MPS workaround for https://github.com/pytorch/pytorch/issues/79383 - CondFunc('torch.Tensor.to', lambda orig_func, self, *args, **kwargs: orig_func(self.contiguous(), *args, **kwargs), - lambda _, self, *args, **kwargs: self.device.type != 'mps' and (args and isinstance(args[0], torch.device) and args[0].type == 'mps' or isinstance(kwargs.get('device'), torch.device) and kwargs['device'].type == 'mps')) - # MPS workaround for https://github.com/pytorch/pytorch/issues/80800 - CondFunc('torch.nn.functional.layer_norm', lambda orig_func, *args, **kwargs: orig_func(*([args[0].contiguous()] + list(args[1:])), **kwargs), - lambda _, *args, **kwargs: args and isinstance(args[0], torch.Tensor) and args[0].device.type == 'mps') - # MPS workaround for https://github.com/pytorch/pytorch/issues/90532 - CondFunc('torch.Tensor.numpy', lambda orig_func, self, *args, **kwargs: orig_func(self.detach(), *args, **kwargs), lambda _, self, *args, **kwargs: self.requires_grad) - elif version.parse(torch.__version__) > version.parse("1.13.1"): - cumsum_needs_int_fix = not torch.Tensor([1,2]).to(torch.device("mps")).equal(torch.ShortTensor([1,1]).to(torch.device("mps")).cumsum(0)) - cumsum_needs_bool_fix = not torch.BoolTensor([True,True]).to(device=torch.device("mps"), dtype=torch.int64).equal(torch.BoolTensor([True,False]).to(torch.device("mps")).cumsum(0)) - cumsum_fix_func = lambda orig_func, input, *args, **kwargs: cumsum_fix(input, orig_func, *args, **kwargs) - CondFunc('torch.cumsum', cumsum_fix_func, None) - CondFunc('torch.Tensor.cumsum', cumsum_fix_func, None) - CondFunc('torch.narrow', lambda orig_func, *args, **kwargs: orig_func(*args, **kwargs).clone(), None) - diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Aid4mail Portable [EXCLUSIVE].md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Aid4mail Portable [EXCLUSIVE].md deleted file mode 100644 index 6c7be579875632155c84f7764d4707b994de526c..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Aid4mail Portable [EXCLUSIVE].md +++ /dev/null @@ 
-1,73 +0,0 @@ - -

    What is Aid4Mail Portable and Why You Need It

    -

    If you are looking for a fast, reliable and accurate email processing software, you should consider Aid4Mail Portable. This is a powerful tool that can help you recover, collect, search and convert email data from various sources and formats. Whether you need to migrate email accounts, perform e-discovery and forensics, or prepare mail for archiving and analysis, Aid4Mail Portable can handle it all.
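As a rough illustration of the kind of work such converters automate (a generic sketch using Python's standard-library mailbox module, not Aid4Mail itself; the file names here are made up), exporting every message in an mbox archive to individual EML files can look like this:

    import mailbox
    from pathlib import Path

    # Hypothetical input/output names, for illustration only.
    out_dir = Path("eml_out")
    out_dir.mkdir(exist_ok=True)

    for i, message in enumerate(mailbox.mbox("archive.mbox")):
        # Write each message out as a standalone .eml (RFC 822) file.
        (out_dir / f"message_{i:05d}.eml").write_bytes(message.as_bytes())

Dedicated tools add the pieces this sketch ignores: proprietary formats such as PST, folder hierarchies, attachment handling, error recovery and reporting.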

    -

    Aid4mail Portable


Download Zip https://cinurl.com/2uEZ7H



    -

    Aid4Mail Portable Features and Benefits

    -

    Aid4Mail Portable comes in three editions: Converter, Investigator and Enterprise. Each edition has its own set of features and benefits, depending on your needs and preferences.

    -
      -
    • Aid4Mail Converter: This edition is ideal for downloading and converting email data to different formats, such as PST, MSG, mbox, EML, CSV, HTML, PDF and more. You can use it to process unlimited mail stores, whether in-house or from external sources. It supports most popular formats and service providers, such as Outlook, Gmail, IMAP and more. It also offers secure access to Gmail, Office 365 and Outlook.com through OAuth 2. It is very fast and accurate, with incremental processing and error recovery protocol.
    • -
    • Aid4Mail Investigator: This edition adds powerful email filtering and Python scripting capabilities to the Converter edition. You can use it to perform data culling with Aid4Mail's search engine and native pre-acquisition filtering. You can also use Gmail and Microsoft 365 syntax to create complex search queries. Moreover, you can recover deleted and hidden email data, and process corrupt or unknown mail formats with forensic features.
    • -
    • Aid4Mail Enterprise: This edition includes all the features of the Investigator edition, plus some extra ones. You can extend your capabilities with support for Google Vault, Mimecast and Proofpoint exports. You can also migrate mail to live accounts using IMAP or native APIs. You can integrate the CLI with your tools and manage multi-user licenses hosted on your server or on a flash drive for work in the field.
    • -
    -

    How to Use Aid4Mail Portable

    -

    Aid4Mail Portable is easy to use and has a simple and intuitive user interface. You can run it from a USB drive or install it on a server. To use it, you just need to follow these steps:

    -
      -
1. Select your source mail data. You can choose from local files, folders or profiles, or connect to online accounts.
2. Select your target mail format or service provider. You can choose from a wide range of options, depending on your purpose.
3. Optionally, apply filters or scripts to refine your email data. You can use keywords, dates, attachments, headers and more.
4. Start the conversion process and wait for it to finish. You can monitor the progress information, conversion statistics and logs.
5. Access your converted mail data in the target location or account.
    -

    Conclusion

    -

    Aid4Mail Portable is a versatile and powerful email processing software that can help you with various tasks related to email data. It is fast, reliable and accurate, with support for all popular formats and service providers. It also offers advanced features for email filtering, scripting, forensics and migration. If you want to try Aid4Mail Portable for yourself, you can download a free trial version from their website.

    -

    -

    Who Can Benefit from Aid4Mail Portable

    -

    Aid4Mail Portable is designed for anyone who needs to work with email data in various scenarios. Whether you are a home user, a business owner, a legal professional, or a forensic investigator, you can find a use for Aid4Mail Portable. Here are some examples of how Aid4Mail Portable can help you:

    -
      -
    • If you want to switch to a new email program or service provider, you can use Aid4Mail Portable to migrate your email data without losing any information or functionality.
    • -
    • If you need to archive your email data for compliance or backup purposes, you can use Aid4Mail Portable to convert your email data to compact and non-proprietary files that are easy to store and access.
    • -
    • If you need to analyze your email data for business intelligence or legal discovery, you can use Aid4Mail Portable to search and filter your email data using advanced criteria and export them to formats that are compatible with your tools.
    • -
    • If you need to recover your email data from damaged or deleted sources, you can use Aid4Mail Portable to extract email data from corrupt or unknown mail formats and restore them to readable formats.
    • -
    -

    How to Get Aid4Mail Portable

    -

    If you are interested in trying Aid4Mail Portable for yourself, you can download a free trial version from their website. The trial version allows you to process up to 50 messages per folder and has some limitations on the target formats and features. You can also purchase a license for the edition that suits your needs and preferences. The license is valid for one year and includes free updates and support. You can also renew your license at a discounted price after one year.

    -

    Conclusion

    -

    Aid4Mail Portable is a versatile and powerful email processing software that can help you with various tasks related to email data. It is fast, reliable and accurate, with support for all popular formats and service providers. It also offers advanced features for email filtering, scripting, forensics and migration. If you want to try Aid4Mail Portable for yourself, you can download a free trial version from their website.

    -

    How Aid4Mail Portable Compares to Other Email Processing Software

    -

    There are many email processing software available on the market, but not all of them can match the quality and performance of Aid4Mail Portable. Here are some reasons why Aid4Mail Portable stands out from the rest:

    -
      -
    • Aid4Mail Portable is faster and more accurate than most email processing software. It can process thousands of emails per minute without losing or compromising any data.
    • -
    • Aid4Mail Portable supports more formats and service providers than most email processing software. It can handle over 40 mail formats and 20 service providers, including popular ones like Outlook, Gmail, Office 365, IMAP and more.
    • -
    • Aid4Mail Portable offers more features and capabilities than most email processing software. It can perform email filtering, scripting, forensics, migration and more, with advanced options and customization.
    • -
    • Aid4Mail Portable is easier to use and more reliable than most email processing software. It has a simple and intuitive user interface, a robust IMAP client, a remote authentication tool, and a detailed log and report system.
    • -
    • Aid4Mail Portable is developed by perfectionists in Switzerland who have over 20 years of experience in email processing. They constantly update and improve Aid4Mail Portable to meet the highest standards of quality and customer satisfaction.
    • -
    -

    How to Contact Aid4Mail Portable

    -

    If you have any questions or feedback about Aid4Mail Portable, you can contact them by email or phone. You can also visit their website for more information and resources. Here are their contact details:

    -

    Email: support@aid4mail.com

    -

    Phone: +41 21 800 80 80

    -

    Website: https://www.aid4mail.com/

    -

    Conclusion

    -

    Aid4Mail Portable is a versatile and powerful email processing software that can help you with various tasks related to email data. It is fast, reliable and accurate, with support for all popular formats and service providers. It also offers advanced features for email filtering, scripting, forensics and migration. If you want to try Aid4Mail Portable for yourself, you can download a free trial version from their website.

    -

    How to Optimize Your Email Data with Aid4Mail Portable

    -

    Aid4Mail Portable not only helps you to convert and migrate your email data, but also to optimize it for better performance and usability. Here are some tips on how to optimize your email data with Aid4Mail Portable:

    -
      -
    • Remove duplicates and unwanted emails. Aid4Mail Portable can automatically detect and remove duplicate messages from your email data, saving you space and time. You can also use filters and scripts to exclude emails that are irrelevant or unnecessary for your purpose.
    • -
    • Extract attachments and embedded files. Aid4Mail Portable can extract attachments and embedded files from your email data and save them in separate folders. This can reduce the size of your email files and make them easier to manage and access.
    • -
    • Compress and encrypt your email data. Aid4Mail Portable can compress your email data to reduce its size and improve its portability. You can also encrypt your email data with a password to protect it from unauthorized access or tampering.
    • -
    • Index and search your email data. Aid4Mail Portable can create an index of your email data that allows you to search it quickly and efficiently. You can use keywords, dates, attachments, headers and more to find the emails you need.
    • -
    • Analyze and report on your email data. Aid4Mail Portable can generate statistics and reports on your email data, such as the number of messages, senders, recipients, domains, attachments, sizes and more. You can use this information to gain insights into your email activity and behavior.
    • -
    -

    How to Troubleshoot Common Issues with Aid4Mail Portable

    -

    Aid4Mail Portable is designed to be reliable and error-free, but sometimes you may encounter some issues or difficulties when using it. Here are some common issues and how to troubleshoot them:

    -
      -
    • The conversion process is slow or fails. This may be due to insufficient disk space, memory or network bandwidth. Make sure you have enough resources available for the conversion process. You can also try to reduce the number of concurrent conversions or split large mail stores into smaller ones.
    • -
    • The source or target mail format is not supported. Aid4Mail Portable supports over 40 mail formats and 20 service providers, but it may not support some rare or obsolete formats. Check the list of supported formats on their website or contact them for assistance.
    • -
    • The source or target mail account is not accessible. This may be due to incorrect login credentials, firewall settings, network issues or server problems. Make sure you have entered the correct login credentials, allowed access through your firewall, checked your network connection and verified the server status.
    • -
    • The converted mail data is incomplete or corrupted. This may be due to errors in the source mail data, such as missing headers, invalid characters, encryption or compression. Aid4Mail Portable can recover most of these errors, but some may be irreparable. You can try to repair the source mail data with another tool or contact the original sender for a copy.
    • -
    • The converted mail data is not compatible with the target program or service provider. This may be due to differences in the mail formats or features supported by the target program or service provider. For example, some programs or service providers may have limits on the message size, attachment size, folder depth or character encoding. You can try to adjust these settings in Aid4Mail Portable or in the target program or service provider.
    • -
    -

    Conclusion

    -

    Aid4Mail Portable is a versatile and powerful email processing software that can help you with various tasks related to email data. It is fast, reliable and accurate, with support for all popular formats and service providers. It also offers advanced features for email filtering, scripting, forensics and migration. You can use it to convert, migrate, archive, analyze and optimize your email data with ease and efficiency. If you want to try Aid4Mail Portable for yourself, you can download a free trial version from their website or purchase a license for the edition that suits your needs and preferences.

    -
    -
    \ No newline at end of file diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/ExpressVPNv309keygen.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/ExpressVPNv309keygen.md deleted file mode 100644 index 26ce7a6b476a027c613a03cbd2332578888517d0..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/ExpressVPNv309keygen.md +++ /dev/null @@ -1,8 +0,0 @@ -

    expressVPNv309keygen


    Download Zip 🌟 https://cinurl.com/2uEZ7y



    - -PDF The Estill Voice Model Theory And Translation Download: ( PDF, 16.2 MB) -The Estill Voice model: A Comprehensive Study Of Theoretical Approaches To The Estill Theory Of The Language Of The Human Mind This book provides a comprehensive review of the estill voice model and empowers readers to apply this model in translation and linguistic research. -It also provides examples of translational research using estill voice and the estill model and contains practical recommendations for researchers working with an estill voice in translation. 8a78ff9644
    -
    -
    -

    diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Full Download Edius Pro 6.5 Cracked Version Key And.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Full Download Edius Pro 6.5 Cracked Version Key And.md deleted file mode 100644 index 505376ada9c373e8e1e23813fb8d1db9228d77a6..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Full Download Edius Pro 6.5 Cracked Version Key And.md +++ /dev/null @@ -1,7 +0,0 @@ -

    Full Download Edius Pro 6.5 Cracked Version Key And


    Download Zip » https://cinurl.com/2uEX25



    - -EDIUS Pro 9. Software for non-linear editing. More creative options and real-time editing without rendering in all popular SD, HD and . 3D. -NLE has introduced a new version of its EDIUS Pro software. This non-linear editing product offers its users advanced editing capabilities and allows you to work with video images of various formats. The program combines features such as converting video to .3D, image enhancement, and creating transition effects. EDIUS Pro also features 8a78ff9644
    -
    -
    -

    diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/High Quality Download Rakht Charitra I 3 In Hindi 720p.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/High Quality Download Rakht Charitra I 3 In Hindi 720p.md deleted file mode 100644 index 2953bc56dfd268d4f45aa98f22cb210b7d3534b5..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/High Quality Download Rakht Charitra I 3 In Hindi 720p.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Download Rakht Charitra I 3 In Hindi 720p


    Download Zip ✑ ✑ ✑ https://cinurl.com/2uEYOj



    - -Rakht Charitra 2 dubbed in Hindi. 101 views • 11 jun. 2020 • Suriya actor, bibek aboroi … Show more. Show more. Music in this video. #Rakta Charitra - I is an upcoming biopic directed by Ram Gopal Varma. Movie stars Vivek Oberoi. Directed by Aditya Narayan. The film tells the life story of musician Rakhi Shartri. The film is set to release on June 27, 2020. Cast: Vivek Oberoi, Biji Bhallal, Anushka Sharma, Javed Akhtar, Asif Ali, Divya Datta, Kabir Bedi, Nasser, Ritesh Deshmukh and others. #RakhtCharitra #RakhtCharitra #AdityaNarayan #AdityaNarayan #VivekOberoi #VivekOberoi #DivyaDatta #DivyaDatta #BijiBhallal #BijiBhallal #AusifAjat #KabirBedi #Nasser #RiteshDeshmukh #AnushkaSharma #Vee 8a78ff9644
    -
    -
    -

    diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Le Bras De La Vengeance FRENCH DVDRIP !!HOT!!.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Le Bras De La Vengeance FRENCH DVDRIP !!HOT!!.md deleted file mode 100644 index 1e74cf71764f4f11422bd142695e34a8fba0bcc7..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Le Bras De La Vengeance FRENCH DVDRIP !!HOT!!.md +++ /dev/null @@ -1,8 +0,0 @@ -

    Le Bras de la Vengeance FRENCH DVDRIP


    Download ➡ https://cinurl.com/2uEYKN



    -
    -Brazilian movie. Raw. Torrent. Le Bras de la Vengeance. Le Bras de la Vengeance is a Brazilian comedy drama based on a stage production, which has been shot on location and contains an English language soundtrack. Director: Jesse Buchler. Production Company: Lionsgate. Language: French. Genre: Drama. Le Bras de la Vengeance is a Brazilian comedy drama based on a stage production, which has been shot on location and contains an English language soundtrack. Director: Jesse Buchler. Production Company: Lionsgate. Language: French. Genre: Drama. Brazilian movie. - -Some Other Title: Le Bras de la Vengeance. The YouTube Video is the property of Le Bras De La Vengeance and is. Le Bras De La Vengeance. Directed by Jesse Buchler. With Sean Patrick Flannery, Nathalia Avelar,. Le Bras de la Vengeance. Le Bras de la Vengeance (2005) english subtitle Download Le Bras de la Vengeance Torrent. Le Bras De La Vengeance BR. Watch Le Bras De La Vengeance Full Movie Online Free In HD Quality.. Le Bras de la Vengeance BR. Le Bras de la Vengeance. Director: Jesse Buchler. With Nathalia Avelar, Sean Patrick Flannery. English. Directed by Jesse Buchler. With Nathalia Avelar, Sean Patrick Flannery.. Le Bras de la Vengeance BR. Watch Le Bras De La Vengeance Full Movie Online Free In HD Quality. Le Bras De La Vengeance (2005) BR Le Bras de la Vengeance (2005). The YouTube Video is the property of Le Bras De La Vengeance and is. Le Bras De La Vengeance. Directed by Jesse Buchler. With Sean Patrick Flannery, Nathalia Avelar,. Le Bras De La Vengeance. Le Bras de la Vengeance is a Brazilian comedy drama based on a stage production, which has been shot on location and contains an English language soundtrack. Director: Jesse Buchler. Production Company: Lionsgate. Language: French. Genre: Drama. The YouTube Video is the property of Le Bras De La Vengeance and is. Le Bras De La Vengeance BR. Watch Le Bras De La Vengeance Full Movie Online Free In HD Quality. Le Bras De La Vengeance (2005) BR Le Bras De La Vengeance (2005). The YouTube Video is the property of Le Bras De La Vengeance and is. Le Bras De La Vengeance. Directed by Jesse Buchler. With Sean Patrick Flannery, Nathalia Avelar,. Le Bras De La Vengeance. Le Bras de la Vengeance is a 4fefd39f24
    -
    -
    -

    diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Naruto Movie 6 Road To Ninja Download __FULL__ Torrent English Sub.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Naruto Movie 6 Road To Ninja Download __FULL__ Torrent English Sub.md deleted file mode 100644 index fe3a0b52fcd85fcad3b753add1d4dd8cccbf9eb8..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Naruto Movie 6 Road To Ninja Download __FULL__ Torrent English Sub.md +++ /dev/null @@ -1,7 +0,0 @@ -
    -

    the trailer of the film was first revealed on april 2, 2012. the preview was posted online on the official website of studio pierrot. on april 4, 2012, the film was first shown in japan. it was then shown a few days later in united states, canada, and europe on april 18, 2012.

    -

    naruto movie 6 road to ninja download torrent english sub


    Download File ☆☆☆ https://cinurl.com/2uEXBP



    -

    naruto movie 6: road to ninja''' is the sequel to the film naruto shippuden the movie: bonds. the new film has a final goal and tone different than the previous film, in which naruto's memories are erased by kakashi. the movie starts after naruto defeats his father and leaves konoha to become a ninja, and follows his pilgrimage to become the next raikage. while the story is similar to the previous film, the movie's goal is to depict naruto as a dangerous and mad ninja, rather than a courageous sage. naruto's growth in this film is regarded as the slowest in the series.

    -

    the anime revolves around a ninja village formed by a group of genetically.. naruto shippuden movie naruto special episodes hd english 720p second. naruto shippuden the movie: blood prison (2010) 1080p blu-ray remux avc dts-hd ma 5.1 aya.mkv. 2.06 gb. ed. blood prison 2 heisei era naruto shippuden movie 6 (2010) 1080p. [alzoo] codec, movie.gab, mp3, flac, wav. naruto shippuden - naruto shippuden movie 6: naruto the movie part 6 english. 6.3-inch screen, with an optimal resolution of 1280 x 720 and a pixel density of. bollywood in hindi torrent [720p/1080p/4k]. bollywood movies. download, watch, stream bollywood movies in hindi subtitles. ddl torrent [720p/1080p/4k]. download naruto shippuden [720p/1080p/4k]. naruto shippuden [720p/1080p/4k]. naruto shippuden [dvd-rii. bollywood movies by vijay acharaya. full hindi movies in 720p or 1080p for free. free animes for download naruto shippuden movie 7 the last 1080p blu-ray. movies torrents. naruto shippuden the movie blood prison 720p. 720p (390 mb) | download. movie 480p (390 mb): ddl torrent. movie 720p + 1080p (2 gib): batch torrent. image may. naruto shippuden: the lost tower 720p dcam english sub .-macbook pro-arabic,.mkv (star wars the clone wars season 2 episode 18 and episode 15, download or watch in. download naruto shippuden movie 5 - blood prison eng subbed animekuro download 1080p 720p & 240p in small size from anime. naruto shippuden movie 6: naruto the movie part 6.1080p web-dl x264: 720p [ed. pack] (episode summary: trailer). full movie 1080p: 720p [ed. pack] (the girl with the. download naruto shippuden movie 6 the lost tower english subbed. naruto shippuden the movie blood prison 720p eng subbed edition. naruto shippuden. the movie s story is set in the year before the anime which covers the naruto s journey as he becomes the. watch naruto shippuden the movie 4:the lost tower (2010) online free in. naruto movie blood prison 720p torrent the complete naruto the movies list. the japanese anime series naruto,. fd7225991cd iron man 3 full movie in hindi download 720p [chapitre] français. suivre. jack the ripper hd full movie. bollywood movies [hd/720p] [publish] [easy. free. bollywood movie [hd/720p] [easy. direct downloader] - manish. bollywood movies - 48345 downloads as hd and. the movie revolves around a young naruto, born on a day of. p-people the movie, not that annoying. [9-th december 2005] - tags: naruto shippuden movie 6: naruto the movie part 6 english. naruto.

    899543212b
    -
    -
    \ No newline at end of file diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Support Driver Huawei E173 Zip.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Support Driver Huawei E173 Zip.md deleted file mode 100644 index 1dd4ae2191d084cda0c996e69ddd3553ce517503..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Support Driver Huawei E173 Zip.md +++ /dev/null @@ -1,25 +0,0 @@ -
    -

    How to Download and Install Huawei E173 Driver

    -

    Huawei E173 is a USB modem that supports high-speed mobile broadband. It can be used to connect your laptop or desktop to the internet using a SIM card. However, you may need to install the driver for Huawei E173 before you can use it properly. Here are some steps to help you download and install Huawei E173 driver.

    -

    support driver huawei e173 zip


DOWNLOAD https://cinurl.com/2uEZgQ



    -
      -
    1. Visit the official Huawei website and go to the Driver List page[^1^] [^2^]. You can also use the search box to find your product model.
    2. -
    3. Enter your device serial number or select Huawei E173 from the drop-down menu. You will see a list of available drivers for your device.
    4. -
    5. Click on the download icon next to the driver you want to download. You will be redirected to a page where you can agree to the terms and conditions and start the download.
    6. -
    7. Save the driver file (usually a zip file) to your computer. You may need to extract it using a tool like WinZip or WinRAR.
    8. -
    9. Open the extracted folder and double-click on the setup.exe file. Follow the on-screen instructions to install the driver.
    10. -
    11. Restart your computer and plug in your Huawei E173 modem. It should be recognized by your system and ready to use.
    12. -
    -

    If you encounter any problems with downloading or installing Huawei E173 driver, you can contact Huawei customer service for assistance[^1^] [^2^]. You can also check out this forum post[^3^] for some alternative methods and tips.

    Huawei E173 driver is compatible with Windows XP, Vista, 7, 8, and 10. It also supports Mac OS X 10.6 and above. You can use it to access various network modes, such as GSM, GPRS, EDGE, UMTS, and HSDPA. It also supports SMS and voice calls.
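    Once the driver is installed and the modem shows up as a serial (COM) port, you can sanity-check it with standard AT commands. The snippet below is only a rough sketch using the third-party pyserial library; the port name COM5 is an assumption and will differ on your system (on Linux it is usually something like /dev/ttyUSB0).

```python
# Rough sketch: query a USB modem with standard AT commands via pyserial.
# Assumptions: pyserial is installed (pip install pyserial) and the modem
# is exposed on COM5 -- replace with the port shown in your Device Manager.
import serial

PORT = "COM5"  # hypothetical port name; e.g. /dev/ttyUSB0 on Linux

with serial.Serial(PORT, baudrate=115200, timeout=2) as modem:
    modem.write(b"AT\r")          # basic "are you there?" command
    print(modem.read(64))         # expect a response containing b"OK"

    modem.write(b"AT+CSQ\r")      # signal quality report
    print(modem.read(64))         # e.g. b"+CSQ: 17,99 ... OK"
```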

    -

    Some of the benefits of using Huawei E173 driver are:

    -
      -
    • It enhances the performance and stability of your modem.
    • -
    • It enables you to use your modem with different SIM cards and network providers.
    • -
    • It allows you to customize the settings and preferences of your modem.
    • -
    • It updates your modem firmware to the latest version.
    • -
    -

    Before you download and install Huawei E173 driver, make sure you have a backup of your important data. You should also scan the driver file for viruses and malware. Do not interrupt the installation process or unplug your modem until it is completed. If you have any questions or feedback about Huawei E173 driver, you can leave a comment below or contact us via email.
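    As a simple integrity check before you run the installer, you can record the archive's checksum and compare it after copying or re-downloading it. Below is a minimal sketch using Python's standard hashlib module; the file name is a placeholder for whatever you actually saved.

```python
# Minimal sketch: compute a SHA-256 checksum of a downloaded archive.
# The file name below is just a placeholder for the driver package you saved.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

print(sha256_of("huawei_e173_driver.zip"))  # compare against a known-good value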

    In this article, we have shown you how to download and install Huawei E173 driver. We have also explained some of the features and benefits of using this driver. We hope you have found this article helpful and informative. If you have any suggestions or feedback, please let us know in the comments section below.

    -

    -

    Thank you for reading and have a nice day!


    d5da3c52bf
    -
    -
    \ No newline at end of file diff --git a/spaces/tbvl/Fake_Face_Detection/app.py b/spaces/tbvl/Fake_Face_Detection/app.py deleted file mode 100644 index 2cf7bf792c879d4c4d0e17d2123303620ebc84b0..0000000000000000000000000000000000000000 --- a/spaces/tbvl/Fake_Face_Detection/app.py +++ /dev/null @@ -1,206 +0,0 @@ -import gradio as gr -from PIL import Image -import numpy as np -import os -from face_cropper import detect_and_label_faces -# Define a custom function to convert an image to grayscale -def to_grayscale(input_image): - grayscale_image = Image.fromarray(np.array(input_image).mean(axis=-1).astype(np.uint8)) - return grayscale_image - - -description_markdown = """ -# Fake Face Detection tool from TrustWorthy BiometraVision Lab IISER Bhopal - -## Usage -This tool expects a face image as input. Upon submission, it will process the image and provide an output with bounding boxes drawn on the face. Alongside the visual markers, the tool will give a detection result indicating whether the face is fake or real. - -## Disclaimer -Please note that this tool is for research purposes only and may not always be 100% accurate. Users are advised to exercise discretion and supervise the tool's usage accordingly. - -## Licensing and Permissions -This tool has been developed solely for research and demonstrative purposes. Any commercial utilization of this tool is strictly prohibited unless explicit permission has been obtained from the developers. - -## Developer Contact -For further inquiries or permissions, you can reach out to the developer through the following social media accounts: -- [LAB Webpage](https://sites.google.com/iiitd.ac.in/agarwalakshay/labiiserb?authuser=0) -- [LinkedIn](https://www.linkedin.com/in/shivam-shukla-0a50ab1a2/) -- [GitHub](https://github.com/SaShukla090) -""" - - - - -# Create the Gradio app -app = gr.Interface( - fn=detect_and_label_faces, - inputs=gr.Image(type="pil"), - outputs="image", - # examples=[ - # "path_to_example_image_1.jpg", - # "path_to_example_image_2.jpg" - # ] - examples=[ - os.path.join("Examples", image_name) for image_name in os.listdir("Examples") - ], - title="Fake Face Detection", - description=description_markdown, -) - -# Run the app -app.launch() - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -# import torch.nn.functional as F -# import torch -# import torch.nn as nn -# import torch.optim as optim -# from torch.utils.data import DataLoader -# from sklearn.metrics import accuracy_score, precision_recall_fscore_support -# from torch.optim.lr_scheduler import CosineAnnealingLR -# from tqdm import tqdm -# import warnings -# warnings.filterwarnings("ignore") - -# from utils.config import cfg -# from dataset.real_n_fake_dataloader import Extracted_Frames_Dataset -# from utils.data_transforms import get_transforms_train, get_transforms_val -# from net.Multimodalmodel import Image_n_DCT -# import gradio as gr - - - - -# import os -# import json -# import torch -# from torchvision import transforms -# from torch.utils.data import DataLoader, Dataset -# from PIL import Image -# import numpy as np -# import pandas as pd -# import cv2 -# import argparse - - - - - - -# from sklearn.metrics import classification_report, confusion_matrix -# import matplotlib.pyplot as plt -# import seaborn as sns - - - - - -# class Test_Dataset(Dataset): -# def __init__(self, test_data_path = None, transform = None, image = None): -# """ -# Args: -# returns: -# """ - -# if test_data_path is None and image is not None: -# self.dataset = [(image, 2)] -# 
self.transform = transform - -# def __len__(self): -# return len(self.dataset) - -# def __getitem__(self, idx): -# sample_input = self.get_sample_input(idx) -# return sample_input - - -# def get_sample_input(self, idx): -# rgb_image = self.get_rgb_image(self.dataset[idx][0]) -# dct_image = self.compute_dct_color(self.dataset[idx][0]) -# # label = self.get_label(idx) -# sample_input = {"rgb_image": rgb_image, "dct_image": dct_image} - -# return sample_input - - -# def get_rgb_image(self, rgb_image): -# # rgb_image_path = self.dataset[idx][0] -# # rgb_image = Image.open(rgb_image_path) -# if self.transform: -# rgb_image = self.transform(rgb_image) -# return rgb_image - -# def get_dct_image(self, idx): -# rgb_image_path = self.dataset[idx][0] -# rgb_image = cv2.imread(rgb_image_path) -# dct_image = self.compute_dct_color(rgb_image) -# if self.transform: -# dct_image = self.transform(dct_image) - -# return dct_image - -# def get_label(self, idx): -# return self.dataset[idx][1] - - -# def compute_dct_color(self, image): -# image_float = np.float32(image) -# dct_image = np.zeros_like(image_float) -# for i in range(3): -# dct_image[:, :, i] = cv2.dct(image_float[:, :, i]) -# if self.transform: -# dct_image = self.transform(dct_image) -# return dct_image - - -# device = torch.device("cpu") -# # print(device) -# model = Image_n_DCT() -# model.load_state_dict(torch.load('weights/best_model.pth', map_location = device)) -# model.to(device) -# model.eval() - - -# def classify(image): -# test_dataset = Test_Dataset(transform = get_transforms_val(), image = image) -# inputs = test_dataset[0] -# rgb_image, dct_image = inputs['rgb_image'].to(device), inputs['dct_image'].to(device) -# output = model(rgb_image.unsqueeze(0), dct_image.unsqueeze(0)) -# # _, predicted = torch.max(output.data, 1) -# # print(f"the face is {'real' if predicted==1 else 'fake'}") -# return {'Fake': output[0][0], 'Real': output[0][1]} - -# iface = gr.Interface(fn=classify, inputs="image", outputs="label") -# if __name__ == "__main__": -# iface.launch() diff --git a/spaces/terfces0erbo/CollegeProjectV2/4k Video Download REPACKer 4.4 License Key.md b/spaces/terfces0erbo/CollegeProjectV2/4k Video Download REPACKer 4.4 License Key.md deleted file mode 100644 index e1dfc1ecad02510c8b0fcf6b592112d2d2976b9f..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/4k Video Download REPACKer 4.4 License Key.md +++ /dev/null @@ -1,6 +0,0 @@ -

    4k video downloader 4.4 license key


    Download Zip ★★★★★ https://bytlly.com/2uGkuu



    - -4K Video Downloader Crack Plus Serial Key Download. Not only that, but it also allows you to download videos to iDevices. It can stream videos from your iTunes library ... for offline viewing. Finally, the program can also convert YouTube videos to MP3 files to stream for free. But unfortunately, you can't do this offline. However, if you don't use iTunes, or if you don't like its interface, this might not be the best program for you. If you can't live without iTunes, 4K Video Downloader might be the best option for you. 8a78ff9644
    -
    -
    -

    diff --git a/spaces/terfces0erbo/CollegeProjectV2/Activados Matematica 3 Puerto De Palos Pdf 39 1.md b/spaces/terfces0erbo/CollegeProjectV2/Activados Matematica 3 Puerto De Palos Pdf 39 1.md deleted file mode 100644 index f734855885a041bf827382c426053994d5ee53ad..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Activados Matematica 3 Puerto De Palos Pdf 39 1.md +++ /dev/null @@ -1,6 +0,0 @@ -

    activados matematica 3 puerto de palos pdf 39 1


    DOWNLOAD — https://bytlly.com/2uGjZG



    -
    -2019 Topps Series 1 Baseball #284 Joey Votto Cincinnati Reds Official MLB .... Nov 25 ... Activados Matematica 3 Puerto De Palos Pdf 39 1fdad05405
    -
    -
    -

    diff --git a/spaces/terfces0erbo/CollegeProjectV2/Cherish Model.rar.md b/spaces/terfces0erbo/CollegeProjectV2/Cherish Model.rar.md deleted file mode 100644 index aed7c37333e7ae64c5e40c64735340bbd3af3a03..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Cherish Model.rar.md +++ /dev/null @@ -1,77 +0,0 @@ -
    -

    Cherish Model.rar: The Ultimate Guide to Finding and Downloading Her Photos and Videos

    -

    Cherish Model is one of the most popular and beautiful models on the web. Her photos and videos are highly sought after by her fans and admirers. However, finding and downloading her content can be challenging, especially if you are looking for RAR files.

    -

    RAR files are compressed archives that contain multiple files in a single package. They can save you time and space when downloading large collections of Cherish Model's photos and videos. However, they can also be hard to find, as they are often hidden or disguised on the web.

    -

    cherish model.rar


    Download Zip ✑ https://bytlly.com/2uGiYs



    -

    In this article, we will show you how to find and download Cherish Model.rar files from the web, how to extract them using a free software, and how to enjoy her photos and videos on your device. We will also give you some tips on how to avoid viruses and malware that might be hidden in some RAR files.

    -

    How to Find Cherish Model.rar Files on the Web

    -

    One of the easiest ways to find Cherish Model.rar files on the web is to use a search engine like Yandex or Google. You can simply type "cherish model.rar" in the search box and browse through the results. However, not all results will be relevant or safe. Some of them might be fake or malicious links that can harm your device or steal your personal information.

    -

    To avoid these risks, you should follow some basic guidelines when searching for Cherish Model.rar files:

    -
      -
    • Look for reputable and trustworthy websites that offer Cherish Model's content. Some examples are Fashion Land, TMF, Videozerk, and Turbobit. These websites have a large collection of Cherish Model's photos and videos in RAR format, and they are regularly updated with new content.
    • -
    • Avoid clicking on suspicious or unknown links that promise free or unlimited downloads of Cherish Model.rar files. These links might redirect you to phishing or scam websites that can infect your device with viruses or malware.
    • -
    • Check the file size and name of the RAR file before downloading it. A typical Cherish Model.rar file should have a size of around 200 MB to 2 GB, depending on the number of photos and videos included. The file name should also match the content description, such as "AMS-Cherish-01-Afternoon-Fun.mp4" or "AMS-Cherish-101-150-sets.rar". If the file size or name is too small, too large, or too vague, it might be a fake or corrupted file.
    • -
    -

    How to Download and Extract Cherish Model.rar Files

    -

    Once you have found a reliable website that offers Cherish Model.rar files, you can proceed to download them to your device. Depending on the website, you might need to create an account, pay a fee, or complete a captcha verification before downloading the file. Follow the instructions on the website to complete the download process.

    -

    After downloading the Cherish Model.rar file, you will need to extract it using a software that can handle RAR files. One of the most popular and free software for this purpose is WinRAR. You can download WinRAR from its official website: https://www.win-rar.com/. Install WinRAR on your device and follow these steps to extract Cherish Model.rar files:

    -
      -
    1. Locate the Cherish Model.rar file on your device and right-click on it.
    2. -
    3. Select "Extract Here" or "Extract to (file name)" from the context menu.
    4. -
    5. Wait for WinRAR to extract the files to your desired location.
    6. -
    7. Open the extracted folder and enjoy Cherish Model's photos and videos.
    8. -
    -

    How to Enjoy Cherish Model's Photos and Videos

    -

    Now that you have extracted Cherish Model's photos and videos from the RAR file, you can enjoy them on your device. You can view her photos using any image viewer software, such as Windows Photo Viewer or IrfanView. You can watch her videos using any media player software, such as VLC Media Player or Windows Media Player.

    -

    You can also transfer her photos and videos to other devices, such as your smartphone or tablet, using a USB cable or a cloud service. However, make sure that your device has enough storage space and supports the file formats of Cherish Model's content. Most of her photos are in JPEG format, while most of her videos are in MP4 format.

    -

    Conclusion

    -

    Cherish Model is one of the most popular and beautiful models on the web. Her photos and videos are highly sought after by her fans and admirers. If you want to download and enjoy her content in RAR format, you need to follow some simple steps:

    -

    -
      -
    • Find a reputable website that offers Cherish Model.rar files.
    • -
    • Download the RAR file to your device.
    • -
    • Extract the RAR file using WinRAR or another software.
    • -
    • Enjoy Cherish Model's photos and videos on your device.
    • -
    -

    We hope this article has helped you learn how to find and download Cherish Model.rar files. Remember to always be careful when downloading files from the web, and avoid clicking on suspicious or unknown links. Cherish Model deserves your respect and admiration, so don't share her content without her permission or use it for illegal purposes.

    -

    How to Optimize Your Website for Cherish Model.rar Files

    -

    If you are a website owner or a SEO specialist who wants to attract more visitors and customers who are looking for Cherish Model.rar files, you need to optimize your website for this keyword. Optimizing your website means making it more relevant, user-friendly, and authoritative for your target audience and search engines.

    -

    Here are some tips on how to optimize your website for Cherish Model.rar files:

    -
      -
    • Use the keyword "cherish model.rar" in your domain name, title tag, meta description, headings, and content. This will help search engines and users understand what your website is about and rank it higher for this keyword.
    • -
    • Provide high-quality and original content that offers value and information to your visitors. Don't copy or spin content from other websites, as this can harm your reputation and ranking. Write engaging and informative articles, reviews, guides, or tutorials about Cherish Model.rar files and related topics.
    • -
    • Include images, videos, or other media that enhance your content and make it more appealing and interactive. Use alt text and captions to describe your media and include the keyword "cherish model.rar" in them. This will help search engines and users find your media and understand its context.
    • -
    • Use internal and external links to connect your content with other relevant pages on your website or other websites. Internal links help users navigate your website and find more information. External links help users find more resources and establish your credibility and authority.
    • -
    • Use social media buttons to encourage your visitors to share your content with their friends and followers. Social media can help you increase your exposure, traffic, and engagement. It can also help you build relationships with your audience and potential customers.
    • -
    -

    How to Avoid Legal Issues When Downloading Cherish Model.rar Files

    -

    Downloading Cherish Model.rar files can be fun and exciting, but it can also involve some legal risks. Cherish Model is a professional model who owns the rights to her photos and videos. She works with reputable agencies and websites that sell her content legally and ethically. If you download her content from unauthorized sources or use it for illegal purposes, you might face legal consequences.

    -

    Here are some tips on how to avoid legal issues when downloading Cherish Model.rar files:

    -
      -
    • Only download Cherish Model.rar files from authorized websites that have her permission and license to sell her content. These websites usually have a clear terms of service and privacy policy that explain their rules and regulations.
    • -
    • Don't share Cherish Model.rar files with others without her consent or authorization. This includes uploading them to other websites, forums, social media platforms, or file-sharing services. This can violate her rights and expose you to legal action.
    • -
    • Don't use Cherish Model.rar files for commercial or illegal purposes. This includes selling them, using them for advertising or marketing, or exploiting them for sexual or abusive purposes. This can harm her reputation and dignity and expose you to legal action.
    • -
    • Respect Cherish Model's privacy and personal information. Don't try to contact her, stalk her, harass her, or threaten her. This can violate her rights and expose you to legal action.
    • -
    • Appreciate Cherish Model's work and talent. Don't criticize her, insult her, or degrade her. This can hurt her feelings and expose you to legal action.
    • -
    -

    Conclusion

    -

    Cherish Model is one of the most popular and beautiful models on the web. Her photos and videos are highly sought after by her fans and admirers. If you want to download and enjoy her content in RAR format, you need to follow some simple steps:

    -
      -
    • Find a reputable website that offers Cherish Model.rar files.
    • -
    • Download the RAR file to your device.
    • -
    • Extract the RAR file using WinRAR or another software.
    • -
    • Enjoy Cherish Model's photos and videos on your device.
    • -
    -

    We hope this article has helped you learn how to find and download Cherish Model.rar files. Remember to always be careful when downloading files from the web, avoid clicking on suspicious or unknown links, optimize your website for this keyword, and avoid legal issues when downloading or using her content. Cherish Model deserves your respect and admiration, so don't share her content without her permission or use it for illegal purposes.

    -


    3cee63e6c2
    -
    -
    \ No newline at end of file diff --git a/spaces/terfces0erbo/CollegeProjectV2/Download EXCLUSIVE Da Apostila Mmi Casados Para Sempre Em 38.md b/spaces/terfces0erbo/CollegeProjectV2/Download EXCLUSIVE Da Apostila Mmi Casados Para Sempre Em 38.md deleted file mode 100644 index b3e7c9b0a8451c5c1df6763bcaa8faa0dfa1f3f2..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Download EXCLUSIVE Da Apostila Mmi Casados Para Sempre Em 38.md +++ /dev/null @@ -1,6 +0,0 @@ -

    download da apostila mmi casados para sempre em 38


    Download File >>>>> https://bytlly.com/2uGjZ7



    - -See the lyrics of Casados Para Sempre and listen to "Aliança", "Visão de Fé e Confiança", "Semeando e Colhendo", "Fluindo Juntos No Espírito" and much more ... 4d29de3e1b
    -
    -
    -

    diff --git a/spaces/terfces0erbo/CollegeProjectV2/Insidious The Last Key (English) 2 Movie Torrent WORK Download.md b/spaces/terfces0erbo/CollegeProjectV2/Insidious The Last Key (English) 2 Movie Torrent WORK Download.md deleted file mode 100644 index ad1da774a147de5f7edbd8c48089656a9c46539f..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Insidious The Last Key (English) 2 Movie Torrent WORK Download.md +++ /dev/null @@ -1,6 +0,0 @@ - -

    many of the hits i get on my statistics are for people looking for ways to get around the internet censorship in certain countries. theres some stuff that i do to make this more efficient, but i dont mind letting it run for a few days. if i think youre in a country with an unsavory internet censor, i may give you a raw page to download, but i definitely wont send any money to your account. i will however take some money from your account once its done and i can determine that youre not in one of those countries, and i will send you a link to a new page that you can download on your own. if you have a legitimate reason for wanting to download something that you cant download in your country, i will send you a link to a page on my server that you can download on your own (i know that some people like to experiment with various methods, but i dont like to encourage that).

    -

    Insidious: The Last Key (English) 2 Movie Torrent Download


    DOWNLOAD - https://bytlly.com/2uGjWl



    -

    i dont remember exactly what was in the original, but the new one has a couple of plots: the first is about a guy who gets caught up in a cult, the second is about a woman who gets caught up in a cult, and the third is about a guy who gets caught up in a cult and tries to get out. there is a fourth plot, but i cant remember what it was about (it may have been the original one). i also think theres a fifth plot, but i cant remember what it was about. the plot of the third movie is more complex than the plots of the original and the first two movies, which i think makes sense because the original and the first two movies are much simpler than the third movie. this means that the third movie is more like the original than the first two movies are, which is why i think it was the original.

    899543212b
    -
    -
    \ No newline at end of file diff --git a/spaces/thuanz123/peft-sd-realfill/colab.py b/spaces/thuanz123/peft-sd-realfill/colab.py deleted file mode 100644 index c042ce8b9522da2d083d7c5305f561e147d3b0a5..0000000000000000000000000000000000000000 --- a/spaces/thuanz123/peft-sd-realfill/colab.py +++ /dev/null @@ -1,371 +0,0 @@ -#!/usr/bin/env python -""" -Demo showcasing parameter-efficient fine-tuning of Stable Dissfusion via Dreambooth leveraging 🤗 PEFT (https://github.com/huggingface/peft) - -The code in this repo is partly adapted from the following repositories: -https://huggingface.co/spaces/hysts/LoRA-SD-training -https://huggingface.co/spaces/multimodalart/dreambooth-training -""" -from __future__ import annotations - -import os -import pathlib - -import gradio as gr -import torch -from typing import List - -from inference import InferencePipeline -from trainer import Trainer -from uploader import upload - - -TITLE = "# LoRA + Dreambooth Training and Inference Demo 🎨" -DESCRIPTION = "Demo showcasing parameter-efficient fine-tuning of Stable Dissfusion via Dreambooth leveraging 🤗 PEFT (https://github.com/huggingface/peft)." - - -ORIGINAL_SPACE_ID = "smangrul/peft-lora-sd-dreambooth" - -SPACE_ID = os.getenv("SPACE_ID", ORIGINAL_SPACE_ID) -SHARED_UI_WARNING = f"""# Attention - This Space doesn't work in this shared UI. You can duplicate and use it with a paid private T4 GPU. -
    Duplicate Space
    -""" -if os.getenv("SYSTEM") == "spaces" and SPACE_ID != ORIGINAL_SPACE_ID: - SETTINGS = f'Settings' - -else: - SETTINGS = "Settings" -CUDA_NOT_AVAILABLE_WARNING = f"""# Attention - Running on CPU. -
    -You can assign a GPU in the {SETTINGS} tab if you are running this on HF Spaces. -"T4 small" is sufficient to run this demo. -
    -""" - - -def show_warning(warning_text: str) -> gr.Blocks: - with gr.Blocks() as demo: - with gr.Box(): - gr.Markdown(warning_text) - return demo - - -def update_output_files() -> dict: - paths = sorted(pathlib.Path("results").glob("*.pt")) - config_paths = sorted(pathlib.Path("results").glob("*.json")) - paths = paths + config_paths - paths = [path.as_posix() for path in paths] # type: ignore - return gr.update(value=paths or None) - - -def create_training_demo(trainer: Trainer, pipe: InferencePipeline) -> gr.Blocks: - with gr.Blocks() as demo: - base_model = gr.Dropdown( - choices=[ - "CompVis/stable-diffusion-v1-4", - "runwayml/stable-diffusion-v1-5", - "stabilityai/stable-diffusion-2-1-base", - ], - value="runwayml/stable-diffusion-v1-5", - label="Base Model", - visible=True, - ) - resolution = gr.Dropdown(choices=["512"], value="512", label="Resolution", visible=False) - - with gr.Row(): - with gr.Box(): - gr.Markdown("Training Data") - concept_images = gr.Files(label="Images for your concept") - concept_prompt = gr.Textbox(label="Concept Prompt", max_lines=1) - gr.Markdown( - """ - - Upload images of the style you are planning on training on. - - For a concept prompt, use a unique, made up word to avoid collisions. - - Guidelines for getting good results: - - Dreambooth for an `object` or `style`: - - 5-10 images of the object from different angles - - 500-800 iterations should be good enough. - - Prior preservation is recommended. - - `class_prompt`: - - `a photo of object` - - `style` - - `concept_prompt`: - - ` object` - - ` style` - - `a photo of object` - - `a photo of style` - - Dreambooth for a `Person/Face`: - - 15-50 images of the person from different angles, lighting, and expressions. - Have considerable photos with close up faces. - - 800-1200 iterations should be good enough. - - good defaults for hyperparams - - Model - `runwayml/stable-diffusion-v1-5` or `stabilityai/stable-diffusion-2-1-base` - - Use/check Prior preservation. - - Number of class images to use - 200 - - Prior Loss Weight - 1 - - LoRA Rank for unet - 16 - - LoRA Alpha for unet - 20 - - lora dropout - 0 - - LoRA Bias for unet - `all` - - LoRA Rank for CLIP - 16 - - LoRA Alpha for CLIP - 17 - - LoRA Bias for CLIP - `all` - - lora dropout for CLIP - 0 - - Uncheck `FP16` and `8bit-Adam` (don't use them for faces) - - `class_prompt`: Use the gender related word of the person - - `man` - - `woman` - - `boy` - - `girl` - - `concept_prompt`: just the unique, made up word, e.g., `srm` - - Choose `all` for `lora_bias` and `text_encode_lora_bias` - - Dreambooth for a `Scene`: - - 15-50 images of the scene from different angles, lighting, and expressions. - - 800-1200 iterations should be good enough. - - Prior preservation is recommended. 
- - `class_prompt`: - - `scene` - - `landscape` - - `city` - - `beach` - - `mountain` - - `concept_prompt`: - - ` scene` - - ` landscape` - - Experiment with various values for lora dropouts, enabling/disabling fp16 and 8bit-Adam - """ - ) - with gr.Box(): - gr.Markdown("Training Parameters") - num_training_steps = gr.Number(label="Number of Training Steps", value=1000, precision=0) - learning_rate = gr.Number(label="Learning Rate", value=0.0001) - gradient_checkpointing = gr.Checkbox(label="Whether to use gradient checkpointing", value=True) - train_text_encoder = gr.Checkbox(label="Train Text Encoder", value=True) - with_prior_preservation = gr.Checkbox(label="Prior Preservation", value=True) - class_prompt = gr.Textbox( - label="Class Prompt", max_lines=1, placeholder='Example: "a photo of object"' - ) - num_class_images = gr.Number(label="Number of class images to use", value=50, precision=0) - prior_loss_weight = gr.Number(label="Prior Loss Weight", value=1.0, precision=1) - # use_lora = gr.Checkbox(label="Whether to use LoRA", value=True) - lora_r = gr.Number(label="LoRA Rank for unet", value=4, precision=0) - lora_alpha = gr.Number( - label="LoRA Alpha for unet. scaling factor = lora_r/lora_alpha", value=4, precision=0 - ) - lora_dropout = gr.Number(label="lora dropout", value=0.00) - lora_bias = gr.Dropdown( - choices=["none", "all", "lora_only"], - value="none", - label="LoRA Bias for unet. This enables bias params to be trainable based on the bias type", - visible=True, - ) - lora_text_encoder_r = gr.Number(label="LoRA Rank for CLIP", value=4, precision=0) - lora_text_encoder_alpha = gr.Number( - label="LoRA Alpha for CLIP. scaling factor = lora_r/lora_alpha", value=4, precision=0 - ) - lora_text_encoder_dropout = gr.Number(label="lora dropout for CLIP", value=0.00) - lora_text_encoder_bias = gr.Dropdown( - choices=["none", "all", "lora_only"], - value="none", - label="LoRA Bias for CLIP. This enables bias params to be trainable based on the bias type", - visible=True, - ) - gradient_accumulation = gr.Number(label="Number of Gradient Accumulation", value=1, precision=0) - fp16 = gr.Checkbox(label="FP16", value=True) - use_8bit_adam = gr.Checkbox(label="Use 8bit Adam", value=True) - gr.Markdown( - """ - - It will take about 20-30 minutes to train for 1000 steps with a T4 GPU. - - You may want to try a small number of steps first, like 1, to see if everything works fine in your environment. - - Note that your trained models will be deleted when the second training is started. You can upload your trained model in the "Upload" tab. 
- """ - ) - - run_button = gr.Button("Start Training") - with gr.Box(): - with gr.Row(): - check_status_button = gr.Button("Check Training Status") - with gr.Column(): - with gr.Box(): - gr.Markdown("Message") - training_status = gr.Markdown() - output_files = gr.Files(label="Trained Weight Files and Configs") - - run_button.click(fn=pipe.clear) - - run_button.click( - fn=trainer.run, - inputs=[ - base_model, - resolution, - num_training_steps, - concept_images, - concept_prompt, - learning_rate, - gradient_accumulation, - fp16, - use_8bit_adam, - gradient_checkpointing, - train_text_encoder, - with_prior_preservation, - prior_loss_weight, - class_prompt, - num_class_images, - lora_r, - lora_alpha, - lora_bias, - lora_dropout, - lora_text_encoder_r, - lora_text_encoder_alpha, - lora_text_encoder_bias, - lora_text_encoder_dropout, - ], - outputs=[ - training_status, - output_files, - ], - queue=False, - ) - check_status_button.click(fn=trainer.check_if_running, inputs=None, outputs=training_status, queue=False) - check_status_button.click(fn=update_output_files, inputs=None, outputs=output_files, queue=False) - return demo - - -def find_weight_files() -> List[str]: - curr_dir = pathlib.Path(__file__).parent - paths = sorted(curr_dir.rglob("*.pt")) - return [path.relative_to(curr_dir).as_posix() for path in paths] - - -def reload_lora_weight_list() -> dict: - return gr.update(choices=find_weight_files()) - - -def create_inference_demo(pipe: InferencePipeline) -> gr.Blocks: - with gr.Blocks() as demo: - with gr.Row(): - with gr.Column(): - base_model = gr.Dropdown( - choices=[ - "CompVis/stable-diffusion-v1-4", - "runwayml/stable-diffusion-v1-5", - "stabilityai/stable-diffusion-2-1-base", - ], - value="runwayml/stable-diffusion-v1-5", - label="Base Model", - visible=True, - ) - reload_button = gr.Button("Reload Weight List") - lora_weight_name = gr.Dropdown( - choices=find_weight_files(), value="lora/lora_disney.pt", label="LoRA Weight File" - ) - prompt = gr.Textbox(label="Prompt", max_lines=1, placeholder='Example: "style of sks, baby lion"') - negative_prompt = gr.Textbox( - label="Negative Prompt", max_lines=1, placeholder='Example: "blurry, botched, low quality"' - ) - seed = gr.Slider(label="Seed", minimum=0, maximum=100000, step=1, value=1) - with gr.Accordion("Other Parameters", open=False): - num_steps = gr.Slider(label="Number of Steps", minimum=0, maximum=1000, step=1, value=50) - guidance_scale = gr.Slider(label="CFG Scale", minimum=0, maximum=50, step=0.1, value=7) - - run_button = gr.Button("Generate") - - gr.Markdown( - """ - - After training, you can press "Reload Weight List" button to load your trained model names. 
- - Few repos to refer for ideas: - - https://huggingface.co/smangrul/smangrul - - https://huggingface.co/smangrul/painting-in-the-style-of-smangrul - - https://huggingface.co/smangrul/erenyeager - """ - ) - with gr.Column(): - result = gr.Image(label="Result") - - reload_button.click(fn=reload_lora_weight_list, inputs=None, outputs=lora_weight_name) - prompt.submit( - fn=pipe.run, - inputs=[ - base_model, - lora_weight_name, - prompt, - negative_prompt, - seed, - num_steps, - guidance_scale, - ], - outputs=result, - queue=False, - ) - run_button.click( - fn=pipe.run, - inputs=[ - base_model, - lora_weight_name, - prompt, - negative_prompt, - seed, - num_steps, - guidance_scale, - ], - outputs=result, - queue=False, - ) - seed.change( - fn=pipe.run, - inputs=[ - base_model, - lora_weight_name, - prompt, - negative_prompt, - seed, - num_steps, - guidance_scale, - ], - outputs=result, - queue=False, - ) - return demo - - -def create_upload_demo() -> gr.Blocks: - with gr.Blocks() as demo: - model_name = gr.Textbox(label="Model Name") - hf_token = gr.Textbox(label="Hugging Face Token (with write permission)") - upload_button = gr.Button("Upload") - with gr.Box(): - gr.Markdown("Message") - result = gr.Markdown() - gr.Markdown( - """ - - You can upload your trained model to your private Model repo (i.e. https://huggingface.co/{your_username}/{model_name}). - - You can find your Hugging Face token [here](https://huggingface.co/settings/tokens). - """ - ) - - upload_button.click(fn=upload, inputs=[model_name, hf_token], outputs=result) - - return demo - - -pipe = InferencePipeline() -trainer = Trainer() - -with gr.Blocks(css="style.css") as demo: - if os.getenv("IS_SHARED_UI"): - show_warning(SHARED_UI_WARNING) - if not torch.cuda.is_available(): - show_warning(CUDA_NOT_AVAILABLE_WARNING) - - gr.Markdown(TITLE) - gr.Markdown(DESCRIPTION) - - with gr.Tabs(): - with gr.TabItem("Train"): - create_training_demo(trainer, pipe) - with gr.TabItem("Test"): - create_inference_demo(pipe) - with gr.TabItem("Upload"): - create_upload_demo() - -demo.queue(default_enabled=False).launch(share=True) diff --git a/spaces/tialenAdioni/chat-gpt-api/Tere Naal Love Ho Gaya Full Movie Free Download Mp4 Hd.md b/spaces/tialenAdioni/chat-gpt-api/Tere Naal Love Ho Gaya Full Movie Free Download Mp4 Hd.md deleted file mode 100644 index 03ca0386920aad9dbb4c748bd5cc89515d0d629b..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/Tere Naal Love Ho Gaya Full Movie Free Download Mp4 Hd.md +++ /dev/null @@ -1,74 +0,0 @@ -## Tere Naal Love Ho Gaya Full Movie Free Download Mp4 Hd - - - - - - - - - -**Click Here >>>>> [https://conttooperting.blogspot.com/?l=2tzQAQ](https://conttooperting.blogspot.com/?l=2tzQAQ)** - - - - - - - - - - - - - -# How to Watch Tere Naal Love Ho Gaya Full Movie Online for Free - - - -Tere Naal Love Ho Gaya is a 2012 romantic comedy film starring Riteish Deshmukh and Genelia D'Souza. The film follows the story of Viren, a young man who kidnaps Mini, the daughter of his employer, in order to start his own travel agency with the ransom money. However, he falls in love with her and they embark on a series of adventures together. - - - -If you are looking for a way to watch Tere Naal Love Ho Gaya full movie online for free, you have come to the right place. In this article, we will show you how to stream or download the movie in high quality Mp4 format without paying any subscription fees or downloading any software. 
- - - -## Streaming Tere Naal Love Ho Gaya Online - - - -One of the easiest ways to watch Tere Naal Love Ho Gaya online is to use a streaming service that offers the movie. There are several options available, depending on your location and preference. Here are some of them: - - - -- **Disney+ Hotstar**: Disney+ Hotstar is a popular streaming platform that offers a variety of movies and shows from Disney, Marvel, Star Wars, Pixar, National Geographic, and more. You can watch Tere Naal Love Ho Gaya on Disney+ Hotstar if you are in India, Indonesia, Malaysia, Singapore, Thailand, or the Philippines. You can sign up for a free trial or a monthly subscription plan that suits your budget and needs[^2^]. - -- **Dailymotion**: Dailymotion is a video-sharing website that hosts millions of videos from various genres and categories. You can watch Tere Naal Love Ho Gaya on Dailymotion for free by searching for the movie title and selecting the video uploaded by Movie Lanka[^3^]. The video has English subtitles and decent quality. However, you may have to deal with some ads and pop-ups while watching. - -- **MovieSpyHD**: MovieSpyHD is another video-sharing website that offers a large collection of movies and shows from Bollywood, Hollywood, and other industries. You can watch Tere Naal Love Ho Gaya on MovieSpyHD for free by clicking on the link provided on their website[^1^]. The video has good quality and no ads. However, you may have to complete some surveys or tasks before accessing the link. - - - -## Downloading Tere Naal Love Ho Gaya Online - - - -If you prefer to download Tere Naal Love Ho Gaya full movie in Mp4 format and watch it offline, you can also do that using some websites that provide direct download links or torrent files. However, you should be careful about the legality and safety of these websites as they may contain viruses or malware that can harm your device or data. Here are some of the websites that claim to offer Tere Naal Love Ho Gaya full movie free download Mp4 Hd: - - - -- **Filmyzilla**: Filmyzilla is a notorious website that leaks pirated movies and shows from various industries. You can download Tere Naal Love Ho Gaya full movie in Mp4 format from Filmyzilla by searching for the movie title and selecting the download option. However, you may have to face some legal issues or penalties if you use this website as it violates the copyright laws. - -- **Filmywap**: Filmywap is another website that offers pirated movies and shows for free download. You can download Tere Naal Love Ho Gaya full movie in Mp4 format from Filmywap by following the same steps as Filmyzilla. However, you may also encounter some risks or problems if you use this website as it is illegal and unsafe. - -- **Coolmoviez**: Coolmoviez is a website that provides links to various movies and shows from different genres and languages. You can download Tere Naal Love Ho Gaya full movie in Mp4 format from Coolmoviez by clicking on the link given on their website. However, you may have to register or sign up before accessing the link. - - - - Biffy Clyro - Puzzle [2007]@320: A Masterpiece of Alternative Rock

- -

If you are looking for a powerful and emotional album that combines alternative rock, alternative metal, and orchestral elements, you should check out Biffy Clyro's fourth studio album, Puzzle [2007]@320. This album was released in June 2007 and received widespread critical acclaim, reaching No. 2 in the UK Albums Chart and No. 39 in the overall world charts. It was also voted the best album of 2007 by Kerrang! and Rock Sound magazines.

-

Biffy Clyro - Puzzle [2007]@320


Download File ⚹ https://urlcod.com/2uK1GE



- -

Puzzle [2007]@320 is Biffy Clyro's first album since leaving Beggars Banquet (though the sleeve artwork still features the Beggars Banquet logo) and their first album to be produced by Garth Richardson, who has worked with bands like Rage Against the Machine, Red Hot Chili Peppers, and Muse. The album was partly inspired by the death of frontman Simon Neil's mother Eleanor, who had died a few years prior to the recording of Puzzle. The album deals with themes of grief, loss, love, and hope, and features some of the band's most personal and heartfelt lyrics.

- -

The album also showcases Biffy Clyro's mastery of huge stadium-filling melodies, as well as their signature use of complex time signatures and abrupt structure changes. The album contains 13 tracks, including six singles: "Semi-Mental", "Saturday Superhouse", "Living Is a Problem Because Everything Dies", "Folding Stars", "Machines", and "Who's Got a Match?". Some of the highlights of the album are:

- -
    -
  • "Living Is a Problem Because Everything Dies": The opening track of the album, which starts with a choir singing a Latin phrase that translates to "We have come into this world to die". The song then explodes into a heavy riff and a catchy chorus that reflects on the meaninglessness of life.
  • -
  • "Saturday Superhouse": The second single from the album, which is a fast-paced and energetic rock song that contrasts with the darker themes of the album. The song is about finding happiness in simple things like spending time with your loved ones.
  • -
  • "Folding Stars": The fourth single from the album, which is a beautiful and emotional ballad dedicated to Simon Neil's mother. The song features acoustic guitar, piano, strings, and Neil's soaring vocals that express his love and sorrow for his mother.
  • -
  • "Machines": The fifth single from the album, which is a minimalist and haunting song that closes the album. The song is composed of only vocals and acoustic guitar, and it is about finding strength and hope in difficult times.
  • -
- -

Puzzle [2007]@320 is a masterpiece of alternative rock that showcases Biffy Clyro's musical talent, creativity, and emotion. It is an album that will make you feel, think, and rock out. If you want to listen to this amazing album, you can download it in high-quality MP3 format (320 kbps) from our website. Just click on the link below and enjoy!

- -Download Biffy Clyro - Puzzle [2007]@320

-

81aa517590
-
-
\ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Download Office 365 Full Version Crack !FREE!.md b/spaces/tialenAdioni/chat-gpt-api/logs/Download Office 365 Full Version Crack !FREE!.md deleted file mode 100644 index 2146e982dd88fea16f16efee275905d5ff8a5f05..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Download Office 365 Full Version Crack !FREE!.md +++ /dev/null @@ -1,11 +0,0 @@ -
-

Why You Should Not Download Office 365 Full Version Crack

-

Office 365 is a subscription-based service that provides access to various Microsoft applications, such as Word, Excel, PowerPoint, Outlook, OneNote, and more. Office 365 also offers cloud storage, online collaboration, and security features. However, some people may be tempted to download Office 365 full version crack from unauthorized sources, hoping to get the benefits of Office 365 without paying for it. This is a bad idea for several reasons, and we will explain why in this article.

-

download office 365 full version crack


DOWNLOAD === https://urlcod.com/2uK5qM



-

First of all, downloading Office 365 full version crack is illegal and unethical. Office 365 is a copyrighted product of Microsoft, and cracking it violates the terms and conditions of the license agreement. By downloading Office 365 full version crack, you are stealing from Microsoft and depriving them of their rightful revenue. This is not only unfair to Microsoft, but also to the legitimate users who pay for the service.

-

Secondly, downloading Office 365 full version crack is risky and unsafe. Office 365 full version crack may contain malware, viruses, spyware, or other harmful programs that can damage your computer or compromise your personal information. You may also expose yourself to legal consequences if you are caught using or distributing Office 365 full version crack. Moreover, Office 365 full version crack may not work properly or at all, as it may be incompatible with your system or outdated. You may also miss out on the updates, features, and support that Microsoft provides for the official Office 365 service.

-

Thirdly, downloading Office 365 full version crack is unnecessary and unwise. Office 365 is a valuable and affordable service that offers many benefits and advantages for users. You can choose from different plans and options that suit your needs and budget. You can also enjoy the convenience and flexibility of accessing your files and applications from any device and location. You can also collaborate with others in real-time and share your work easily. You can also trust that your data and privacy are protected by Microsoft's security and encryption standards.

-

Therefore, we strongly advise you not to download Office 365 full version crack from any source. Instead, we recommend you to purchase a legitimate subscription of Office 365 from Microsoft or an authorized reseller. This way, you can enjoy the full potential and benefits of Office 365 without any legal, ethical, or technical issues.

-

ddb901b051
-
-
\ No newline at end of file diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/3D Xrit il Azrbaycann Corafiya v qlim Xsusiyytlri.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/3D Xrit il Azrbaycann Corafiya v qlim Xsusiyytlri.md deleted file mode 100644 index 0b18a811fcb8ecff989710c066743884d791f2c7..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/3D Xrit il Azrbaycann Corafiya v qlim Xsusiyytlri.md +++ /dev/null @@ -1,162 +0,0 @@ - -

3D Xerite Azerbaycan: A New Way to Explore the Land of Fire

-

    Azerbaijan is a landlocked country in the Caucasus region, with a rich and diverse geography, culture, and history. It is also known as the Land of Fire because of its natural gas reserves and ancient fire-worshipping traditions. But how can you explore this fascinating country in a new and exciting way? The answer is a 3D map of Azerbaijan, a technology that lets you see and interact with the country in three dimensions using computer-generated imagery. In this article, we will explain what 3D mapping technology is, how it works, and how you can access and use the 3D map of Azerbaijan online. We will also show you how to create and share your own 3D maps of Azerbaijan, using tools and software that are available to anyone. By the end of this article, you will have a better understanding of the 3D map of Azerbaijan and why it is worth exploring.

-

3d xerite azerbaycan


Download Zip –––––>>> https://bltlly.com/2uOheC



-

What is 3D mapping technology and how does it work?

-

    3D mapping technology is a technique that involves creating 3D representations of objects, buildings, landscapes, and other physical entities using computer-generated imagery. It requires collecting data from various sources, such as aerial images, satellite data, and ground-based measurements, which are then processed with specialized software to build a 3D model. Depth can also be captured directly with active 3D imaging techniques, such as laser scanning (LiDAR) or structured-light and time-of-flight cameras.
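
    As a small illustration of the processing step described above, the sketch below turns a handful of scattered (x, y, elevation) measurements into a regular grid that could be rendered as a 3D surface. It uses NumPy and SciPy with randomly generated sample points, so it is only a toy model under those assumptions, not a real survey workflow.

```python
# Toy sketch: interpolate scattered elevation samples onto a regular grid,
# the kind of intermediate product a 3D terrain model is built from.
import numpy as np
from scipy.interpolate import griddata

rng = np.random.default_rng(0)
# Fake ground-based measurements: x, y in metres, z = elevation in metres.
xy = rng.uniform(0, 1000, size=(200, 2))
z = 50 + 0.05 * xy[:, 0] + 10 * np.sin(xy[:, 1] / 100)

# Regular 50 x 50 grid covering the same area.
gx, gy = np.meshgrid(np.linspace(0, 1000, 50), np.linspace(0, 1000, 50))

# Interpolate the scattered samples onto the grid (linear interpolation).
grid_z = griddata(points=xy, values=z, xi=(gx, gy), method="linear")

print(grid_z.shape)  # (50, 50) elevation grid, ready for 3D rendering
```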

-

The benefits of 3D mapping for various fields and industries

-

3D mapping has many applications and benefits for various fields and industries, such as:

-
    -
  • Geography and cartography: 3D mapping can help create more accurate and detailed maps of any area, showing the terrain, elevation, vegetation, water bodies, landmarks, roads, and other features. 3D maps can also be used for navigation, planning, analysis, visualization, and education purposes.
  • -
  • Architecture and engineering: 3D mapping can help design and construct buildings, bridges, dams, tunnels, pipelines, and other structures, by providing realistic models that can be tested and modified before implementation. 3D maps can also help monitor and maintain the structures after completion.
  • -
  • Agriculture and environment: 3D mapping can help assess and manage the land use, soil quality, crop yield, irrigation, drainage, erosion, pollution, biodiversity, climate change, natural disasters, and other factors that affect agriculture and environment.
  • -
  • Tourism and culture: 3D mapping can help promote and preserve the tourism and culture of any place, by showcasing its natural beauty, historical heritage, architectural wonders, artistic expressions, religious traditions, festivals, cuisines, crafts, music, dances, languages, customs, and values.
  • -
-

    The challenges and limitations of 3D mapping in Azerbaijan
The challenges and limitations of 3D mapping in Azerbaijan

-

Despite the benefits and applications of 3D mapping, there are also some challenges and limitations that need to be addressed, especially in Azerbaijan. Some of these are:

-
    -
  • Data availability and quality: 3D mapping requires a large amount of data from various sources, such as aerial images, satellite data, and ground-based measurements. However, not all areas of Azerbaijan have sufficient and updated data coverage, especially in remote and mountainous regions. Moreover, the data quality may vary depending on the resolution, accuracy, completeness, and consistency of the sources. Therefore, data availability and quality are crucial factors that affect the reliability and precision of 3D mapping products.
  • -
  • Data processing and analysis: 3D mapping requires complex and time-consuming data processing and analysis using specialized software and hardware. However, not all users of 3D mapping have the necessary skills, knowledge, and resources to perform these tasks efficiently and effectively. Moreover, different software and hardware may have different capabilities, compatibility, and performance issues that may affect the outcome of 3D mapping. Therefore, data processing and analysis are challenging tasks that require careful selection and optimization of the tools and methods used.
  • -
  • Data storage and dissemination: 3D mapping produces large amounts of data that need to be stored and disseminated in a secure and accessible way. However, not all users of 3D mapping have adequate storage capacity and bandwidth to handle these data. Moreover, different users may have different preferences and requirements for accessing and using these data, such as formats, standards, licenses, privacy, and security. Therefore, data storage and dissemination are important issues that need to be addressed by providing suitable platforms and services for 3D mapping data.
  • -
-

These challenges and limitations may hinder the full potential of 3D mapping in Azerbaijan. However, they can also be overcome by developing and implementing appropriate solutions, such as improving data collection methods, enhancing data processing techniques, providing data management systems, and promoting data sharing practices.

-

3d xerite azerbaycan online
-3d xerite azerbaycan gomap
-3d xerite azerbaycan fiziki
-3d xerite azerbaycan mobil
-3d xerite azerbaycan azərişıq
-3d xerite azerbaycan bakı
-3d xerite azerbaycan rayonlar
-3d xerite azerbaycan yollar
-3d xerite azerbaycan qarabağ
-3d xerite azerbaycan şəhərlər
-3d xerite azerbaycan satılıq
-3d xerite azerbaycan pulsuz
-3d xerite azerbaycan indir
-3d xerite azerbaycan naviqasiya
-3d xerite azerbaycan gps
-3d xerite azerbaycan google
-3d xerite azerbaycan yandex
-3d xerite azerbaycan bing
-3d xerite azerbaycan apple
-3d xerite azerbaycan openstreetmap
-3d xerite azerbaycan wikipedia
-3d xerite azerbaycan təhsil
-3d xerite azerbaycan turizm
-3d xerite azerbaycan məkanlar
-3d xerite azerbaycan abidələr
-3d xerite azerbaycan muzeylər
-3d xerite azerbaycan parklar
-3d xerite azerbaycan restoranlar
-3d xerite azerbaycan otellər
-3d xerite azerbaycan mağazalar
-3d xerite azerbaycan avtovağzallar
-3d xerite azerbaycan metrostansiyaları
-3d xerite azerbaycan avtobus dayanacaqları
-3d xerite azerbaycan taksi dayanacaqları
-3d xerite azerbaycan bankomatlar
-3d xerite azerbanklar canlı reytinqi
-3d xerite azərbaycanda neft yataqları
-3d xəritə azərbaycanda tikinti şirkətləri
-3d xəritə azərbaycanda sənaye zonaları
-3d xəritə azərbaycanda kənd təsərrüfatı
-3d xəritə azərbaycanda enerji infrastrukturu
-3d xəritə azərbaycanda nüvə reaktorları
-3d xəritə azərbaycanda hava limanları
-3d xəritə azərbaycanda dövlət qurumları
-3d xəritə azərbaycanda universitetlər
-3d xəritə azərbaycanda muzeylər və kitabxanalar
-3d xəritə azərbaycanda milyonçu evlilri
-3d xəritə azərbaycanda qazıntı sahələri

How to access and use 3D map of Azerbaijan online

-

One of the easiest and most convenient ways to access and use 3D map of Azerbaijan online is through GoMap.Az, the official online map service of Azerbaijan. GoMap.Az is a web-based platform that provides various types of maps and geospatial information for the users, such as administrative boundaries, roads, railways, airports, ports, landmarks, facilities, services, and more. GoMap.Az also offers 3D map of Azerbaijan, which allows you to see and interact with the country in three dimensions, using your web browser or mobile device.
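
    GoMap.Az is used through its own website, so the sketch below does not call any GoMap.Az API. It only illustrates, with the open-source folium library, how a web map centred on Azerbaijan can be generated programmatically; the coordinates are approximate values for Baku.

```python
# Illustrative sketch only: build a simple web map centred on Baku with folium.
# This uses OpenStreetMap tiles, not GoMap.Az data or services.
import folium

baku = [40.4093, 49.8671]            # approximate latitude/longitude of Baku
m = folium.Map(location=baku, zoom_start=12)
folium.Marker(baku, popup="Baku, Azerbaijan").add_to(m)
m.save("azerbaijan_map.html")        # open the file in a browser to view it
```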

-

The features and functions of GoMap.Az, the official online map service of Azerbaijan

-

GoMap.Az has many features and functions that make it a useful and user-friendly online map service. Some of these are:

-
    -
  • Search: You can search for any location, address, or place name in Azerbaijan using the search box on the top left corner of the screen. You can also use the advanced search option to filter your results by categories, such as administrative units, settlements, streets, buildings, etc.
  • -
  • Zoom: You can zoom in and out of the map using the plus and minus buttons on the bottom right corner of the screen. You can also use the scroll wheel on your mouse or the pinch gesture on your touch screen to zoom in and out.
  • -
  • Pan: You can pan or move the map by clicking and dragging on the screen. You can also use the arrow keys on your keyboard or the swipe gesture on your touch screen to pan the map.
  • -
  • Rotate: You can rotate the map by holding down the shift key and dragging on the screen. You can also use the rotate gesture on your touch screen to rotate the map.
  • -
  • Tilt: You can tilt the map by holding down the ctrl key and dragging on the screen. You can also use the tilt gesture on your touch screen to tilt the map.
  • -
  • Layers: You can choose different layers to display on the map using the layers button on the top right corner of the screen. You can select from various options, such as base map, satellite imagery, hybrid map, terrain map, traffic map, public transport map, etc.
  • -
  • 3D: You can switch to 3D mode by clicking on the 3D button on the bottom left corner of the screen. You can then see and interact with the 3D map of Azerbaijan, which shows realistic models of buildings, landmarks, mountains, rivers, etc. You can also adjust the 3D settings using the 3D button.
  • -
  • Measure: You can measure distances and areas on the map using the measure tool on the top right corner of the screen. You can choose from different units, such as meters, kilometers, feet, miles, etc.
  • -
  • Print: You can print or save a screenshot of the map using the print tool on the top right corner of the screen. You can choose from different formats, such as PDF, PNG, JPG, etc.
  • -
  • Share: You can share a link to the map with others using the share tool on the top right corner of the screen. You can choose from different options, such as email, Facebook, Twitter, WhatsApp, etc.
  • -
-
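    GoMap.Az itself is operated entirely through its web interface, so none of the features above require any code. Purely as an illustrative analogue of the same ideas (map centre, zoom level, layers, markers, and sharing the result as a file), the sketch below uses the open-source folium library with OpenStreetMap tiles; the Baku coordinates and labels are approximate placeholders, not GoMap.Az data.

    ```python
    # A minimal sketch, not GoMap.Az code: folium with OpenStreetMap tiles stands in
    # for the concepts above (centre, zoom, layers, markers, sharing as a file).
    import folium

    # Centre the map roughly on Baku (approximate coordinates).
    m = folium.Map(location=[40.4093, 49.8671], zoom_start=12, tiles="OpenStreetMap")

    # A second tile layer plus a layer control mirrors the "Layers" button idea.
    folium.TileLayer("CartoDB positron", name="Light base map").add_to(m)
    folium.LayerControl().add_to(m)

    # A marker with a popup stands in for searching for and pinning a place.
    folium.Marker([40.4093, 49.8671], popup="Baku city centre (approximate)").add_to(m)

    # Saving to HTML is the scripting equivalent of the "Print"/"Share" tools.
    m.save("baku_map.html")
    ```

    Opening the saved baku_map.html in a browser gives a small interactive map with the same zoom, pan, and layer controls described above.
    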

The advantages and disadvantages of GoMap.Az compared to other online map services

-

GoMap.Az has some advantages and disadvantages compared to other online map services, such as Google Maps, Bing Maps, or OpenStreetMap. Some of these are:

    | Advantages | Disadvantages |
    | --- | --- |
    | It is free and easy to use. | It may not have complete and updated data coverage for some areas. |
    | It is tailored for Azerbaijan and its users. | It may not have some features and functions that other services offer. |
    | It provides accurate and reliable information from official sources. | It may not have user-generated content or feedback that other services have. |
    | It supports Azerbaijani language and culture. | It may not have multilingual support or translation that other services have. |
    
-

    Therefore, GoMap.Az has its own strengths and weaknesses compared to other online map services. However, it is still a valuable and useful platform for accessing and using the 3D map of Azerbaijan online.
    

-

The tips and tricks for using GoMap.Az effectively and efficiently

-

To make the most out of GoMap.Az, here are some tips and tricks that you can follow:

-
    -
      • Use the search function wisely: You can use the search function to find any location, address, or place name in Azerbaijan quickly and easily. However, you should also pay attention to the spelling and punctuation of your search terms, as they may affect the results. For example, searching for "Baku" will return the capital city of Azerbaijan, while a misspelled query such as "Bakku" may return no results or unrelated places. You can also use the advanced search option to narrow down your results by categories, such as administrative units, settlements, streets, buildings, etc.
    
  • -
  • Use the zoom function properly: You can use the zoom function to see more or less details on the map. However, you should also be aware of the scale and resolution of the map, as they may change depending on the zoom level. For example, if you zoom in too much, you may see some pixelation or distortion on the map. If you zoom out too much, you may lose some important information or features on the map. You can also use the scale bar on the bottom left corner of the screen to see the actual distance on the map.
  • -
  • Use the 3D function carefully: You can use the 3D function to see and interact with the 3D map of Azerbaijan, which shows realistic models of buildings, landmarks, mountains, rivers, etc. However, you should also be mindful of the performance and compatibility of your device and browser, as they may affect the quality and speed of the 3D map. For example, if your device or browser is not powerful or updated enough, you may experience some lagging or crashing on the 3D map. You can also adjust the 3D settings using the 3D button to optimize your experience.
  • -
  • Use the layers function selectively: You can use the layers function to choose different layers to display on the map. However, you should also be selective and strategic about which layers you choose, as they may affect the clarity and complexity of the map. For example, if you choose too many layers at once, you may clutter or overload the map with too much information or features. If you choose too few layers at once, you may miss or overlook some important information or features on the map. You can also use the opacity slider on each layer to adjust its visibility on the map.
  • -
  • Use the share function responsibly: You can use the share function to share a link to the map with others. However, you should also be responsible and respectful about how and with whom you share it. For example, if you share it with someone who is not interested or familiar with Azerbaijan, you may annoy or confuse them. If you share it with someone who has a different opinion or perspective on Azerbaijan, you may offend or provoke them. You can also use the short link option to create a shorter and simpler link to the map.
  • -
-

How to create and share your own 3D maps of Azerbaijan

-

If you want to create and share your own 3D maps of Azerbaijan, you will need some tools and software that are available for anyone. Here are some of them:

-

The tools and software you need to create 3D maps from aerial images, satellite data, and ground measurements

-

To create 3D maps from aerial images, satellite data, and ground measurements, you will need some tools and software that can help you collect, process, and analyze these data sources. Some of these are:

-
    -
  • Drones: Drones are unmanned aerial vehicles that can fly over any area and capture high-resolution images and videos from different angles and heights. You can use drones to collect aerial images of any place in Azerbaijan that you want to create a 3D map of.
  • -
  • Satellites: Satellites are artificial objects that orbit around Earth and transmit data from space. You can use satellites to collect satellite data of any place in Azerbaijan that you want to create a 3D map of.
  • -
  • Sensors: Sensors are devices that measure physical properties such as temperature, pressure, humidity, light, sound, etc. You can use sensors to collect ground measurements of any place in Azerbaijan that you want to create a 3D map of.
  • -
      • ArcGIS: ArcGIS is a geographic information system (GIS) software that allows you to manage, analyze, and visualize geospatial data. You can use ArcGIS to process and analyze the aerial images, satellite data, and ground measurements that you collected for creating a 3D map of Azerbaijan.
    
  • -
  • Autodesk: Autodesk is a software company that provides various products and services for 3D design, engineering, and entertainment. You can use Autodesk software to create 3D models and maps from the geospatial data that you processed and analyzed using ArcGIS.
  • -
-
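    Before any of the packages above come into play, ground measurements are often just scattered (y, x, elevation) points. As a small, tool-agnostic sketch (not ArcGIS or Autodesk code), the snippet below bins such hypothetical survey points into a coarse elevation grid with NumPy; the cell size and sample points are invented for illustration.

    ```python
    # A minimal, hypothetical sketch: bin scattered survey points (y, x, elevation)
    # into a coarse elevation grid. It stands in for the kind of preprocessing a GIS
    # package performs before 3D modelling; it is not ArcGIS or Autodesk code.
    import numpy as np

    def grid_elevation(points, cell_size=10.0):
        """points: iterable of (y, x, elevation) tuples, coordinates in metres."""
        pts = np.asarray(points, dtype=float)
        y, x, z = pts[:, 0], pts[:, 1], pts[:, 2]

        rows = int(np.ceil((y.max() - y.min()) / cell_size)) + 1
        cols = int(np.ceil((x.max() - x.min()) / cell_size)) + 1

        grid_sum = np.zeros((rows, cols))
        grid_cnt = np.zeros((rows, cols))

        r = ((y - y.min()) / cell_size).astype(int)
        c = ((x - x.min()) / cell_size).astype(int)
        np.add.at(grid_sum, (r, c), z)  # accumulate elevations per cell
        np.add.at(grid_cnt, (r, c), 1)  # count samples per cell

        with np.errstate(invalid="ignore"):
            return grid_sum / grid_cnt  # mean elevation; NaN where no samples fell

    # Three fabricated survey points, just to show the call.
    dem = grid_elevation([(0, 0, 12.1), (5, 12, 13.4), (22, 3, 15.0)])
    print(dem.shape)  # (4, 3) with the default 10 m cell size
    ```
    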

The steps and procedures for creating 3D maps using Autodesk software

-

To create 3D maps using Autodesk software, you will need to follow some steps and procedures that can help you transform your geospatial data into 3D models and maps. Some of these are:

-
    -
  1. Import your geospatial data into Autodesk software: You can import your geospatial data into Autodesk software, such as AutoCAD, Civil 3D, InfraWorks, or Revit, depending on the type and format of your data. You can also use Autodesk ReCap to convert your images and videos into 3D point clouds.
  2. -
  3. Create your 3D models from your geospatial data: You can create your 3D models from your geospatial data using various tools and features in Autodesk software, such as extrusion, lofting, sweeping, meshing, sculpting, texturing, lighting, etc. You can also use Autodesk 3ds Max or Maya to enhance your 3D models with more details and effects.
  4. -
  5. Export your 3D models into 3D maps: You can export your 3D models into 3D maps using various formats and standards, such as KML, KMZ, COLLADA, FBX, OBJ, etc. You can also use Autodesk A360 or BIM 360 to upload and share your 3D maps online.
  6. -
-
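    For readers without an Autodesk licence, the same import, model, and export sequence can be sketched with the open-source Open3D library. This is only an illustrative stand-in for the workflow above, not Autodesk code, and the file names are placeholders.

    ```python
    # Illustrative stand-in for the import -> model -> export steps above, using the
    # open-source Open3D library rather than Autodesk tools. File names are placeholders
    # and a real pipeline would supply a photogrammetry- or LiDAR-derived point cloud.
    import open3d as o3d

    # 1. Import: load a point cloud produced from drone imagery or laser scanning.
    pcd = o3d.io.read_point_cloud("survey_points.ply")
    pcd.estimate_normals()

    # 2. Model: reconstruct a surface mesh from the points.
    mesh, _ = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson(pcd, depth=9)

    # 3. Export: write a format that common 3D map viewers can read (e.g. OBJ).
    o3d.io.write_triangle_mesh("terrain_model.obj", mesh)
    ```
    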

The best practices and examples for sharing your 3D maps with others online or offline

-

To share your 3D maps with others online or offline, you will need to follow some best practices and examples that can help you communicate and collaborate effectively and efficiently. Some of these are:

-
    -
  • Choose the right platform and format for your 3D maps: You can choose the right platform and format for your 3D maps depending on your purpose and audience. For example, if you want to share your 3D maps with the general public online, you can use platforms such as Google Earth, Sketchfab, or CesiumJS, which support various formats such as KML, KMZ, COLLADA, FBX, OBJ, etc. If you want to share your 3D maps with specific users offline, you can use platforms such as ArcGIS Pro or ArcGIS Earth, which support formats such as MPKX or PPKX.
  • -
  • Add metadata and annotations to your 3D maps: You can add metadata and annotations to your 3D maps to provide more information and context for your users. For example, you can add metadata such as title, description, author, date, source, license, etc. to describe the properties and characteristics of your 3D maps. You can also add annotations such as labels, symbols, legends, pop-ups, etc. to highlight the features and functions of your 3D maps.
  • -
  • Use interactive and immersive elements to enhance your 3D maps: You can use interactive and immersive elements to enhance your 3D maps and make them more engaging and appealing for your users. For example, you can use interactive elements such as zooming, panning, rotating , tilting, etc. to manipulate and explore your 3D maps. You can also use immersive elements such as virtual reality, augmented reality, or mixed reality to experience and interact with your 3D maps in a more realistic and lifelike way.
  • -
-
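    Of the formats mentioned above, KML is one of the simplest to produce by hand. The sketch below writes a single annotated placemark using only the Python standard library; the coordinates, title, and description are placeholders. Note that KML lists coordinates as longitude,latitude,altitude.

    ```python
    # A minimal KML file with one annotated placemark, written with the standard
    # library only. Coordinates, title, and description are placeholders.
    kml = """<?xml version="1.0" encoding="UTF-8"?>
    <kml xmlns="http://www.opengis.net/kml/2.2">
      <Document>
        <name>3D map of Azerbaijan - sample placemark</name>
        <description>Example point with a short description (metadata and annotation).</description>
        <Placemark>
          <name>Baku (approximate)</name>
          <Point>
            <coordinates>49.8671,40.4093,0</coordinates>
          </Point>
        </Placemark>
      </Document>
    </kml>
    """

    with open("azerbaijan_sample.kml", "w", encoding="utf-8") as f:
        f.write(kml)
    ```
    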

Conclusion: Why 3D map of Azerbaijan is worth exploring

-

In conclusion, 3D map of Azerbaijan is a new and exciting way to explore the Land of Fire, a country with a rich and diverse geography, culture, and history. 3D map of Azerbaijan is a technology that allows you to see and interact with the country in three dimensions, using computer-generated imagery. You can access and use 3D map of Azerbaijan online through GoMap.Az, the official online map service of Azerbaijan, which provides various features and functions for the users. You can also create and share your own 3D maps of Azerbaijan, using some tools and software that are available for anyone, such as drones, satellites, sensors, ArcGIS, and Autodesk. However, you should also be aware of the challenges and limitations of 3D mapping in Azerbaijan, such as data availability and quality, data processing and analysis, and data storage and dissemination. You should also follow some tips and tricks for using GoMap.Az effectively and efficiently, such as using the search function wisely, using the zoom function properly, using the 3D function carefully, using the layers function selectively, and using the share function responsibly. By doing so, you will be able to enjoy the benefits and applications of 3D mapping for various fields and industries, such as geography and cartography, architecture and engineering, agriculture and environment, tourism and culture.

-

Therefore, we hope that this article has given you a better understanding of 3D map of Azerbaijan, and why it is worth exploring. We encourage you to try out 3D map of Azerbaijan yourself, and discover the beauty and diversity of this country in a new and exciting way.

-

FAQs

-

Here are some frequently asked questions (FAQs) about 3D map of Azerbaijan:

-
    -
  1. What is 3D map of Azerbaijan?
  2. -

    3D map of Azerbaijan is a technology that allows you to see and interact with the country in three dimensions, using computer-generated imagery.

    -
  3. How can I access and use 3D map of Azerbaijan online?
  4. -

    You can access and use 3D map of Azerbaijan online through GoMap.Az, the official online map service of Azerbaijan, which provides various features and functions for the users.

    -
  5. How can I create and share my own 3D maps of Azerbaijan?
  6. -

    You can create and share your own 3D maps of Azerbaijan using some tools and software that are available for anyone, such as drones, satellites, sensors, ArcGIS, and Autodesk.

    -
  7. What are the benefits and applications of 3D mapping for various fields and industries?
  8. -

    3D mapping has many benefits and applications for various fields and industries, such as geography and cartography, architecture and engineering, agriculture and environment, tourism and culture.

    -
  9. What are the challenges and limitations of 3D mapping in Azerbaijan?
  10. -

    Some of the challenges and limitations of 3D mapping in Azerbaijan are data availability and quality, data processing and analysis, and data storage and dissemination.

    -

197e85843d
-
-
\ No newline at end of file diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Bazooka Boy APK indir - Enjoy Fun from Crazy Explosions and Ragdoll Enemies.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Bazooka Boy APK indir - Enjoy Fun from Crazy Explosions and Ragdoll Enemies.md deleted file mode 100644 index bb7bb30daed321245393b9bca478fc725097c5e3..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Bazooka Boy APK indir - Enjoy Fun from Crazy Explosions and Ragdoll Enemies.md +++ /dev/null @@ -1,149 +0,0 @@ -
-

Bazooka Boy APK Indir: How to Download and Play this Explosive Game

-

If you are looking for a fun and addictive game that will keep you entertained for hours, you should try Bazooka Boy. This is a game where you blast through levels and destroy everything before you with a bunch of amazing weapons. You can collect all and master their different and unique qualities, send your enemies flying, explode their bases and crush their buildings. In this article, we will show you how to download and play Bazooka Boy APK indir, the Android version of this explosive game.

-

What is Bazooka Boy?

-

A brief introduction to the game and its features

-

Bazooka Boy is a physics-based puzzle game developed by Supersonic Studios LTD. It was released in 2020 and has gained over 50 million downloads on Google Play Store. The game has a simple premise: you are a boy with a bazooka and you have to shoot your way through various levels filled with enemies, obstacles, and destructible objects. You can use different types of weapons, such as rockets, grenades, bombs, lasers, and more, each with their own effects and advantages. You can also customize your character with different outfits and accessories.

-

bazooka boy apk indir


Download ✫ https://bltlly.com/2uOsht



-

Why you should play Bazooka Boy

-

Bazooka Boy is a game that offers satisfying destruction and unlimited possibilities to shoot your way to victory. It has many features that make it fun and engaging, such as:

-
    -
  • Fun ragdoll enemies and crazy explosions with levels upon levels of awesome content and challenges.
  • -
  • Flashy exploding effects and awesome physics that make each level a unique action puzzle.
  • -
  • A variety of weapons to choose from, each with their own characteristics and abilities.
  • -
  • A colorful and cartoonish graphics style that adds to the humor and charm of the game.
  • -
  • An easy-to-use interface and intuitive controls that make the game accessible to anyone.
  • -
-

If you are looking for a game that will make you laugh, challenge your brain, and satisfy your destructive urges, Bazooka Boy is the game for you.

-

How to download Bazooka Boy APK indir

-

The benefits of downloading the APK file

-

If you want to play Bazooka Boy on your Android device, you have two options: you can either download it from the Google Play Store or download the APK file from a third-party source. The APK file is an application package file that contains all the data and files needed to install an app on your device. There are some benefits of downloading the APK file instead of the Google Play Store version, such as:

-
    -
  • You can access the latest version of the game before it is officially released on the Google Play Store.
  • -
  • You can bypass any regional restrictions or compatibility issues that may prevent you from downloading the game from the Google Play Store.
  • -
  • You can save some storage space on your device by downloading only the necessary files instead of the whole app.
  • -
-

However, there are also some risks involved in downloading the APK file from a third-party source, such as:

-
    -
  • You may expose your device to malware or viruses that may harm your data or system.
  • -
  • You may violate the terms of service or privacy policy of the game developer or publisher.
  • -
  • You may not receive any updates or support from the game developer or publisher.
  • -
-

Therefore, if you decide to download the APK file, you should do so at your own risk and only from a trusted and reputable source.
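    One small safeguard, if the download site publishes a checksum for the file, is to confirm that the APK you received actually matches it before installing anything. A minimal sketch with Python's hashlib follows; the file name and expected hash are placeholders.

    ```python
    # Verify a downloaded APK against a published SHA-256 checksum before installing
    # it. The file name and the expected hash below are placeholders.
    import hashlib

    def sha256_of(path, chunk_size=1 << 20):
        digest = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(chunk_size), b""):
                digest.update(chunk)
        return digest.hexdigest()

    expected = "checksum-published-by-the-download-site"
    actual = sha256_of("bazooka-boy.apk")
    print("Checksum OK" if actual == expected else f"Mismatch: {actual}")
    ```
    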

    The steps to download and install the APK file

    -
    

If you have decided to download the APK file of Bazooka Boy, you need to follow these steps to install it on your device:

-
    -
  1. Go to a reliable and secure website that offers the APK file of Bazooka Boy, such as [APKPure] or [APKMirror].
  2. -
  3. Download the APK file to your device. You may need to enable the option to install apps from unknown sources in your device settings.
  4. -
  5. Locate the downloaded APK file in your file manager and tap on it to start the installation process.
  6. -
  7. Follow the instructions on the screen and grant the necessary permissions to the app.
  8. -
  9. Wait for the installation to finish and launch the app from your home screen or app drawer.
  10. -
-

Congratulations, you have successfully installed Bazooka Boy APK indir on your device. Now you can enjoy playing this explosive game anytime and anywhere.

-

bazooka boy android game download
-bazooka boy apk free download
-bazooka boy apk mod indir
-bazooka boy apk son sürüm indir
-bazooka boy apk tamindir
-bazooka boy apk uptodown
-bazooka boy apk yükle
-bazooka boy app indir
-bazooka boy full apk indir
-bazooka boy game apk indir
-bazooka boy hileli apk indir
-bazooka boy indir apk cepde
-bazooka boy indir apk dayı
-bazooka boy indir apk pure
-bazooka boy indir google play
-bazooka boy oyunu apk indir
-bazooka boy para hileli apk indir
-bazooka boy premium apk indir
-bazooka boy pro apk indir
-bazooka boy unlimited money apk indir
-download bazooka boy android game
-download bazooka boy apk for android
-download bazooka boy apk mod
-download bazooka boy app for android
-download bazooka boy game for android
-download game bazooka boy mod apk
-how to download bazooka boy on android
-how to install bazooka boy apk on android
-how to play bazooka boy on android
-indir oyun club bazooka boy apk
-oyun club net indir bazooka boy hileli mod apk android oyunlar aksiyon macera oyunlari 2023 06 07 2023 06 07 html (^1^)
-play store da yok olan oyunlar 2023 06 07 2023 06 07 html (^2^)
    

-

How to play Bazooka Boy

-

The basic gameplay and controls

-

Bazooka Boy is a game that is easy to learn but hard to master. The gameplay is simple: you have to shoot your way through each level and destroy everything in your path. You can move your character by dragging your finger on the left side of the screen, and aim and shoot by tapping on the right side of the screen. You can also zoom in and out by pinching the screen. You have a limited amount of ammo for each weapon, so use it wisely. You can switch between different weapons by tapping on their icons at the bottom of the screen. You can also collect coins, gems, and power-ups along the way, which can help you upgrade your weapons and unlock new outfits and accessories.

-

The different weapons and their qualities

-

Bazooka Boy offers a variety of weapons that you can use to blast through the levels. Each weapon has its own characteristics and abilities, such as:

    | Weapon | Description |
    | --- | --- |
    | Rocket | The default weapon that shoots a single rocket that explodes on impact. |
    | Grenade | A weapon that shoots a grenade that bounces off surfaces and explodes after a few seconds. |
    | Bomb | A weapon that shoots a bomb that sticks to surfaces and explodes when you tap on it. |
    | Laser | A weapon that shoots a laser beam that cuts through objects and enemies. |
    | Firework | A weapon that shoots a firework that splits into multiple rockets in mid-air. |
    | Cluster Bomb | A weapon that shoots a cluster bomb that releases smaller bombs upon explosion. |
    | Nuke | A weapon that shoots a nuke that causes a massive explosion that destroys everything in its radius. |
    | And more... | There are many more weapons to discover and unlock in Bazooka Boy, such as the flamethrower, the ice gun, the lightning rod, and more. |
    
-

You can upgrade your weapons by spending coins and gems, which will increase their damage, range, speed, and ammo capacity. You can also unlock new weapons by completing certain levels or achievements.

-

The tips and tricks to master the game

-

Bazooka Boy is a game that requires skill, strategy, and creativity. Here are some tips and tricks that can help you master the game:

-
    -
  • Aim for the weak spots of the enemies and objects, such as barrels, crates, gas tanks, etc. They will cause bigger explosions and chain reactions.
  • -
  • Use the environment to your advantage, such as walls, ramps, bridges, etc. They can help you bounce or redirect your shots or create obstacles for your enemies.
  • -
  • Experiment with different weapons and find out which ones suit your play style and each level best. Some weapons are more effective than others depending on the situation.
  • -
  • Watch out for hazards and traps, such as mines, spikes, lasers, etc. They can damage or kill you if you are not careful.
  • -
  • Collect as many coins, gems, and power-ups as you can. They will help you upgrade your weapons and unlock new items.
  • -
  • Have fun and be creative. There is no right or wrong way to play Bazooka Boy. You can try different approaches and strategies to complete each level.
  • Conclusion -

    A summary of the main points and a call to action

    -

    Bazooka Boy is a game that will make you feel like a kid again. It is a game that lets you unleash your creativity and have fun with explosions and destruction. You can download and play Bazooka Boy APK indir on your Android device by following the steps we have provided in this article. You can also enjoy the game's features, such as the different weapons, the colorful graphics, the easy controls, and the challenging levels. Bazooka Boy is a game that will keep you hooked for hours and make you smile with every shot. So what are you waiting for? Download Bazooka Boy APK indir today and start blasting away!

    -

    FAQs

    -

    What are some of the FAQs about Bazooka Boy APK indir?

    -

    Here are some of the frequently asked questions about Bazooka Boy APK indir and their answers:

    -
      -
    1. Is Bazooka Boy APK indir free to play?
      -Yes, Bazooka Boy APK indir is free to play. However, it contains ads and in-app purchases that can enhance your gaming experience.
    2. -
    3. Is Bazooka Boy APK indir safe to download and install?
      -Yes, Bazooka Boy APK indir is safe to download and install if you do so from a trusted and reputable source. However, you should always be careful when downloading any APK file from a third-party source and scan it for any malware or viruses before installing it on your device.
    4. -
    5. How can I update Bazooka Boy APK indir?
      -You can update Bazooka Boy APK indir by downloading the latest version of the APK file from the same source where you downloaded the previous version. You can also check for updates within the app settings.
    6. -
    7. How can I contact the developer or publisher of Bazooka Boy?
      -You can contact the developer or publisher of Bazooka Boy by visiting their official website or social media pages. You can also send them an email or leave a review on the Google Play Store.
    8. -
    9. How can I share my feedback or suggestions for Bazooka Boy?
      -You can share your feedback or suggestions for Bazooka Boy by leaving a comment or rating on the Google Play Store or by contacting the developer or publisher directly.
    10. -

    401be4b1e0
    -
    -
    \ No newline at end of file diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/rich/highlighter.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/rich/highlighter.py deleted file mode 100644 index 82293dffc492ea50b16335fd411b255dd5dfca57..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/rich/highlighter.py +++ /dev/null @@ -1,232 +0,0 @@ -import re -from abc import ABC, abstractmethod -from typing import List, Union - -from .text import Span, Text - - -def _combine_regex(*regexes: str) -> str: - """Combine a number of regexes in to a single regex. - - Returns: - str: New regex with all regexes ORed together. - """ - return "|".join(regexes) - - -class Highlighter(ABC): - """Abstract base class for highlighters.""" - - def __call__(self, text: Union[str, Text]) -> Text: - """Highlight a str or Text instance. - - Args: - text (Union[str, ~Text]): Text to highlight. - - Raises: - TypeError: If not called with text or str. - - Returns: - Text: A test instance with highlighting applied. - """ - if isinstance(text, str): - highlight_text = Text(text) - elif isinstance(text, Text): - highlight_text = text.copy() - else: - raise TypeError(f"str or Text instance required, not {text!r}") - self.highlight(highlight_text) - return highlight_text - - @abstractmethod - def highlight(self, text: Text) -> None: - """Apply highlighting in place to text. - - Args: - text (~Text): A text object highlight. - """ - - -class NullHighlighter(Highlighter): - """A highlighter object that doesn't highlight. - - May be used to disable highlighting entirely. - - """ - - def highlight(self, text: Text) -> None: - """Nothing to do""" - - -class RegexHighlighter(Highlighter): - """Applies highlighting from a list of regular expressions.""" - - highlights: List[str] = [] - base_style: str = "" - - def highlight(self, text: Text) -> None: - """Highlight :class:`rich.text.Text` using regular expressions. - - Args: - text (~Text): Text to highlighted. - - """ - - highlight_regex = text.highlight_regex - for re_highlight in self.highlights: - highlight_regex(re_highlight, style_prefix=self.base_style) - - -class ReprHighlighter(RegexHighlighter): - """Highlights the text typically produced from ``__repr__`` methods.""" - - base_style = "repr." - highlights = [ - r"(?P<)(?P[-\w.:|]*)(?P[\w\W]*?)(?P>)", - r'(?P[\w_]{1,50})=(?P"?[\w_]+"?)?', - r"(?P[][{}()])", - _combine_regex( - r"(?P[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3})", - r"(?P([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4})", - r"(?P(?:[0-9A-Fa-f]{1,2}-){7}[0-9A-Fa-f]{1,2}|(?:[0-9A-Fa-f]{1,2}:){7}[0-9A-Fa-f]{1,2}|(?:[0-9A-Fa-f]{4}\.){3}[0-9A-Fa-f]{4})", - r"(?P(?:[0-9A-Fa-f]{1,2}-){5}[0-9A-Fa-f]{1,2}|(?:[0-9A-Fa-f]{1,2}:){5}[0-9A-Fa-f]{1,2}|(?:[0-9A-Fa-f]{4}\.){2}[0-9A-Fa-f]{4})", - r"(?P[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12})", - r"(?P[\w.]*?)\(", - r"\b(?PTrue)\b|\b(?PFalse)\b|\b(?PNone)\b", - r"(?P\.\.\.)", - r"(?P(?(?\B(/[-\w._+]+)*\/)(?P[-\w._+]*)?", - r"(?b?'''.*?(?(file|https|http|ws|wss)://[-0-9a-zA-Z$_+!`(),.?/;:&=%#]*)", - ), - ] - - -class JSONHighlighter(RegexHighlighter): - """Highlights JSON""" - - # Captures the start and end of JSON strings, handling escaped quotes - JSON_STR = r"(?b?\".*?(?[\{\[\(\)\]\}])", - r"\b(?Ptrue)\b|\b(?Pfalse)\b|\b(?Pnull)\b", - r"(?P(? 
None: - super().highlight(text) - - # Additional work to handle highlighting JSON keys - plain = text.plain - append = text.spans.append - whitespace = self.JSON_WHITESPACE - for match in re.finditer(self.JSON_STR, plain): - start, end = match.span() - cursor = end - while cursor < len(plain): - char = plain[cursor] - cursor += 1 - if char == ":": - append(Span(start, end, "json.key")) - elif char in whitespace: - continue - break - - -class ISO8601Highlighter(RegexHighlighter): - """Highlights the ISO8601 date time strings. - Regex reference: https://www.oreilly.com/library/view/regular-expressions-cookbook/9781449327453/ch04s07.html - """ - - base_style = "iso8601." - highlights = [ - # - # Dates - # - # Calendar month (e.g. 2008-08). The hyphen is required - r"^(?P[0-9]{4})-(?P1[0-2]|0[1-9])$", - # Calendar date w/o hyphens (e.g. 20080830) - r"^(?P(?P[0-9]{4})(?P1[0-2]|0[1-9])(?P3[01]|0[1-9]|[12][0-9]))$", - # Ordinal date (e.g. 2008-243). The hyphen is optional - r"^(?P(?P[0-9]{4})-?(?P36[0-6]|3[0-5][0-9]|[12][0-9]{2}|0[1-9][0-9]|00[1-9]))$", - # - # Weeks - # - # Week of the year (e.g., 2008-W35). The hyphen is optional - r"^(?P(?P[0-9]{4})-?W(?P5[0-3]|[1-4][0-9]|0[1-9]))$", - # Week date (e.g., 2008-W35-6). The hyphens are optional - r"^(?P(?P[0-9]{4})-?W(?P5[0-3]|[1-4][0-9]|0[1-9])-?(?P[1-7]))$", - # - # Times - # - # Hours and minutes (e.g., 17:21). The colon is optional - r"^(?P(?P2[0-3]|[01][0-9]):?(?P[0-5][0-9]))$", - # Hours, minutes, and seconds w/o colons (e.g., 172159) - r"^(?P(?P2[0-3]|[01][0-9])(?P[0-5][0-9])(?P[0-5][0-9]))$", - # Time zone designator (e.g., Z, +07 or +07:00). The colons and the minutes are optional - r"^(?P(Z|[+-](?:2[0-3]|[01][0-9])(?::?(?:[0-5][0-9]))?))$", - # Hours, minutes, and seconds with time zone designator (e.g., 17:21:59+07:00). - # All the colons are optional. The minutes in the time zone designator are also optional - r"^(?P(?P2[0-3]|[01][0-9])(?P[0-5][0-9])(?P[0-5][0-9]))(?PZ|[+-](?:2[0-3]|[01][0-9])(?::?(?:[0-5][0-9]))?)$", - # - # Date and Time - # - # Calendar date with hours, minutes, and seconds (e.g., 2008-08-30 17:21:59 or 20080830 172159). - # A space is required between the date and the time. The hyphens and colons are optional. - # This regex matches dates and times that specify some hyphens or colons but omit others. - # This does not follow ISO 8601 - r"^(?P(?P[0-9]{4})(?P-)?(?P1[0-2]|0[1-9])(?(hyphen)-)(?P3[01]|0[1-9]|[12][0-9])) (?P(?P2[0-3]|[01][0-9])(?(hyphen):)(?P[0-5][0-9])(?(hyphen):)(?P[0-5][0-9]))$", - # - # XML Schema dates and times - # - # Date, with optional time zone (e.g., 2008-08-30 or 2008-08-30+07:00). - # Hyphens are required. This is the XML Schema 'date' type - r"^(?P(?P-?(?:[1-9][0-9]*)?[0-9]{4})-(?P1[0-2]|0[1-9])-(?P3[01]|0[1-9]|[12][0-9]))(?PZ|[+-](?:2[0-3]|[01][0-9]):[0-5][0-9])?$", - # Time, with optional fractional seconds and time zone (e.g., 01:45:36 or 01:45:36.123+07:00). - # There is no limit on the number of digits for the fractional seconds. This is the XML Schema 'time' type - r"^(?P(?P2[0-3]|[01][0-9]):(?P[0-5][0-9]):(?P[0-5][0-9])(?P\.[0-9]+)?)(?PZ|[+-](?:2[0-3]|[01][0-9]):[0-5][0-9])?$", - # Date and time, with optional fractional seconds and time zone (e.g., 2008-08-30T01:45:36 or 2008-08-30T01:45:36.123Z). 
- # This is the XML Schema 'dateTime' type - r"^(?P(?P-?(?:[1-9][0-9]*)?[0-9]{4})-(?P1[0-2]|0[1-9])-(?P3[01]|0[1-9]|[12][0-9]))T(?P(?P2[0-3]|[01][0-9]):(?P[0-5][0-9]):(?P[0-5][0-9])(?P\.[0-9]+)?)(?PZ|[+-](?:2[0-3]|[01][0-9]):[0-5][0-9])?$", - ] - - -if __name__ == "__main__": # pragma: no cover - from .console import Console - - console = Console() - console.print("[bold green]hello world![/bold green]") - console.print("'[bold green]hello world![/bold green]'") - - console.print(" /foo") - console.print("/foo/") - console.print("/foo/bar") - console.print("foo/bar/baz") - - console.print("/foo/bar/baz?foo=bar+egg&egg=baz") - console.print("/foo/bar/baz/") - console.print("/foo/bar/baz/egg") - console.print("/foo/bar/baz/egg.py") - console.print("/foo/bar/baz/egg.py word") - console.print(" /foo/bar/baz/egg.py word") - console.print("foo /foo/bar/baz/egg.py word") - console.print("foo /foo/bar/ba._++z/egg+.py word") - console.print("https://example.org?foo=bar#header") - - console.print(1234567.34) - console.print(1 / 2) - console.print(-1 / 123123123123) - - console.print( - "127.0.1.1 bar 192.168.1.4 2001:0db8:85a3:0000:0000:8a2e:0370:7334 foo" - ) - import json - - console.print_json(json.dumps(obj={"name": "apple", "count": 1}), indent=None) diff --git a/spaces/tomofi/MMOCR/mmocr/models/textdet/modules/local_graph.py b/spaces/tomofi/MMOCR/mmocr/models/textdet/modules/local_graph.py deleted file mode 100644 index 861582030313ae4f393e070c3eab5e496ecdd78a..0000000000000000000000000000000000000000 --- a/spaces/tomofi/MMOCR/mmocr/models/textdet/modules/local_graph.py +++ /dev/null @@ -1,297 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import numpy as np -import torch -from mmcv.ops import RoIAlignRotated - -from .utils import (euclidean_distance_matrix, feature_embedding, - normalize_adjacent_matrix) - - -class LocalGraphs: - """Generate local graphs for GCN to classify the neighbors of a pivot for - DRRG: Deep Relational Reasoning Graph Network for Arbitrary Shape Text - Detection. - - [https://arxiv.org/abs/2003.07493]. This code was partially adapted from - https://github.com/GXYM/DRRG licensed under the MIT license. - - Args: - k_at_hops (tuple(int)): The number of i-hop neighbors, i = 1, 2. - num_adjacent_linkages (int): The number of linkages when constructing - adjacent matrix. - node_geo_feat_len (int): The length of embedded geometric feature - vector of a text component. - pooling_scale (float): The spatial scale of rotated RoI-Align. - pooling_output_size (tuple(int)): The output size of rotated RoI-Align. - local_graph_thr(float): The threshold for filtering out identical local - graphs. - """ - - def __init__(self, k_at_hops, num_adjacent_linkages, node_geo_feat_len, - pooling_scale, pooling_output_size, local_graph_thr): - - assert len(k_at_hops) == 2 - assert all(isinstance(n, int) for n in k_at_hops) - assert isinstance(num_adjacent_linkages, int) - assert isinstance(node_geo_feat_len, int) - assert isinstance(pooling_scale, float) - assert all(isinstance(n, int) for n in pooling_output_size) - assert isinstance(local_graph_thr, float) - - self.k_at_hops = k_at_hops - self.num_adjacent_linkages = num_adjacent_linkages - self.node_geo_feat_dim = node_geo_feat_len - self.pooling = RoIAlignRotated(pooling_output_size, pooling_scale) - self.local_graph_thr = local_graph_thr - - def generate_local_graphs(self, sorted_dist_inds, gt_comp_labels): - """Generate local graphs for GCN to predict which instance a text - component belongs to. 
- - Args: - sorted_dist_inds (ndarray): The complete graph node indices, which - is sorted according to the Euclidean distance. - gt_comp_labels(ndarray): The ground truth labels define the - instance to which the text components (nodes in graphs) belong. - - Returns: - pivot_local_graphs(list[list[int]]): The list of local graph - neighbor indices of pivots. - pivot_knns(list[list[int]]): The list of k-nearest neighbor indices - of pivots. - """ - - assert sorted_dist_inds.ndim == 2 - assert (sorted_dist_inds.shape[0] == sorted_dist_inds.shape[1] == - gt_comp_labels.shape[0]) - - knn_graph = sorted_dist_inds[:, 1:self.k_at_hops[0] + 1] - pivot_local_graphs = [] - pivot_knns = [] - for pivot_ind, knn in enumerate(knn_graph): - - local_graph_neighbors = set(knn) - - for neighbor_ind in knn: - local_graph_neighbors.update( - set(sorted_dist_inds[neighbor_ind, - 1:self.k_at_hops[1] + 1])) - - local_graph_neighbors.discard(pivot_ind) - pivot_local_graph = list(local_graph_neighbors) - pivot_local_graph.insert(0, pivot_ind) - pivot_knn = [pivot_ind] + list(knn) - - if pivot_ind < 1: - pivot_local_graphs.append(pivot_local_graph) - pivot_knns.append(pivot_knn) - else: - add_flag = True - for graph_ind, added_knn in enumerate(pivot_knns): - added_pivot_ind = added_knn[0] - added_local_graph = pivot_local_graphs[graph_ind] - - union = len( - set(pivot_local_graph[1:]).union( - set(added_local_graph[1:]))) - intersect = len( - set(pivot_local_graph[1:]).intersection( - set(added_local_graph[1:]))) - local_graph_iou = intersect / (union + 1e-8) - - if (local_graph_iou > self.local_graph_thr - and pivot_ind in added_knn - and gt_comp_labels[added_pivot_ind] - == gt_comp_labels[pivot_ind] - and gt_comp_labels[pivot_ind] != 0): - add_flag = False - break - if add_flag: - pivot_local_graphs.append(pivot_local_graph) - pivot_knns.append(pivot_knn) - - return pivot_local_graphs, pivot_knns - - def generate_gcn_input(self, node_feat_batch, node_label_batch, - local_graph_batch, knn_batch, - sorted_dist_ind_batch): - """Generate graph convolution network input data. - - Args: - node_feat_batch (List[Tensor]): The batched graph node features. - node_label_batch (List[ndarray]): The batched text component - labels. - local_graph_batch (List[List[list[int]]]): The local graph node - indices of image batch. - knn_batch (List[List[list[int]]]): The knn graph node indices of - image batch. - sorted_dist_ind_batch (list[ndarray]): The node indices sorted - according to the Euclidean distance. - - Returns: - local_graphs_node_feat (Tensor): The node features of graph. - adjacent_matrices (Tensor): The adjacent matrices of local graphs. - pivots_knn_inds (Tensor): The k-nearest neighbor indices in - local graph. - gt_linkage (Tensor): The surpervision signal of GCN for linkage - prediction. 
- """ - assert isinstance(node_feat_batch, list) - assert isinstance(node_label_batch, list) - assert isinstance(local_graph_batch, list) - assert isinstance(knn_batch, list) - assert isinstance(sorted_dist_ind_batch, list) - - num_max_nodes = max([ - len(pivot_local_graph) for pivot_local_graphs in local_graph_batch - for pivot_local_graph in pivot_local_graphs - ]) - - local_graphs_node_feat = [] - adjacent_matrices = [] - pivots_knn_inds = [] - pivots_gt_linkage = [] - - for batch_ind, sorted_dist_inds in enumerate(sorted_dist_ind_batch): - node_feats = node_feat_batch[batch_ind] - pivot_local_graphs = local_graph_batch[batch_ind] - pivot_knns = knn_batch[batch_ind] - node_labels = node_label_batch[batch_ind] - device = node_feats.device - - for graph_ind, pivot_knn in enumerate(pivot_knns): - pivot_local_graph = pivot_local_graphs[graph_ind] - num_nodes = len(pivot_local_graph) - pivot_ind = pivot_local_graph[0] - node2ind_map = {j: i for i, j in enumerate(pivot_local_graph)} - - knn_inds = torch.tensor( - [node2ind_map[i] for i in pivot_knn[1:]]) - pivot_feats = node_feats[pivot_ind] - normalized_feats = node_feats[pivot_local_graph] - pivot_feats - - adjacent_matrix = np.zeros((num_nodes, num_nodes), - dtype=np.float32) - for node in pivot_local_graph: - neighbors = sorted_dist_inds[node, - 1:self.num_adjacent_linkages + - 1] - for neighbor in neighbors: - if neighbor in pivot_local_graph: - - adjacent_matrix[node2ind_map[node], - node2ind_map[neighbor]] = 1 - adjacent_matrix[node2ind_map[neighbor], - node2ind_map[node]] = 1 - - adjacent_matrix = normalize_adjacent_matrix(adjacent_matrix) - pad_adjacent_matrix = torch.zeros( - (num_max_nodes, num_max_nodes), - dtype=torch.float, - device=device) - pad_adjacent_matrix[:num_nodes, :num_nodes] = torch.from_numpy( - adjacent_matrix) - - pad_normalized_feats = torch.cat([ - normalized_feats, - torch.zeros( - (num_max_nodes - num_nodes, normalized_feats.shape[1]), - dtype=torch.float, - device=device) - ], - dim=0) - - local_graph_labels = node_labels[pivot_local_graph] - knn_labels = local_graph_labels[knn_inds] - link_labels = ((node_labels[pivot_ind] == knn_labels) & - (node_labels[pivot_ind] > 0)).astype(np.int64) - link_labels = torch.from_numpy(link_labels) - - local_graphs_node_feat.append(pad_normalized_feats) - adjacent_matrices.append(pad_adjacent_matrix) - pivots_knn_inds.append(knn_inds) - pivots_gt_linkage.append(link_labels) - - local_graphs_node_feat = torch.stack(local_graphs_node_feat, 0) - adjacent_matrices = torch.stack(adjacent_matrices, 0) - pivots_knn_inds = torch.stack(pivots_knn_inds, 0) - pivots_gt_linkage = torch.stack(pivots_gt_linkage, 0) - - return (local_graphs_node_feat, adjacent_matrices, pivots_knn_inds, - pivots_gt_linkage) - - def __call__(self, feat_maps, comp_attribs): - """Generate local graphs as GCN input. - - Args: - feat_maps (Tensor): The feature maps to extract the content - features of text components. - comp_attribs (ndarray): The text component attributes. - - Returns: - local_graphs_node_feat (Tensor): The node features of graph. - adjacent_matrices (Tensor): The adjacent matrices of local graphs. - pivots_knn_inds (Tensor): The k-nearest neighbor indices in local - graph. - gt_linkage (Tensor): The surpervision signal of GCN for linkage - prediction. 
- """ - - assert isinstance(feat_maps, torch.Tensor) - assert comp_attribs.ndim == 3 - assert comp_attribs.shape[2] == 8 - - sorted_dist_inds_batch = [] - local_graph_batch = [] - knn_batch = [] - node_feat_batch = [] - node_label_batch = [] - device = feat_maps.device - - for batch_ind in range(comp_attribs.shape[0]): - num_comps = int(comp_attribs[batch_ind, 0, 0]) - comp_geo_attribs = comp_attribs[batch_ind, :num_comps, 1:7] - node_labels = comp_attribs[batch_ind, :num_comps, - 7].astype(np.int32) - - comp_centers = comp_geo_attribs[:, 0:2] - distance_matrix = euclidean_distance_matrix( - comp_centers, comp_centers) - - batch_id = np.zeros( - (comp_geo_attribs.shape[0], 1), dtype=np.float32) * batch_ind - comp_geo_attribs[:, -2] = np.clip(comp_geo_attribs[:, -2], -1, 1) - angle = np.arccos(comp_geo_attribs[:, -2]) * np.sign( - comp_geo_attribs[:, -1]) - angle = angle.reshape((-1, 1)) - rotated_rois = np.hstack( - [batch_id, comp_geo_attribs[:, :-2], angle]) - rois = torch.from_numpy(rotated_rois).to(device) - content_feats = self.pooling(feat_maps[batch_ind].unsqueeze(0), - rois) - - content_feats = content_feats.view(content_feats.shape[0], - -1).to(feat_maps.device) - geo_feats = feature_embedding(comp_geo_attribs, - self.node_geo_feat_dim) - geo_feats = torch.from_numpy(geo_feats).to(device) - node_feats = torch.cat([content_feats, geo_feats], dim=-1) - - sorted_dist_inds = np.argsort(distance_matrix, axis=1) - pivot_local_graphs, pivot_knns = self.generate_local_graphs( - sorted_dist_inds, node_labels) - - node_feat_batch.append(node_feats) - node_label_batch.append(node_labels) - local_graph_batch.append(pivot_local_graphs) - knn_batch.append(pivot_knns) - sorted_dist_inds_batch.append(sorted_dist_inds) - - (node_feats, adjacent_matrices, knn_inds, gt_linkage) = \ - self.generate_gcn_input(node_feat_batch, - node_label_batch, - local_graph_batch, - knn_batch, - sorted_dist_inds_batch) - - return node_feats, adjacent_matrices, knn_inds, gt_linkage diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/mask_rcnn/mask_rcnn_r50_fpn_2x_coco.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/mask_rcnn/mask_rcnn_r50_fpn_2x_coco.py deleted file mode 100644 index 932b1f905155a0d3285daefc4891f5194705e30d..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/mask_rcnn/mask_rcnn_r50_fpn_2x_coco.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = [ - '../_base_/models/mask_rcnn_r50_fpn.py', - '../_base_/datasets/coco_instance.py', - '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' -] diff --git "a/spaces/trialapp/gpt_summarizer/pages/2_Youtube\343\203\223\343\203\207\343\202\252\343\201\256\350\246\201\347\264\204.py" "b/spaces/trialapp/gpt_summarizer/pages/2_Youtube\343\203\223\343\203\207\343\202\252\343\201\256\350\246\201\347\264\204.py" deleted file mode 100644 index 8b19a3546465a4dfe12468bdb63468e1b0442cff..0000000000000000000000000000000000000000 --- "a/spaces/trialapp/gpt_summarizer/pages/2_Youtube\343\203\223\343\203\207\343\202\252\343\201\256\350\246\201\347\264\204.py" +++ /dev/null @@ -1,118 +0,0 @@ -import langchain -import streamlit as st -from dotenv import load_dotenv -from langchain.cache import SQLiteCache -from langchain.callbacks import get_openai_callback -from langchain.chains.summarize import load_summarize_chain -from langchain.chat_models import ChatOpenAI -from langchain.document_loaders import YoutubeLoader -from langchain.llms import OpenAI -from langchain.prompts import 
PromptTemplate -from langchain.text_splitter import RecursiveCharacterTextSplitter - - -def init_page(): - st.set_page_config(page_title="Youtube ビデオの要約", page_icon="🤗") - st.header("Youtube ビデオの要約 🤗") - st.sidebar.title("Options") - st.session_state.costs = [] - - -def select_model(): - model = st.sidebar.radio( - "モデルを選択してください。", ("GPT-3.5", "GPT-3.5-16k", "GPT-4"), disabled=True - ) - if model == "GPT-3.5": - st.session_state.model_name = "gpt-3.5-turbo" - elif model == "GPT-3.5-16k": - st.session_state.model_name = "gpt-3.5-turbo-16k" - else: - st.session_state.model_name = "gpt-4" - # 300: The number of tokens for instructions outside the main text - st.session_state.max_token = ( - OpenAI.modelname_to_contextsize(st.session_state.model_name) - 300 - ) - return ChatOpenAI(temperature=0, model_name=st.session_state.model_name) - - -def get_url_input(): - url = st.text_input("Youtube URL: ", key="input") - return url - - -def get_document(url): - with st.spinner("コンテンツの取得中です ..."): - loader = YoutubeLoader.from_youtube_url( - url, - add_video_info=True, # You can also retrieve the title and view count. - language=[ - "en", - "ja", - ], # Retrieve subtitles prioritizing English to Japanese translation. - ) - text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder( - model_name=st.session_state.model_name, - chunk_size=st.session_state.max_token, - chunk_overlap=0, - ) - return loader.load_and_split(text_splitter=text_splitter) - - -def summarize(llm, docs): - prompt_template = """下記YouTubeビデオのトランスクリプトの簡潔な要約を書いてください. -=== - -{text} - -""" - PROMPT = PromptTemplate(template=prompt_template, input_variables=["text"]) - with get_openai_callback() as cb: - chain = load_summarize_chain( - llm, - chain_type="map_reduce", - verbose=True, - map_prompt=PROMPT, - combine_prompt=PROMPT, - ) - response = chain( - { - "input_documents": docs, - # If you don't specify token_max, the internal processing will be adjusted to fit regular model sizes like GPT-3.5, so please be aware of that. - "token_max": st.session_state.max_token, - }, - return_only_outputs=True, - ) - return response["output_text"], cb.total_cost - - -def main(): - load_dotenv() - langchain.llm_cache = SQLiteCache(database_path=".langchain.db") - init_page() - llm = select_model() - container = st.container() - response_container = st.container() - with container: - if url := get_url_input(): - document = get_document(url) - with st.spinner("ChatGptはタイピングしています ..."): - output_text, cost = summarize(llm, document) - st.session_state.costs.append(cost) - else: - output_text = None - if output_text: - with response_container: - st.markdown("## 要約") - st.write(output_text) - st.markdown("---") - st.markdown("## 元の文章") - st.write(document) - costs = st.session_state.get("費用", []) - st.sidebar.markdown("## 費用") - st.sidebar.markdown(f"**総費用: ${sum(costs):.5f}**") - for cost in costs: - st.sidebar.markdown(f"- ${cost:.5f}") - - -if __name__ == "__main__": - main() diff --git a/spaces/triggah61/chingu-music/tests/models/test_musicgen.py b/spaces/triggah61/chingu-music/tests/models/test_musicgen.py deleted file mode 100644 index d43cf73763f6c690ab0b277227ac225b286fa143..0000000000000000000000000000000000000000 --- a/spaces/triggah61/chingu-music/tests/models/test_musicgen.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
- -import pytest -import torch - -from audiocraft.models import MusicGen - - -class TestSEANetModel: - def get_musicgen(self): - mg = MusicGen.get_pretrained(name='debug', device='cpu') - mg.set_generation_params(duration=2.0, extend_stride=2.) - return mg - - def test_base(self): - mg = self.get_musicgen() - assert mg.frame_rate == 25 - assert mg.sample_rate == 32000 - assert mg.audio_channels == 1 - - def test_generate_unconditional(self): - mg = self.get_musicgen() - wav = mg.generate_unconditional(3) - assert list(wav.shape) == [3, 1, 64000] - - def test_generate_continuation(self): - mg = self.get_musicgen() - prompt = torch.randn(3, 1, 32000) - wav = mg.generate_continuation(prompt, 32000) - assert list(wav.shape) == [3, 1, 64000] - - prompt = torch.randn(2, 1, 32000) - wav = mg.generate_continuation( - prompt, 32000, ['youpi', 'lapin dort']) - assert list(wav.shape) == [2, 1, 64000] - - prompt = torch.randn(2, 1, 32000) - with pytest.raises(AssertionError): - wav = mg.generate_continuation( - prompt, 32000, ['youpi', 'lapin dort', 'one too many']) - - def test_generate(self): - mg = self.get_musicgen() - wav = mg.generate( - ['youpi', 'lapin dort']) - assert list(wav.shape) == [2, 1, 64000] - - def test_generate_long(self): - mg = self.get_musicgen() - mg.max_duration = 3. - mg.set_generation_params(duration=4., extend_stride=2.) - wav = mg.generate( - ['youpi', 'lapin dort']) - assert list(wav.shape) == [2, 1, 32000 * 4] diff --git a/spaces/trysem/AnimeGANv2/app.py b/spaces/trysem/AnimeGANv2/app.py deleted file mode 100644 index b26b59835e0cf64d490ba2b0318952cb76471977..0000000000000000000000000000000000000000 --- a/spaces/trysem/AnimeGANv2/app.py +++ /dev/null @@ -1,33 +0,0 @@ -from PIL import Image -import torch -import gradio as gr - - - -model2 = torch.hub.load( - "AK391/animegan2-pytorch:main", - "generator", - pretrained=True, - device="cpu", - progress=False -) - - -model1 = torch.hub.load("AK391/animegan2-pytorch:main", "generator", pretrained="face_paint_512_v1", device="cpu") -face2paint = torch.hub.load( - 'AK391/animegan2-pytorch:main', 'face2paint', - size=1024, device="cpu",side_by_side=False -) -def inference(img, ver): - if ver == 'version 2 (🔺 robustness,🔻 stylization)': - out = face2paint(model2, img) - else: - out = face2paint(model1, img) - return out - -title = "AnimeGANv2" -description = "Gradio Demo for AnimeGanv2 Face Portrait. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below. Please use a cropped portrait picture for best results similar to the examples below." -article = "

    Github Repo Pytorch

    visitor badge

    " -examples=[['groot.jpeg','version 2 (🔺 robustness,🔻 stylization)'],['bill.png','version 1 (🔺 stylization, 🔻 robustness)'],['tony.png','version 1 (🔺 stylization, 🔻 robustness)'],['elon.png','version 2 (🔺 robustness,🔻 stylization)'],['IU.png','version 1 (🔺 stylization, 🔻 robustness)'],['billie.png','version 2 (🔺 robustness,🔻 stylization)'],['will.png','version 2 (🔺 robustness,🔻 stylization)'],['beyonce.png','version 1 (🔺 stylization, 🔻 robustness)'],['gongyoo.jpeg','version 1 (🔺 stylization, 🔻 robustness)']] -gr.Interface(inference, [gr.inputs.Image(type="pil"),gr.inputs.Radio(['version 1 (🔺 stylization, 🔻 robustness)','version 2 (🔺 robustness,🔻 stylization)'], type="value", default='version 2 (🔺 robustness,🔻 stylization)', label='version') -], gr.outputs.Image(type="pil"),title=title,description=description,article=article,examples=examples,allow_flagging=False,allow_screenshot=False).launch() \ No newline at end of file diff --git a/spaces/twdac/BuChengFangYuan-ChineseJapaneseTranslation/app/my_py_lib/heatmap_tool.py b/spaces/twdac/BuChengFangYuan-ChineseJapaneseTranslation/app/my_py_lib/heatmap_tool.py deleted file mode 100644 index 3b868fc04aefd7acc5b8a5ae10af34bb90046dd7..0000000000000000000000000000000000000000 --- a/spaces/twdac/BuChengFangYuan-ChineseJapaneseTranslation/app/my_py_lib/heatmap_tool.py +++ /dev/null @@ -1,173 +0,0 @@ -''' -热图工具 -''' - -import numpy as np -import cv2 -from skimage.draw import disk as sk_disk -from . import bbox_tool -from . import contour_tool -from typing import Iterable - - -def get_cls_with_det_pts_from_cls_hm(cls_hm: np.ndarray, det_pts: np.ndarray, radius: int=3, mode: str='mean'): - ''' - 根据检测点和分类热图,从中获得获得每个检测点的类别 - :param cls_hm: 分类热图 - :param det_pts: 检测点坐标,要求形状为 [-1, 2],坐标格式为 yx - :param radius: 检索半径 - :param mode: 计算模式,转换检索半径内类别像素的到分类概率的方式 - :return: - ''' - assert cls_hm.ndim == 3 - assert cls_hm.shape[2] >= 1 - assert mode in ('sum', 'mean', 'max', 'min') - det_pts = np.asarray(det_pts, np.int32) - if len(det_pts) == 0: - det_pts = det_pts.reshape([-1, 2]) - assert det_pts.ndim == 2 and det_pts.shape[1] == 2 - radius = int(radius) - # 每个点对应各个类别的概率 - cls_probs = np.zeros([len(det_pts), cls_hm.shape[2]], np.float32) - hw = cls_hm.shape[:2] - for i, pt in enumerate(det_pts): - rr, cc = sk_disk(pt, radius=radius, shape=hw) - for c in range(cls_hm.shape[2]): - if mode == 'sum': - cls_probs[i, c] = cls_hm[rr, cc, c].sum() - elif mode == 'mean': - cls_probs[i, c] = cls_hm[rr, cc, c].mean() - elif mode == 'max': - cls_probs[i, c] = cls_hm[rr, cc, c].max() - elif mode == 'min': - cls_probs[i, c] = cls_hm[rr, cc, c].min() - else: - raise AssertionError('Error! Invalid mode param.') - return cls_probs - - -def tr_bboxes_to_tlbr_heatmap(im_hw, hm_hw, bboxes, n_class=0, classes=None): - ''' - 转换包围框到tlbr热图(FCOS热图方式) - 额外修改,仅在置信度大于0.5时,才会设定tlbr值,否则坐标值为0 - :param im_hw: 原始图像大小 - :param hm_hw: 热图大小 - :param bboxes: 包围框,形状为[-1, 4],坐标格式为[y1x1y2x2, y1x1y2x2] - :return: - ''' - if n_class > 0: - assert classes is not None, 'Error! The classes is not allow None when n_class > 0.' - assert len(bboxes) == len(classes), 'Error! The len(bboxes) must be equal len(classes).' - if len(classes) > 0: - assert max(classes) < n_class and min(classes) >= 0, 'Error! All class must be in range [0, n_class).' 
- - ohm = np.zeros([*hm_hw, 1 + 4 + n_class], dtype=np.float32) - im_hw = np.asarray(im_hw, np.int32) - hm_hw = np.asarray(hm_hw, np.int32) - for box_id, box in enumerate(bboxes): - block_float_box = np.asarray(box / [*im_hw, *im_hw] * [*hm_hw, *hm_hw], np.float32) - block_box = np.asarray(block_float_box, np.int32) - center_hw = bbox_tool.calc_bbox_center(block_float_box).astype(np.int32) - for ih in range(max(block_box[0], 0), min(block_box[2] + 1, hm_hw[0])): - for iw in range(max(block_box[1], 0), min(block_box[3] + 1, hm_hw[1])): - # 只有正中间的IOU值,才设定为1,周围都设定为0 - # if ih == block_int_center[0] and iw == block_int_center[1]: - # ohm[ih, iw, :1] = 1. - cur_pos = np.asarray([ih + 0.5, iw + 0.5], np.float32) - t = cur_pos[0] - block_float_box[0] - l = cur_pos[1] - block_float_box[1] - b = block_float_box[2] - cur_pos[0] - r = block_float_box[3] - cur_pos[1] - if np.min([t, l, b, r]) <= 0: - continue - - # 计算中心度 - if ih == center_hw[0] and iw == center_hw[1]: - c = 1. - else: - c = np.sqrt((min(l, r) / max(l, r)) * (min(t, b) / max(t, b))) - - # 设定置信度 - if c == 1.: - ohm[ih, iw, 0:1] = c - elif ohm[ih, iw, 0:1] > 0: - ohm[ih, iw, 0:1] = max(ohm[ih, iw, 0:1], c) - min(ohm[ih, iw, 0:1], c) - else: - ohm[ih, iw, 0:1] = c - - # 设定分类 - if n_class > 0: - ohm[ih, iw, 5+classes[box_id]] = c - - # yx位置,同样设定 - if c > 0.5: - ohm[ih, iw, 1:5] = [t, l, b, r] - return ohm - - -def tr_tlbr_heatmap_to_bboxes(ohm, im_hw, thresh=0.5): - ''' - 转换tlbr热图(FCOS热图方式)到包围框 - :param ohm: tlbr热图块输入,要求格式为 [H,W,1+4] - :param im_hw: 原图像大小 - :param thresh: 阈值 - :return: - ''' - assert isinstance(ohm, np.ndarray) and ohm.ndim == 3 and ohm.shape[2] >= 5 - assert len(im_hw) == 2 - - has_class = ohm.shape[2] > 5 - n_class = ohm.shape[2] - 5 - - hm_hw = ohm.shape[:2] - hm_hwhw = np.asarray([*hm_hw, *hm_hw], np.float32) - im_hwhw = np.asarray([*im_hw, *im_hw], np.float32) - confs, tlbr, classes = np.split(ohm, [1, 5], 2) - grid = np.transpose(np.mgrid[:hm_hw[0], :hm_hw[1]], [1, 2, 0]) - bs = confs > thresh - - s_confs = confs[bs] - if has_class: - s_classes = classes[bs[..., 0]] - - s_tlbr = tlbr[bs[..., 0]] - s_grid = grid[bs[..., 0]] + 0.5 - s_tlbr[:, :2] = s_grid - s_tlbr[:, :2] - s_tlbr[:, 2:] = s_grid + s_tlbr[:, 2:] - bboxes = s_tlbr / hm_hwhw * im_hwhw - s_confs = np.asarray(s_confs, np.float32).reshape([-1, 1]) - bboxes = np.asarray(bboxes, np.float32).reshape([-1, 4]) - - if has_class: - s_classes = np.asarray(s_classes, np.float32).reshape([-1, n_class]) - return s_confs, bboxes, s_classes - else: - return s_confs, bboxes - - -def get_cls_contours_from_cls_hm(cls_hm, probs=0.5): - ''' - 从类别热图中获得多个类别轮廓 - :param cls_hm: - :param probs: - :return: - ''' - assert cls_hm.ndim == 3 - assert cls_hm.shape[2] >= 1 - - C = cls_hm.shape[-1] - - if not isinstance(probs, Iterable): - probs = [probs] * C - - cls = [] - cs = [] - - for c in range(C): - bin = np.uint8(cls_hm[..., c] > probs[c]) - conts = contour_tool.find_contours(bin, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE, keep_invalid_contour=False) - n = len(conts) - cls.extend([c]*n) - cs.extend(conts) - - return cls, cs diff --git a/spaces/typesdigital/twitter-pro/app.py b/spaces/typesdigital/twitter-pro/app.py deleted file mode 100644 index ac73118e8170ce7dfee4a16dc361f9b94beacf09..0000000000000000000000000000000000000000 --- a/spaces/typesdigital/twitter-pro/app.py +++ /dev/null @@ -1,20 +0,0 @@ -import gradio -import openai - -openai.api_key = "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" - -messages = [{"role": "system", "content": "You are a Twitter Tweets 
experts that specializes in creating viral tweets for start up marketing and update"}] - -def CustomChatGPT(user_input): - messages.append({"role": "user", "content": user_input}) - response = openai.ChatCompletion.create( - model = "gpt-3.5-turbo", - messages = messages - ) - ChatGPT_reply = response["choices"][0]["message"]["content"] - messages.append({"role": "assistant", "content": ChatGPT_reply}) - return ChatGPT_reply - -demo = gradio.Interface(fn=CustomChatGPT, inputs = "text", outputs = "text", title = "Twitter Tweets Pro") - -demo.launch(share=True) \ No newline at end of file diff --git a/spaces/unity/ML-Agents-SoccerTwos/TemplateData/style.css b/spaces/unity/ML-Agents-SoccerTwos/TemplateData/style.css deleted file mode 100644 index cdc3477fb8c1c824db96f451631bca7cde305923..0000000000000000000000000000000000000000 --- a/spaces/unity/ML-Agents-SoccerTwos/TemplateData/style.css +++ /dev/null @@ -1,105 +0,0 @@ -html { - box-sizing: border-box; -} -*, *:before, *:after { - box-sizing: inherit; -} -html, body { - height: 100%; -} -canvas { - display: block; -} -body { - margin: 0; -} -#unity-container { - width: 100%; - height: 100%; -} -#unity-canvas { - width: 100%; - height: 100%; - background: #231F20; -} -#loading-cover { - position: absolute; - top: 0; - left: 0; - width: 100%; - height: 100%; - display: flex; - justify-content: center; - align-items: center; -} -#unity-loading-bar { - flex: 1 1 auto; - display: flex; - flex-direction: column; - justify-content: center; - align-items: center; -} -#unity-logo { - text-align: center; -} -#unity-logo img { - max-width: 80%; -} -#unity-progress-bar-empty { - width: 80%; - height: 24px; - margin: 10px 20px 20px 10px; - text-align: left; - border: 1px solid white; - padding: 2px; -} -#unity-progress-bar-full { - width: 0%; - height: 100%; - background: #ffd21e; -} -.light #unity-progress-bar-empty { - border-color: black; -} -.light #unity-progress-bar-full { - background: black; -} - -#unity-fullscreen-button { - position: absolute; - right: 10px; - bottom: 10px; - width: 38px; - height: 38px; - background: url('fullscreen-button.png') no-repeat center; - background-size: contain; -} - -.spinner, -.spinner:after { - border-radius: 50%; - width: 5em; - height: 5em; -} -.spinner { - margin: 10px; - font-size: 10px; - position: relative; - text-indent: -9999em; - border-top: 1.1em solid rgba(255, 255, 255, 0.2); - border-right: 1.1em solid rgba(255, 255, 255, 0.2); - border-bottom: 1.1em solid rgba(255, 255, 255, 0.2); - border-left: 1.1em solid #ffffff; - transform: translateZ(0); - animation: spinner-spin 1.1s infinite linear; -} -@keyframes spinner-spin { - 0% { - transform: rotate(0deg); - } - 100% { - transform: rotate(360deg); - } -} - - diff --git a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/reference/yolo/utils/callbacks/tensorboard.md b/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/reference/yolo/utils/callbacks/tensorboard.md deleted file mode 100644 index b5eea1be36312e2ea52128256916a528ceb62b33..0000000000000000000000000000000000000000 --- a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/reference/yolo/utils/callbacks/tensorboard.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -description: Learn how to monitor the training process with Tensorboard using Ultralytics YOLO's "_log_scalars" and "on_batch_end" methods. 
-keywords: TensorBoard callbacks, YOLO training, ultralytics YOLO ---- - -## _log_scalars ---- -### ::: ultralytics.yolo.utils.callbacks.tensorboard._log_scalars -

    - -## on_pretrain_routine_start ---- -### ::: ultralytics.yolo.utils.callbacks.tensorboard.on_pretrain_routine_start -

    - -## on_batch_end ---- -### ::: ultralytics.yolo.utils.callbacks.tensorboard.on_batch_end -

    - -## on_fit_epoch_end ---- -### ::: ultralytics.yolo.utils.callbacks.tensorboard.on_fit_epoch_end -

    diff --git a/spaces/vishnu0001/text2mesh/shap_e/rendering/blender/view_data.py b/spaces/vishnu0001/text2mesh/shap_e/rendering/blender/view_data.py deleted file mode 100644 index 2f726ec40f8b5fbf713af145282c9d7475dfb2ea..0000000000000000000000000000000000000000 --- a/spaces/vishnu0001/text2mesh/shap_e/rendering/blender/view_data.py +++ /dev/null @@ -1,84 +0,0 @@ -import itertools -import json -import zipfile -from typing import BinaryIO, List, Tuple - -import numpy as np -from PIL import Image - -from shap_e.rendering.view_data import Camera, ProjectiveCamera, ViewData - - -class BlenderViewData(ViewData): - """ - Interact with a dataset zipfile exported by view_data.py. - """ - - def __init__(self, f_obj: BinaryIO): - self.zipfile = zipfile.ZipFile(f_obj, mode="r") - self.infos = [] - with self.zipfile.open("info.json", "r") as f: - self.info = json.load(f) - self.channels = list(self.info.get("channels", "RGBAD")) - assert set("RGBA").issubset( - set(self.channels) - ), "The blender output should at least have RGBA images." - names = set(x.filename for x in self.zipfile.infolist()) - for i in itertools.count(): - name = f"{i:05}.json" - if name not in names: - break - with self.zipfile.open(name, "r") as f: - self.infos.append(json.load(f)) - - @property - def num_views(self) -> int: - return len(self.infos) - - @property - def channel_names(self) -> List[str]: - return list(self.channels) - - def load_view(self, index: int, channels: List[str]) -> Tuple[Camera, np.ndarray]: - for ch in channels: - if ch not in self.channel_names: - raise ValueError(f"unsupported channel: {ch}") - - # Gather (a superset of) the requested channels. - channel_map = {} - if any(x in channels for x in "RGBA"): - with self.zipfile.open(f"{index:05}.png", "r") as f: - rgba = np.array(Image.open(f)).astype(np.float32) / 255.0 - channel_map.update(zip("RGBA", rgba.transpose([2, 0, 1]))) - if "D" in channels: - with self.zipfile.open(f"{index:05}_depth.png", "r") as f: - # Decode a 16-bit fixed-point number. - fp = np.array(Image.open(f)) - inf_dist = fp == 0xFFFF - channel_map["D"] = np.where( - inf_dist, - np.inf, - self.infos[index]["max_depth"] * (fp.astype(np.float32) / 65536), - ) - if "MatAlpha" in channels: - with self.zipfile.open(f"{index:05}_MatAlpha.png", "r") as f: - channel_map["MatAlpha"] = np.array(Image.open(f)).astype(np.float32) / 65536 - - # The order of channels is user-specified. - combined = np.stack([channel_map[k] for k in channels], axis=-1) - - h, w, _ = combined.shape - return self.camera(index, w, h), combined - - def camera(self, index: int, width: int, height: int) -> ProjectiveCamera: - info = self.infos[index] - return ProjectiveCamera( - origin=np.array(info["origin"], dtype=np.float32), - x=np.array(info["x"], dtype=np.float32), - y=np.array(info["y"], dtype=np.float32), - z=np.array(info["z"], dtype=np.float32), - width=width, - height=height, - x_fov=info["x_fov"], - y_fov=info["y_fov"], - ) diff --git a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/engine/test.py b/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/engine/test.py deleted file mode 100644 index 8dbeef271db634ec2dadfda3bc0b5ef9c7a677ff..0000000000000000000000000000000000000000 --- a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/engine/test.py +++ /dev/null @@ -1,202 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import os.path as osp -import pickle -import shutil -import tempfile -import time - -import torch -import torch.distributed as dist - -import annotator.uniformer.mmcv as mmcv -from annotator.uniformer.mmcv.runner import get_dist_info - - -def single_gpu_test(model, data_loader): - """Test model with a single gpu. - - This method tests model with a single gpu and displays test progress bar. - - Args: - model (nn.Module): Model to be tested. - data_loader (nn.Dataloader): Pytorch data loader. - - Returns: - list: The prediction results. - """ - model.eval() - results = [] - dataset = data_loader.dataset - prog_bar = mmcv.ProgressBar(len(dataset)) - for data in data_loader: - with torch.no_grad(): - result = model(return_loss=False, **data) - results.extend(result) - - # Assume result has the same length of batch_size - # refer to https://github.com/open-mmlab/mmcv/issues/985 - batch_size = len(result) - for _ in range(batch_size): - prog_bar.update() - return results - - -def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False): - """Test model with multiple gpus. - - This method tests model with multiple gpus and collects the results - under two different modes: gpu and cpu modes. By setting - ``gpu_collect=True``, it encodes results to gpu tensors and use gpu - communication for results collection. On cpu mode it saves the results on - different gpus to ``tmpdir`` and collects them by the rank 0 worker. - - Args: - model (nn.Module): Model to be tested. - data_loader (nn.Dataloader): Pytorch data loader. - tmpdir (str): Path of directory to save the temporary results from - different gpus under cpu mode. - gpu_collect (bool): Option to use either gpu or cpu to collect results. - - Returns: - list: The prediction results. - """ - model.eval() - results = [] - dataset = data_loader.dataset - rank, world_size = get_dist_info() - if rank == 0: - prog_bar = mmcv.ProgressBar(len(dataset)) - time.sleep(2) # This line can prevent deadlock problem in some cases. - for i, data in enumerate(data_loader): - with torch.no_grad(): - result = model(return_loss=False, **data) - results.extend(result) - - if rank == 0: - batch_size = len(result) - batch_size_all = batch_size * world_size - if batch_size_all + prog_bar.completed > len(dataset): - batch_size_all = len(dataset) - prog_bar.completed - for _ in range(batch_size_all): - prog_bar.update() - - # collect results from all ranks - if gpu_collect: - results = collect_results_gpu(results, len(dataset)) - else: - results = collect_results_cpu(results, len(dataset), tmpdir) - return results - - -def collect_results_cpu(result_part, size, tmpdir=None): - """Collect results under cpu mode. - - On cpu mode, this function will save the results on different gpus to - ``tmpdir`` and collect them by the rank 0 worker. - - Args: - result_part (list): Result list containing result parts - to be collected. - size (int): Size of the results, commonly equal to length of - the results. - tmpdir (str | None): temporal directory for collected results to - store. If set to None, it will create a random temporal directory - for it. - - Returns: - list: The collected results. 
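    Example:
        >>> # Illustrative sketch only; ``part_results`` stands for this rank's
        >>> # predictions and ``dataset`` for the full evaluation set.
        >>> results = collect_results_cpu(part_results, size=len(dataset))
        >>> # rank 0 receives the ordered, de-padded list; every other rank gets None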
- """ - rank, world_size = get_dist_info() - # create a tmp dir if it is not specified - if tmpdir is None: - MAX_LEN = 512 - # 32 is whitespace - dir_tensor = torch.full((MAX_LEN, ), - 32, - dtype=torch.uint8, - device='cuda') - if rank == 0: - mmcv.mkdir_or_exist('.dist_test') - tmpdir = tempfile.mkdtemp(dir='.dist_test') - tmpdir = torch.tensor( - bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda') - dir_tensor[:len(tmpdir)] = tmpdir - dist.broadcast(dir_tensor, 0) - tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip() - else: - mmcv.mkdir_or_exist(tmpdir) - # dump the part result to the dir - mmcv.dump(result_part, osp.join(tmpdir, f'part_{rank}.pkl')) - dist.barrier() - # collect all parts - if rank != 0: - return None - else: - # load results of all parts from tmp dir - part_list = [] - for i in range(world_size): - part_file = osp.join(tmpdir, f'part_{i}.pkl') - part_result = mmcv.load(part_file) - # When data is severely insufficient, an empty part_result - # on a certain gpu could makes the overall outputs empty. - if part_result: - part_list.append(part_result) - # sort the results - ordered_results = [] - for res in zip(*part_list): - ordered_results.extend(list(res)) - # the dataloader may pad some samples - ordered_results = ordered_results[:size] - # remove tmp dir - shutil.rmtree(tmpdir) - return ordered_results - - -def collect_results_gpu(result_part, size): - """Collect results under gpu mode. - - On gpu mode, this function will encode results to gpu tensors and use gpu - communication for results collection. - - Args: - result_part (list): Result list containing result parts - to be collected. - size (int): Size of the results, commonly equal to length of - the results. - - Returns: - list: The collected results. - """ - rank, world_size = get_dist_info() - # dump result part to tensor with pickle - part_tensor = torch.tensor( - bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda') - # gather all result part tensor shape - shape_tensor = torch.tensor(part_tensor.shape, device='cuda') - shape_list = [shape_tensor.clone() for _ in range(world_size)] - dist.all_gather(shape_list, shape_tensor) - # padding result part tensor to max length - shape_max = torch.tensor(shape_list).max() - part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda') - part_send[:shape_tensor[0]] = part_tensor - part_recv_list = [ - part_tensor.new_zeros(shape_max) for _ in range(world_size) - ] - # gather all result part - dist.all_gather(part_recv_list, part_send) - - if rank == 0: - part_list = [] - for recv, shape in zip(part_recv_list, shape_list): - part_result = pickle.loads(recv[:shape[0]].cpu().numpy().tobytes()) - # When data is severely insufficient, an empty part_result - # on a certain gpu could makes the overall outputs empty. - if part_result: - part_list.append(part_result) - # sort the results - ordered_results = [] - for res in zip(*part_list): - ordered_results.extend(list(res)) - # the dataloader may pad some samples - ordered_results = ordered_results[:size] - return ordered_results diff --git a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/runner/log_buffer.py b/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/runner/log_buffer.py deleted file mode 100644 index d949e2941c5400088c7cd8a1dc893d8b233ae785..0000000000000000000000000000000000000000 --- a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/runner/log_buffer.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from collections import OrderedDict - -import numpy as np - - -class LogBuffer: - - def __init__(self): - self.val_history = OrderedDict() - self.n_history = OrderedDict() - self.output = OrderedDict() - self.ready = False - - def clear(self): - self.val_history.clear() - self.n_history.clear() - self.clear_output() - - def clear_output(self): - self.output.clear() - self.ready = False - - def update(self, vars, count=1): - assert isinstance(vars, dict) - for key, var in vars.items(): - if key not in self.val_history: - self.val_history[key] = [] - self.n_history[key] = [] - self.val_history[key].append(var) - self.n_history[key].append(count) - - def average(self, n=0): - """Average latest n values or all values.""" - assert n >= 0 - for key in self.val_history: - values = np.array(self.val_history[key][-n:]) - nums = np.array(self.n_history[key][-n:]) - avg = np.sum(values * nums) / np.sum(nums) - self.output[key] = avg - self.ready = True diff --git a/spaces/wffcyrus/MetaGPT-v1/tests/metagpt/utils/test_output_parser.py b/spaces/wffcyrus/MetaGPT-v1/tests/metagpt/utils/test_output_parser.py deleted file mode 100644 index c56cff6fafc279b40a6af6a4891737e9747d5530..0000000000000000000000000000000000000000 --- a/spaces/wffcyrus/MetaGPT-v1/tests/metagpt/utils/test_output_parser.py +++ /dev/null @@ -1,238 +0,0 @@ -#!/usr/bin/env python -# coding: utf-8 -""" -@Time : 2023/7/11 10:25 -@Author : chengmaoyu -@File : test_output_parser.py -""" -from typing import List, Tuple - -import pytest - -from metagpt.utils.common import OutputParser - - -def test_parse_blocks(): - test_text = "##block1\nThis is block 1.\n##block2\nThis is block 2." - expected_result = {'block1': 'This is block 1.', 'block2': 'This is block 2.'} - assert OutputParser.parse_blocks(test_text) == expected_result - - -def test_parse_code(): - test_text = "```python\nprint('Hello, world!')```" - expected_result = "print('Hello, world!')" - assert OutputParser.parse_code(test_text, 'python') == expected_result - - with pytest.raises(Exception): - OutputParser.parse_code(test_text, 'java') - - -def test_parse_python_code(): - expected_result = "print('Hello, world!')" - assert OutputParser.parse_python_code("```python\nprint('Hello, world!')```") == expected_result - assert OutputParser.parse_python_code("```python\nprint('Hello, world!')") == expected_result - assert OutputParser.parse_python_code("print('Hello, world!')") == expected_result - assert OutputParser.parse_python_code("print('Hello, world!')```") == expected_result - assert OutputParser.parse_python_code("print('Hello, world!')```") == expected_result - expected_result = "print('```Hello, world!```')" - assert OutputParser.parse_python_code("```python\nprint('```Hello, world!```')```") == expected_result - assert OutputParser.parse_python_code("The code is: ```python\nprint('```Hello, world!```')```") == expected_result - assert OutputParser.parse_python_code("xxx.\n```python\nprint('```Hello, world!```')```\nxxx") == expected_result - - with pytest.raises(ValueError): - OutputParser.parse_python_code("xxx =") - - -def test_parse_str(): - test_text = "name = 'Alice'" - expected_result = 'Alice' - assert OutputParser.parse_str(test_text) == expected_result - - -def test_parse_file_list(): - test_text = "files=['file1', 'file2', 'file3']" - expected_result = ['file1', 'file2', 'file3'] - assert OutputParser.parse_file_list(test_text) == expected_result - - with pytest.raises(Exception): - OutputParser.parse_file_list("wrong_input") - - -def test_parse_data(): - test_data 
= "##block1\n```python\nprint('Hello, world!')\n```\n##block2\nfiles=['file1', 'file2', 'file3']" - expected_result = {'block1': "print('Hello, world!')", 'block2': ['file1', 'file2', 'file3']} - assert OutputParser.parse_data(test_data) == expected_result - - -if __name__ == '__main__': - t_text = ''' -## Required Python third-party packages -```python -""" -flask==1.1.2 -pygame==2.0.1 -""" -``` - -## Required Other language third-party packages -```python -""" -No third-party packages required for other languages. -""" -``` - -## Full API spec -```python -""" -openapi: 3.0.0 -info: - title: Web Snake Game API - version: 1.0.0 -paths: - /game: - get: - summary: Get the current game state - responses: - '200': - description: A JSON object of the game state - post: - summary: Send a command to the game - requestBody: - required: true - content: - application/json: - schema: - type: object - properties: - command: - type: string - responses: - '200': - description: A JSON object of the updated game state -""" -``` - -## Logic Analysis -```python -[ - ("app.py", "Main entry point for the Flask application. Handles HTTP requests and responses."), - ("game.py", "Contains the Game and Snake classes. Handles the game logic."), - ("static/js/script.js", "Handles user interactions and updates the game UI."), - ("static/css/styles.css", "Defines the styles for the game UI."), - ("templates/index.html", "The main page of the web application. Displays the game UI.") -] -``` - -## Task list -```python -[ - "game.py", - "app.py", - "static/css/styles.css", - "static/js/script.js", - "templates/index.html" -] -``` - -## Shared Knowledge -```python -""" -'game.py' contains the Game and Snake classes which are responsible for the game logic. The Game class uses an instance of the Snake class. - -'app.py' is the main entry point for the Flask application. It creates an instance of the Game class and handles HTTP requests and responses. - -'static/js/script.js' is responsible for handling user interactions and updating the game UI based on the game state returned by 'app.py'. - -'static/css/styles.css' defines the styles for the game UI. - -'templates/index.html' is the main page of the web application. It displays the game UI and loads 'static/js/script.js' and 'static/css/styles.css'. -""" -``` - -## Anything UNCLEAR -We need clarification on how the high score should be stored. Should it persist across sessions (stored in a database or a file) or should it reset every time the game is restarted? Also, should the game speed increase as the snake grows, or should it remain constant throughout the game? - ''' - - OUTPUT_MAPPING = { - "Original Requirements": (str, ...), - "Product Goals": (List[str], ...), - "User Stories": (List[str], ...), - "Competitive Analysis": (List[str], ...), - "Competitive Quadrant Chart": (str, ...), - "Requirement Analysis": (str, ...), - "Requirement Pool": (List[Tuple[str, str]], ...), - "Anything UNCLEAR": (str, ...), - } - t_text1 = '''## Original Requirements: - -The boss wants to create a web-based version of the game "Fly Bird". - -## Product Goals: - -- Create a web-based version of the game "Fly Bird" that is engaging and addictive. -- Provide a seamless and intuitive user experience. -- Optimize the game for different devices and screen sizes. - -## User Stories: - -- As a user, I want to be able to control the bird's flight by clicking or tapping on the screen. -- As a user, I want to see my score and the highest score achieved in the game. 
-- As a user, I want the game to be challenging but not frustratingly difficult. -- As a user, I want to be able to pause and resume the game at any time. -- As a user, I want to be able to share my score on social media. - -## Competitive Analysis: - -- Flappy Bird: A popular mobile game where the player controls a bird's flight through a series of obstacles. -- Angry Birds: A physics-based puzzle game where the player launches birds to destroy structures and defeat pigs. -- Snake Game: A classic game where the player controls a snake to eat food and grow longer without hitting the walls or its own body. -- Temple Run: An endless running game where the player controls a character to avoid obstacles and collect coins. -- Subway Surfers: An endless running game where the player controls a character to avoid obstacles and collect coins while being chased by a guard. -- Doodle Jump: A vertical platform game where the player controls a character to jump on platforms and avoid falling. -- Fruit Ninja: A fruit-slicing game where the player uses their finger to slice flying fruits. - -## Competitive Quadrant Chart: - -```mermaid -quadrantChart - title Reach and engagement of games - x-axis Low Reach --> High Reach - y-axis Low Engagement --> High Engagement - quadrant-1 We should expand - quadrant-2 Need to promote - quadrant-3 Re-evaluate - quadrant-4 May be improved - "Flappy Bird": [0.8, 0.9] - "Angry Birds": [0.9, 0.8] - "Snake Game": [0.6, 0.6] - "Temple Run": [0.9, 0.7] - "Subway Surfers": [0.9, 0.7] - "Doodle Jump": [0.7, 0.5] - "Fruit Ninja": [0.8, 0.6] - "Our Target Product": [0.7, 0.8] -``` - -## Requirement Analysis: - -The product should be a web-based version of the game "Fly Bird" that is engaging, addictive, and optimized for different devices and screen sizes. It should provide a seamless and intuitive user experience, with controls that allow the user to control the bird's flight by clicking or tapping on the screen. The game should display the user's score and the highest score achieved. It should be challenging but not frustratingly difficult, allowing the user to pause and resume the game at any time. The user should also have the option to share their score on social media. - -## Requirement Pool: - -```python -[ - ("Implement bird's flight control using click or tap", "P0"), - ("Display user's score and highest score achieved", "P0"), - ("Implement challenging but not frustrating difficulty level", "P1"), - ("Allow user to pause and resume the game", "P1"), - ("Implement social media sharing feature", "P2") -] -``` - -## Anything UNCLEAR: - -There are no unclear points. 
- ''' - d = OutputParser.parse_data_with_mapping(t_text1, OUTPUT_MAPPING) - import json - - print(json.dumps(d)) diff --git a/spaces/whgwd2023/bingo/cloudflare/worker.js b/spaces/whgwd2023/bingo/cloudflare/worker.js deleted file mode 100644 index e0debd750615f1329b2c72fbce73e1b9291f7137..0000000000000000000000000000000000000000 --- a/spaces/whgwd2023/bingo/cloudflare/worker.js +++ /dev/null @@ -1,18 +0,0 @@ -const TRAGET_HOST='hf4all-bingo.hf.space' // 请将此域名改成你自己的,域名信息在设置》站点域名查看。 - -export default { - async fetch(request) { - const uri = new URL(request.url); - if (uri.protocol === 'http:') { - uri.protocol = 'https:'; - return new Response('', { - status: 301, - headers: { - location: uri.toString(), - }, - }) - } - uri.host = TRAGET_HOST - return fetch(new Request(uri.toString(), request)); - }, -}; diff --git a/spaces/williamberman/stable-diffusion-xl-inpainting/share_btn.py b/spaces/williamberman/stable-diffusion-xl-inpainting/share_btn.py deleted file mode 100644 index b9fb838408c2446d15e95c8abbf0d46a79da1601..0000000000000000000000000000000000000000 --- a/spaces/williamberman/stable-diffusion-xl-inpainting/share_btn.py +++ /dev/null @@ -1,94 +0,0 @@ -community_icon_html = """ - - -""" - -loading_icon_html = """""" - -share_js = """async () => { - async function uploadFile(file){ - const UPLOAD_URL = 'https://huggingface.co/uploads'; - const response = await fetch(UPLOAD_URL, { - method: 'POST', - headers: { - 'Content-Type': file.type, - 'X-Requested-With': 'XMLHttpRequest', - }, - body: file, /// <- File inherits from Blob - }); - const url = await response.text(); - return url; - } - - async function getInputImgFile(imgCanvas){ - const blob = await new Promise(resolve => imgCanvas.toBlob(resolve)); - const imgId = Date.now() % 200; - const fileName = `sd-inpainting-${{imgId}}.png`; - return new File([blob], fileName, { type: 'image/png' }); - } - - async function getOutoutImgFile(imgEl){ - const res = await fetch(imgEl.src); - const blob = await res.blob(); - const imgId = Date.now() % 200; - const fileName = `sd-inpainting-${{imgId}}.png`; - return new File([blob], fileName, { type: 'image/png' }); - } - - const gradioEl = document.querySelector('body > gradio-app'); - // const gradioEl = document.querySelector("gradio-app").shadowRoot; - const inputImgCanvas = gradioEl.querySelector('canvas[key="drawing"]'); - const outputImgEl = gradioEl.querySelector('#output-img img'); - const promptTxt = gradioEl.querySelector('#prompt textarea').value; - let titleTxt = promptTxt; - if(titleTxt.length > 100){ - titleTxt = titleTxt.slice(0, 100) + ' ...'; - } - const shareBtnEl = gradioEl.querySelector('#share-btn'); - const shareIconEl = gradioEl.querySelector('#share-btn-share-icon'); - const loadingIconEl = gradioEl.querySelector('#share-btn-loading-icon'); - - if(!outputImgEl){ - return; - }; - - shareBtnEl.style.pointerEvents = 'none'; - shareIconEl.style.display = 'none'; - loadingIconEl.style.removeProperty('display'); - - const inputImgFile = await getInputImgFile(inputImgCanvas); - const outputImgFile = await getOutoutImgFile(outputImgEl); - const files = [inputImgFile, outputImgFile]; - - const urls = await Promise.all(files.map((f) => uploadFile(f))); - - const htmlImgs = urls.map(url => ``); - const [inputImgUrl, outputImgUrl] = htmlImgs; - - const descriptionMd = `
    -
    -${inputImgUrl} - -${promptTxt} -
    -
    -${outputImgUrl} -
    -
    `; - - const params = new URLSearchParams({ - title: titleTxt, - description: descriptionMd, - }); - - const paramsStr = params.toString(); - - window.open(`https://huggingface.co/spaces/diffusers/stable-diffusion-xl-inpainting/discussions/new?${paramsStr}&preview=true`, '_blank'); - - shareBtnEl.style.removeProperty('pointer-events'); - shareIconEl.style.removeProperty('display'); - loadingIconEl.style.display = 'none'; -}""" \ No newline at end of file diff --git a/spaces/wykonos/movie-recommender/app.py b/spaces/wykonos/movie-recommender/app.py deleted file mode 100644 index 0299e655d88f456e58077966e9356fb52345edb0..0000000000000000000000000000000000000000 --- a/spaces/wykonos/movie-recommender/app.py +++ /dev/null @@ -1,57 +0,0 @@ -import _pickle as cPickle -import bz2 - -import gradio as gr -import pandas as pd - - -def decompress_model(file): - data = bz2.BZ2File(file, 'rb') - data = cPickle.load(data) - return data - - -movies_list = decompress_model("./movies/movies_model.pbz2") -movies_similarity = decompress_model("./movies/movies_similarity.pbz2") -best_movies = pd.read_csv("./movies/best_movies.csv") -movie_data = pd.read_csv("./movies/movie_data.csv") - - -def recommend(movie_title): - movie_index = movies_list[movies_list["title"] == movie_title].index[0] - distances = movies_similarity[movie_index] - sorted_movie_list = sorted(list(enumerate(distances)), reverse=True, key=lambda x: x[1])[1:120] - recommended_movies, recommended_posters = [], [] - unique_movies = set() - for i in sorted_movie_list: - poster_path = movies_list["poster_path"][i[0]] - recommended_movie = movies_list.iloc[i[0]].title - if recommended_movie not in unique_movies: - unique_movies.add(recommended_movie) - recommended_movies.append(recommended_movie) - recommended_posters.append("https://image.tmdb.org/t/p/original" + poster_path) - return recommended_movies, recommended_posters - - -def get_movie_details(title): - movie_details = movie_data[movie_data["title"] == title] - return movie_details.to_dict(orient="records") - - -def get_recommendation(movie): - recommendation, movie_posters = recommend(movie) - movie_details = [get_movie_details(movie) for movie in recommendation] - return recommendation, movie_posters, movie_details - - -iface = gr.Interface( - fn=get_recommendation, - inputs="text", - outputs="json", - title="Movie Recommender", - description="Enter a movie title to get recommendations.", - examples=[["The Dark Knight"]], - allow_flagging=False -) - -iface.launch() diff --git a/spaces/xnetba/Chat_advance/ChuanhuChatbot.py b/spaces/xnetba/Chat_advance/ChuanhuChatbot.py deleted file mode 100644 index 890e5c7ec70f26a0452ded3e33cd56f488819932..0000000000000000000000000000000000000000 --- a/spaces/xnetba/Chat_advance/ChuanhuChatbot.py +++ /dev/null @@ -1,473 +0,0 @@ -# -*- coding:utf-8 -*- -import os -import logging -import sys - -import gradio as gr - -from modules import config -from modules.config import * -from modules.utils import * -from modules.presets import * -from modules.overwrites import * -from modules.models.models import get_model - -logging.getLogger("httpx").setLevel(logging.WARNING) - -gr.Chatbot._postprocess_chat_messages = postprocess_chat_messages -gr.Chatbot.postprocess = postprocess - -with open("assets/custom.css", "r", encoding="utf-8") as f: - customCSS = f.read() - -def create_new_model(): - return get_model(model_name = MODELS[DEFAULT_MODEL], access_key = my_api_key)[0] - -with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo: - user_name = 
gr.State("") - promptTemplates = gr.State(load_template(get_template_names(plain=True)[0], mode=2)) - user_question = gr.State("") - assert type(my_api_key)==str - user_api_key = gr.State(my_api_key) - current_model = gr.State(create_new_model) - - topic = gr.State(i18n("未命名对话历史记录")) - - with gr.Row(): - gr.HTML(CHUANHU_TITLE, elem_id="app_title") - status_display = gr.Markdown(get_geoip(), elem_id="status_display") - with gr.Row(elem_id="float_display"): - user_info = gr.Markdown(value="getting user info...", elem_id="user_info") - - with gr.Row().style(equal_height=True): - with gr.Column(scale=5): - with gr.Row(): - chatbot = gr.Chatbot(label="Chuanhu Chat", elem_id="chuanhu_chatbot").style(height="100%") - with gr.Row(): - with gr.Column(min_width=225, scale=12): - user_input = gr.Textbox( - elem_id="user_input_tb", - show_label=False, placeholder=i18n("在这里输入") - ).style(container=False) - with gr.Column(min_width=42, scale=1): - submitBtn = gr.Button(value="", variant="primary", elem_id="submit_btn") - cancelBtn = gr.Button(value="", variant="secondary", visible=False, elem_id="cancel_btn") - with gr.Row(): - emptyBtn = gr.Button( - i18n("🧹 新的对话"), elem_id="empty_btn" - ) - retryBtn = gr.Button(i18n("🔄 重新生成")) - delFirstBtn = gr.Button(i18n("🗑️ 删除最旧对话")) - delLastBtn = gr.Button(i18n("🗑️ 删除最新对话")) - with gr.Row(visible=False) as like_dislike_area: - with gr.Column(min_width=20, scale=1): - likeBtn = gr.Button(i18n("👍")) - with gr.Column(min_width=20, scale=1): - dislikeBtn = gr.Button(i18n("👎")) - - with gr.Column(): - with gr.Column(min_width=50, scale=1): - with gr.Tab(label=i18n("模型")): - keyTxt = gr.Textbox( - show_label=True, - placeholder=f"Your API-key...", - value=hide_middle_chars(user_api_key.value), - type="password", - visible=not HIDE_MY_KEY, - label="API-Key", - ) - if multi_api_key: - usageTxt = gr.Markdown(i18n("多账号模式已开启,无需输入key,可直接开始对话"), elem_id="usage_display", elem_classes="insert_block") - else: - usageTxt = gr.Markdown(i18n("**发送消息** 或 **提交key** 以显示额度"), elem_id="usage_display", elem_classes="insert_block") - model_select_dropdown = gr.Dropdown( - label=i18n("选择模型"), choices=MODELS, multiselect=False, value=MODELS[DEFAULT_MODEL], interactive=True - ) - lora_select_dropdown = gr.Dropdown( - label=i18n("选择LoRA模型"), choices=[], multiselect=False, interactive=True, visible=False - ) - with gr.Row(): - single_turn_checkbox = gr.Checkbox(label=i18n("单轮对话"), value=False) - use_websearch_checkbox = gr.Checkbox(label=i18n("使用在线搜索"), value=False) - language_select_dropdown = gr.Dropdown( - label=i18n("选择回复语言(针对搜索&索引功能)"), - choices=REPLY_LANGUAGES, - multiselect=False, - value=REPLY_LANGUAGES[0], - ) - index_files = gr.Files(label=i18n("上传"), type="file") - two_column = gr.Checkbox(label=i18n("双栏pdf"), value=advance_docs["pdf"].get("two_column", False)) - summarize_btn = gr.Button(i18n("总结")) - # TODO: 公式ocr - # formula_ocr = gr.Checkbox(label=i18n("识别公式"), value=advance_docs["pdf"].get("formula_ocr", False)) - - with gr.Tab(label="Prompt"): - systemPromptTxt = gr.Textbox( - show_label=True, - placeholder=i18n("在这里输入System Prompt..."), - label="System prompt", - value=INITIAL_SYSTEM_PROMPT, - lines=10, - ).style(container=False) - with gr.Accordion(label=i18n("加载Prompt模板"), open=True): - with gr.Column(): - with gr.Row(): - with gr.Column(scale=6): - templateFileSelectDropdown = gr.Dropdown( - label=i18n("选择Prompt模板集合文件"), - choices=get_template_names(plain=True), - multiselect=False, - value=get_template_names(plain=True)[0], - ).style(container=False) - with 
gr.Column(scale=1): - templateRefreshBtn = gr.Button(i18n("🔄 刷新")) - with gr.Row(): - with gr.Column(): - templateSelectDropdown = gr.Dropdown( - label=i18n("从Prompt模板中加载"), - choices=load_template( - get_template_names(plain=True)[0], mode=1 - ), - multiselect=False, - ).style(container=False) - - with gr.Tab(label=i18n("保存/加载")): - with gr.Accordion(label=i18n("保存/加载对话历史记录"), open=True): - with gr.Column(): - with gr.Row(): - with gr.Column(scale=6): - historyFileSelectDropdown = gr.Dropdown( - label=i18n("从列表中加载对话"), - choices=get_history_names(plain=True), - multiselect=False - ) - with gr.Column(scale=1): - historyRefreshBtn = gr.Button(i18n("🔄 刷新")) - with gr.Row(): - with gr.Column(scale=6): - saveFileName = gr.Textbox( - show_label=True, - placeholder=i18n("设置文件名: 默认为.json,可选为.md"), - label=i18n("设置保存文件名"), - value=i18n("对话历史记录"), - ).style(container=True) - with gr.Column(scale=1): - saveHistoryBtn = gr.Button(i18n("💾 保存对话")) - exportMarkdownBtn = gr.Button(i18n("📝 导出为Markdown")) - gr.Markdown(i18n("默认保存于history文件夹")) - with gr.Row(): - with gr.Column(): - downloadFile = gr.File(interactive=True) - - with gr.Tab(label=i18n("高级")): - gr.Markdown(i18n("# ⚠️ 务必谨慎更改 ⚠️\n\n如果无法使用请恢复默认设置")) - gr.HTML(get_html("appearance_switcher.html").format(label=i18n("切换亮暗色主题")), elem_classes="insert_block") - use_streaming_checkbox = gr.Checkbox( - label=i18n("实时传输回答"), value=True, visible=ENABLE_STREAMING_OPTION - ) - with gr.Accordion(i18n("参数"), open=False): - temperature_slider = gr.Slider( - minimum=-0, - maximum=2.0, - value=1.0, - step=0.1, - interactive=True, - label="temperature", - ) - top_p_slider = gr.Slider( - minimum=-0, - maximum=1.0, - value=1.0, - step=0.05, - interactive=True, - label="top-p", - ) - n_choices_slider = gr.Slider( - minimum=1, - maximum=10, - value=1, - step=1, - interactive=True, - label="n choices", - ) - stop_sequence_txt = gr.Textbox( - show_label=True, - placeholder=i18n("在这里输入停止符,用英文逗号隔开..."), - label="stop", - value="", - lines=1, - ) - max_context_length_slider = gr.Slider( - minimum=1, - maximum=32768, - value=2000, - step=1, - interactive=True, - label="max context", - ) - max_generation_slider = gr.Slider( - minimum=1, - maximum=32768, - value=1000, - step=1, - interactive=True, - label="max generations", - ) - presence_penalty_slider = gr.Slider( - minimum=-2.0, - maximum=2.0, - value=0.0, - step=0.01, - interactive=True, - label="presence penalty", - ) - frequency_penalty_slider = gr.Slider( - minimum=-2.0, - maximum=2.0, - value=0.0, - step=0.01, - interactive=True, - label="frequency penalty", - ) - logit_bias_txt = gr.Textbox( - show_label=True, - placeholder=f"word:likelihood", - label="logit bias", - value="", - lines=1, - ) - user_identifier_txt = gr.Textbox( - show_label=True, - placeholder=i18n("用于定位滥用行为"), - label=i18n("用户名"), - value=user_name.value, - lines=1, - ) - - with gr.Accordion(i18n("网络设置"), open=False, visible=False): - # 优先展示自定义的api_host - apihostTxt = gr.Textbox( - show_label=True, - placeholder=i18n("在这里输入API-Host..."), - label="API-Host", - value=config.api_host or shared.API_HOST, - lines=1, - ) - changeAPIURLBtn = gr.Button(i18n("🔄 切换API地址")) - proxyTxt = gr.Textbox( - show_label=True, - placeholder=i18n("在这里输入代理地址..."), - label=i18n("代理地址(示例:http://127.0.0.1:10809)"), - value="", - lines=2, - ) - changeProxyBtn = gr.Button(i18n("🔄 设置代理地址")) - default_btn = gr.Button(i18n("🔙 恢复默认设置")) - - gr.Markdown(CHUANHU_DESCRIPTION, elem_id="description") - gr.HTML(get_html("footer.html").format(versions=versions_html()), elem_id="footer") 
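    # From here on, the components defined above are wired to their callbacks,
    # starting with the per-session greeting that demo.load() runs when the page opens.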
- - # https://github.com/gradio-app/gradio/pull/3296 - def create_greeting(request: gr.Request): - if hasattr(request, "username") and request.username: # is not None or is not "" - logging.info(f"Get User Name: {request.username}") - user_info, user_name = gr.Markdown.update(value=f"User: {request.username}"), request.username - else: - user_info, user_name = gr.Markdown.update(value=f"", visible=False), "" - current_model = get_model(model_name = MODELS[DEFAULT_MODEL], access_key = my_api_key)[0] - current_model.set_user_identifier(user_name) - chatbot = gr.Chatbot.update(label=MODELS[DEFAULT_MODEL]) - return user_info, user_name, current_model, toggle_like_btn_visibility(DEFAULT_MODEL), *current_model.auto_load(), get_history_names(False, user_name), chatbot - demo.load(create_greeting, inputs=None, outputs=[user_info, user_name, current_model, like_dislike_area, systemPromptTxt, chatbot, historyFileSelectDropdown, chatbot], api_name="load") - chatgpt_predict_args = dict( - fn=predict, - inputs=[ - current_model, - user_question, - chatbot, - use_streaming_checkbox, - use_websearch_checkbox, - index_files, - language_select_dropdown, - ], - outputs=[chatbot, status_display], - show_progress=True, - ) - - start_outputing_args = dict( - fn=start_outputing, - inputs=[], - outputs=[submitBtn, cancelBtn], - show_progress=True, - ) - - end_outputing_args = dict( - fn=end_outputing, inputs=[], outputs=[submitBtn, cancelBtn] - ) - - reset_textbox_args = dict( - fn=reset_textbox, inputs=[], outputs=[user_input] - ) - - transfer_input_args = dict( - fn=transfer_input, inputs=[user_input], outputs=[user_question, user_input, submitBtn, cancelBtn], show_progress=True - ) - - get_usage_args = dict( - fn=billing_info, inputs=[current_model], outputs=[usageTxt], show_progress=False - ) - - load_history_from_file_args = dict( - fn=load_chat_history, - inputs=[current_model, historyFileSelectDropdown, user_name], - outputs=[saveFileName, systemPromptTxt, chatbot] - ) - - - # Chatbot - cancelBtn.click(interrupt, [current_model], []) - - user_input.submit(**transfer_input_args).then(**chatgpt_predict_args).then(**end_outputing_args) - user_input.submit(**get_usage_args) - - submitBtn.click(**transfer_input_args).then(**chatgpt_predict_args, api_name="predict").then(**end_outputing_args) - submitBtn.click(**get_usage_args) - - index_files.change(handle_file_upload, [current_model, index_files, chatbot, language_select_dropdown], [index_files, chatbot, status_display]) - summarize_btn.click(handle_summarize_index, [current_model, index_files, chatbot, language_select_dropdown], [chatbot, status_display]) - - emptyBtn.click( - reset, - inputs=[current_model], - outputs=[chatbot, status_display], - show_progress=True, - ) - - retryBtn.click(**start_outputing_args).then( - retry, - [ - current_model, - chatbot, - use_streaming_checkbox, - use_websearch_checkbox, - index_files, - language_select_dropdown, - ], - [chatbot, status_display], - show_progress=True, - ).then(**end_outputing_args) - retryBtn.click(**get_usage_args) - - delFirstBtn.click( - delete_first_conversation, - [current_model], - [status_display], - ) - - delLastBtn.click( - delete_last_conversation, - [current_model, chatbot], - [chatbot, status_display], - show_progress=False - ) - - likeBtn.click( - like, - [current_model], - [status_display], - show_progress=False - ) - - dislikeBtn.click( - dislike, - [current_model], - [status_display], - show_progress=False - ) - - two_column.change(update_doc_config, [two_column], None) - - # LLM Models 
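    # The bindings below keep the shared `current_model` state in sync with the UI:
    # a new API key is applied through `set_key` and the usage display is refreshed,
    # while changing the model or LoRA dropdown rebuilds the model through `get_model`
    # and toggles the like/dislike buttons for models that support them.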
- keyTxt.change(set_key, [current_model, keyTxt], [user_api_key, status_display], api_name="set_key").then(**get_usage_args) - keyTxt.submit(**get_usage_args) - single_turn_checkbox.change(set_single_turn, [current_model, single_turn_checkbox], None) - model_select_dropdown.change(get_model, [model_select_dropdown, lora_select_dropdown, user_api_key, temperature_slider, top_p_slider, systemPromptTxt, user_name], [current_model, status_display, chatbot, lora_select_dropdown], show_progress=True, api_name="get_model") - model_select_dropdown.change(toggle_like_btn_visibility, [model_select_dropdown], [like_dislike_area], show_progress=False) - lora_select_dropdown.change(get_model, [model_select_dropdown, lora_select_dropdown, user_api_key, temperature_slider, top_p_slider, systemPromptTxt, user_name], [current_model, status_display, chatbot], show_progress=True) - - # Template - systemPromptTxt.change(set_system_prompt, [current_model, systemPromptTxt], None) - templateRefreshBtn.click(get_template_names, None, [templateFileSelectDropdown]) - templateFileSelectDropdown.change( - load_template, - [templateFileSelectDropdown], - [promptTemplates, templateSelectDropdown], - show_progress=True, - ) - templateSelectDropdown.change( - get_template_content, - [promptTemplates, templateSelectDropdown, systemPromptTxt], - [systemPromptTxt], - show_progress=True, - ) - - # S&L - saveHistoryBtn.click( - save_chat_history, - [current_model, saveFileName, chatbot, user_name], - downloadFile, - show_progress=True, - ) - saveHistoryBtn.click(get_history_names, [gr.State(False), user_name], [historyFileSelectDropdown]) - exportMarkdownBtn.click( - export_markdown, - [current_model, saveFileName, chatbot, user_name], - downloadFile, - show_progress=True, - ) - historyRefreshBtn.click(get_history_names, [gr.State(False), user_name], [historyFileSelectDropdown]) - historyFileSelectDropdown.change(**load_history_from_file_args) - downloadFile.change(upload_chat_history, [current_model, downloadFile, user_name], [saveFileName, systemPromptTxt, chatbot]) - - # Advanced - max_context_length_slider.change(set_token_upper_limit, [current_model, max_context_length_slider], None) - temperature_slider.change(set_temperature, [current_model, temperature_slider], None) - top_p_slider.change(set_top_p, [current_model, top_p_slider], None) - n_choices_slider.change(set_n_choices, [current_model, n_choices_slider], None) - stop_sequence_txt.change(set_stop_sequence, [current_model, stop_sequence_txt], None) - max_generation_slider.change(set_max_tokens, [current_model, max_generation_slider], None) - presence_penalty_slider.change(set_presence_penalty, [current_model, presence_penalty_slider], None) - frequency_penalty_slider.change(set_frequency_penalty, [current_model, frequency_penalty_slider], None) - logit_bias_txt.change(set_logit_bias, [current_model, logit_bias_txt], None) - user_identifier_txt.change(set_user_identifier, [current_model, user_identifier_txt], None) - - default_btn.click( - reset_default, [], [apihostTxt, proxyTxt, status_display], show_progress=True - ) - changeAPIURLBtn.click( - change_api_host, - [apihostTxt], - [status_display], - show_progress=True, - ) - changeProxyBtn.click( - change_proxy, - [proxyTxt], - [status_display], - show_progress=True, - ) - -logging.info( - colorama.Back.GREEN - + "\n川虎的温馨提示:访问 http://localhost:7860 查看界面" - + colorama.Style.RESET_ALL -) -# 默认开启本地服务器,默认可以直接从IP访问,默认不创建公开分享链接 -demo.title = i18n("川虎Chat 🚀") - -if __name__ == "__main__": - reload_javascript() - 
demo.queue(concurrency_count=CONCURRENT_COUNT).launch( - blocked_paths=["config.json"], - favicon_path="./assets/favicon.ico" - ) diff --git a/spaces/xxccc/gpt-academic/crazy_functions/test_project/cpp/cppipc/buffer.cpp b/spaces/xxccc/gpt-academic/crazy_functions/test_project/cpp/cppipc/buffer.cpp deleted file mode 100644 index 0ac0fa7bc3ced0447ba4caa359355dd4252670b3..0000000000000000000000000000000000000000 --- a/spaces/xxccc/gpt-academic/crazy_functions/test_project/cpp/cppipc/buffer.cpp +++ /dev/null @@ -1,87 +0,0 @@ -#include "libipc/buffer.h" -#include "libipc/utility/pimpl.h" - -#include - -namespace ipc { - -bool operator==(buffer const & b1, buffer const & b2) { - return (b1.size() == b2.size()) && (std::memcmp(b1.data(), b2.data(), b1.size()) == 0); -} - -bool operator!=(buffer const & b1, buffer const & b2) { - return !(b1 == b2); -} - -class buffer::buffer_ : public pimpl { -public: - void* p_; - std::size_t s_; - void* a_; - buffer::destructor_t d_; - - buffer_(void* p, std::size_t s, buffer::destructor_t d, void* a) - : p_(p), s_(s), a_(a), d_(d) { - } - - ~buffer_() { - if (d_ == nullptr) return; - d_((a_ == nullptr) ? p_ : a_, s_); - } -}; - -buffer::buffer() - : buffer(nullptr, 0, nullptr, nullptr) { -} - -buffer::buffer(void* p, std::size_t s, destructor_t d) - : p_(p_->make(p, s, d, nullptr)) { -} - -buffer::buffer(void* p, std::size_t s, destructor_t d, void* additional) - : p_(p_->make(p, s, d, additional)) { -} - -buffer::buffer(void* p, std::size_t s) - : buffer(p, s, nullptr) { -} - -buffer::buffer(char const & c) - : buffer(const_cast(&c), 1) { -} - -buffer::buffer(buffer&& rhs) - : buffer() { - swap(rhs); -} - -buffer::~buffer() { - p_->clear(); -} - -void buffer::swap(buffer& rhs) { - std::swap(p_, rhs.p_); -} - -buffer& buffer::operator=(buffer rhs) { - swap(rhs); - return *this; -} - -bool buffer::empty() const noexcept { - return (impl(p_)->p_ == nullptr) || (impl(p_)->s_ == 0); -} - -void* buffer::data() noexcept { - return impl(p_)->p_; -} - -void const * buffer::data() const noexcept { - return impl(p_)->p_; -} - -std::size_t buffer::size() const noexcept { - return impl(p_)->s_; -} - -} // namespace ipc diff --git a/spaces/yangheng/Super-Resolution-Anime-Diffusion/Waifu2x/Img_to_Sqlite.py b/spaces/yangheng/Super-Resolution-Anime-Diffusion/Waifu2x/Img_to_Sqlite.py deleted file mode 100644 index 6f761e681e84433f4060bd2ec9abedddbc261381..0000000000000000000000000000000000000000 --- a/spaces/yangheng/Super-Resolution-Anime-Diffusion/Waifu2x/Img_to_Sqlite.py +++ /dev/null @@ -1,123 +0,0 @@ -""" -Split images into small patches and insert them into sqlite db. Reading and Inserting speeds are much better than -Ubuntu's (18.04) file system when the number of patches is larger than 20k. And it has smaller size than using h5 format - -Recommend to check or filter out small size patches as their content vary little. 128x128 seems better than 64x64. 
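The patches are stored as encoded image bytes in the lr_img / hr_img BLOB columns, so rows read
back with a plain SELECT can be decoded with Image.open(io.BytesIO(blob)), as the size-check
section at the bottom of this file does.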
- - -""" -import sqlite3 -from torch.utils.data import DataLoader -from tqdm import trange -from Dataloader import Image2Sqlite - -conn = sqlite3.connect("dataset/image_yandere.db") -cursor = conn.cursor() - -with conn: - cursor.execute("PRAGMA SYNCHRONOUS = OFF") - -table_name = "train_images_size_128_noise_1_rgb" -lr_col = "lr_img" -hr_col = "hr_img" - -with conn: - conn.execute( - f"CREATE TABLE IF NOT EXISTS {table_name} ({lr_col} BLOB, {hr_col} BLOB)" - ) - -dat = Image2Sqlite( - img_folder="./dataset/yande.re_test_shrink", - patch_size=256, - shrink_size=2, - noise_level=1, - down_sample_method=None, - color_mod="RGB", - dummy_len=None, -) -print(f"Total images {len(dat)}") - -img_dat = DataLoader(dat, num_workers=6, batch_size=6, shuffle=True) - -num_batches = 20 -for i in trange(num_batches): - bulk = [] - for lrs, hrs in img_dat: - patches = [(lrs[i], hrs[i]) for i in range(len(lrs))] - # patches = [(lrs[i], hrs[i]) for i in range(len(lrs)) if len(lrs[i]) > 14000] - - bulk.extend(patches) - - bulk = [ - i for i in bulk if len(i[0]) > 15000 - ] # for 128x128, 14000 is fair. Around 20% of patches are filtered out - cursor.executemany( - f"INSERT INTO {table_name}({lr_col}, {hr_col}) VALUES (?,?)", bulk - ) - conn.commit() - -cursor.execute(f"select max(rowid) from {table_name}") -print(cursor.fetchall()) -conn.commit() -# +++++++++++++++++++++++++++++++++++++ -# Used for Create Test Database -# ------------------------------------- - -# cursor.execute(f"SELECT ROWID FROM {table_name} ORDER BY LENGTH({lr_col}) DESC LIMIT 400") -# rowdis = cursor.fetchall() -# rowdis = ",".join([str(i[0]) for i in rowdis]) -# -# cursor.execute(f"DELETE FROM {table_name} WHERE ROWID NOT IN ({rowdis})") -# conn.commit() -# cursor.execute("vacuum") -# -# cursor.execute(""" -# CREATE TABLE IF NOT EXISTS train_images_size_128_noise_1_rgb_small AS -# SELECT * -# FROM train_images_size_128_noise_1_rgb -# WHERE length(lr_img) < 14000; -# """) -# -# cursor.execute(""" -# DELETE -# FROM train_images_size_128_noise_1_rgb -# WHERE length(lr_img) < 14000; -# """) - -# reset index -cursor.execute("VACUUM") -conn.commit() - -# +++++++++++++++++++++++++++++++++++++ -# check image size -# ------------------------------------- -# - -from PIL import Image -import io - -cursor.execute( - f""" - select {hr_col} from {table_name} - ORDER BY LENGTH({hr_col}) desc - limit 100 -""" -) -# WHERE LENGTH({lr_col}) BETWEEN 14000 AND 16000 - -# small = cursor.fetchall() -# print(len(small)) -for idx, i in enumerate(cursor): - img = Image.open(io.BytesIO(i[0])) - img.save(f"dataset/check/{idx}.png") - -# +++++++++++++++++++++++++++++++++++++ -# Check Image Variance -# ------------------------------------- - -import pandas as pd -import matplotlib.pyplot as plt - -dat = pd.read_sql(f"SELECT length({lr_col}) from {table_name}", conn) -dat.hist(bins=20) -plt.show() diff --git a/spaces/yangogo/bingo/src/components/turn-counter.tsx b/spaces/yangogo/bingo/src/components/turn-counter.tsx deleted file mode 100644 index 08a9e488f044802a8600f4d195b106567c35aab4..0000000000000000000000000000000000000000 --- a/spaces/yangogo/bingo/src/components/turn-counter.tsx +++ /dev/null @@ -1,23 +0,0 @@ -import React from 'react' -import { Throttling } from '@/lib/bots/bing/types' - -export interface TurnCounterProps { - throttling?: Throttling -} - -export function TurnCounter({ throttling }: TurnCounterProps) { - if (!throttling) { - return null - } - - return ( -
    -
    - {throttling.numUserMessagesInConversation} - 共 - {throttling.maxNumUserMessagesInConversation} -
    -
    -
    - ) -} diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/bark/modeling_bark.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/bark/modeling_bark.py deleted file mode 100644 index bdafb6347755d3216ed40403438dc78a04bc617c..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/bark/modeling_bark.py +++ /dev/null @@ -1,1625 +0,0 @@ -# coding=utf-8 -# Copyright 2023 The Suno AI Authors and The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" PyTorch BARK model.""" -import math -from typing import Dict, Optional, Tuple, Union - -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - -from ...generation.logits_process import AlternatingCodebooksLogitsProcessor, SuppressTokensLogitsProcessor -from ...modeling_outputs import CausalLMOutputWithPast, MaskedLMOutput -from ...modeling_utils import PreTrainedModel, get_parameter_device -from ...utils import ( - add_start_docstrings, - add_start_docstrings_to_model_forward, - is_accelerate_available, - logging, -) -from ..auto import AutoModel -from .configuration_bark import ( - BarkCoarseConfig, - BarkConfig, - BarkFineConfig, - BarkSemanticConfig, - BarkSubModelConfig, -) -from .generation_configuration_bark import ( - BarkCoarseGenerationConfig, - BarkFineGenerationConfig, - BarkSemanticGenerationConfig, -) - - -logger = logging.get_logger(__name__) - - -_CHECKPOINT_FOR_DOC = "suno/bark-small" -_CONFIG_FOR_DOC = "BarkConfig" - -BARK_PRETRAINED_MODEL_ARCHIVE_LIST = [ - "suno/bark-small", - "suno/bark", - # See all Bark models at https://huggingface.co/models?filter=bark -] - - -class BarkSelfAttention(nn.Module): - # adapted from GPTNeoSelfAttention and Bark code - # BarkSelfAttention can have two attention type, i.e full attention or causal attention - - def __init__(self, config, is_causal=False): - super().__init__() - - # regularization - self.dropout = config.dropout - self.attn_dropout = nn.Dropout(config.dropout) - self.resid_dropout = nn.Dropout(config.dropout) - - self.embed_dim = config.hidden_size - self.num_heads = config.num_heads - self.head_dim = self.embed_dim // self.num_heads - - if config.hidden_size % config.num_heads != 0: - raise ValueError( - f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" - f" {self.num_heads})." 
- ) - - # key, query, value projections for all heads, but in a batch - self.att_proj = nn.Linear(config.hidden_size, 3 * config.hidden_size, bias=config.bias) - # output projection - self.out_proj = nn.Linear(config.hidden_size, config.hidden_size, bias=config.bias) - - self.is_causal = is_causal - if is_causal: - block_size = config.block_size - bias = torch.tril(torch.ones((block_size, block_size), dtype=bool)).view(1, 1, block_size, block_size) - self.register_buffer("bias", bias) - - # Copied from transformers.models.gpt_neo.modeling_gpt_neo.GPTNeoSelfAttention._split_heads - def _split_heads(self, tensor, num_heads, attn_head_size): - """ - Splits hidden_size dim into attn_head_size and num_heads - """ - new_shape = tensor.size()[:-1] + (num_heads, attn_head_size) - tensor = tensor.view(new_shape) - return tensor.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features) - - def _merge_heads(self, tensor, num_heads, attn_head_size): - """ - Merges attn_head_size dim and num_attn_heads dim into hidden_size - """ - - # re-assemble all head outputs side by side - # (batch, num_heads, seq_len, attn_head_size) -> (batch, seq_len, num_heads*attn_head_size) - tensor = tensor.transpose(1, 2).contiguous() - tensor = tensor.view(tensor.size()[:-2] + (num_heads * attn_head_size,)) - - return tensor - - def _attn(self, query, key, value, attention_mask=None, head_mask=None): - # unlike GPTNeo's SelfAttention, divide by the square root of the dimension of the query and the key - attn_weights = torch.matmul(query, key.transpose(-1, -2)) * (1.0 / math.sqrt(self.head_dim)) - - if self.is_causal: - query_length, key_length = query.size(-2), key.size(-2) - - # fill the upper left part of the attention weights with inf - attn_weights = attn_weights.masked_fill( - self.bias[:, :, key_length - query_length : key_length, :key_length] == 0, - torch.finfo(attn_weights.dtype).min, - ) - - if attention_mask is not None: - # Apply the attention mask - attn_weights = attn_weights + attention_mask - - attn_weights = nn.functional.softmax(attn_weights, dim=-1) - attn_weights = attn_weights.to(value.dtype) - attn_weights = self.attn_dropout(attn_weights) - - # Mask heads if we want to - if head_mask is not None: - attn_weights = attn_weights * head_mask - - # (batch, num_heads, seq_len, seq_len) x (batch, num_heads, seq_len, attn_head_size) - # -> (batch, num_heads, seq_len, attn_head_size) - attn_output = torch.matmul(attn_weights, value) - - return attn_output, attn_weights - - def forward( - self, - hidden_states, - attention_mask=None, - past_key_values=None, - head_mask=None, - use_cache=False, - output_attentions=False, - ): - # calculate query, key, values for all heads in batch and move head forward to be the batch dim - query, key, value = self.att_proj(hidden_states).split(self.embed_dim, dim=2) - - query = self._split_heads(query, self.num_heads, self.head_dim) - key = self._split_heads(key, self.num_heads, self.head_dim) - value = self._split_heads(value, self.num_heads, self.head_dim) - - if past_key_values is not None: - past_key = past_key_values[0] - past_value = past_key_values[1] - key = torch.cat((past_key, key), dim=-2) - value = torch.cat((past_value, value), dim=-2) - - if use_cache is True: - present = (key, value) - else: - present = None - - attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask) - - attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim) - attn_output = self.out_proj(attn_output) - attn_output = 
self.resid_dropout(attn_output) - - outputs = (attn_output, present) - if output_attentions: - outputs += (attn_weights,) - - return outputs - - -class BarkLayerNorm(nn.Module): - """LayerNorm but with an optional bias. PyTorch doesn't support simply bias=False.""" - - def __init__(self, hidden_size, bias=True): - super().__init__() - self.weight = nn.Parameter(torch.ones(hidden_size)) - self.bias = nn.Parameter(torch.zeros(hidden_size)) if bias else None - - def forward(self, input): - return F.layer_norm(input, self.weight.shape, self.weight, self.bias, eps=1e-5) - - -class BarkMLP(nn.Module): - def __init__(self, config): - super().__init__() - self.in_proj = nn.Linear(config.hidden_size, 4 * config.hidden_size, bias=config.bias) - self.out_proj = nn.Linear(4 * config.hidden_size, config.hidden_size, bias=config.bias) - self.dropout = nn.Dropout(config.dropout) - self.gelu = nn.GELU() - - def forward(self, hidden_states): - hidden_states = self.in_proj(hidden_states) - hidden_states = self.gelu(hidden_states) - hidden_states = self.out_proj(hidden_states) - hidden_states = self.dropout(hidden_states) - return hidden_states - - -class BarkBlock(nn.Module): - def __init__(self, config, is_causal=False): - super().__init__() - - if is_causal: - # if causal, uses handmade LayerNorm, so that the layerNorm bias is optional - # this handmade layerNorm is used to stick with Bark choice of leaving optional bias in - # AutoRegressive models (corresponding to the "Text" and the "Coarse" modules) - self.layernorm_1 = BarkLayerNorm(config.hidden_size, bias=config.bias) - self.layernorm_2 = BarkLayerNorm(config.hidden_size, bias=config.bias) - else: - self.layernorm_1 = nn.LayerNorm(config.hidden_size) - self.layernorm_2 = nn.LayerNorm(config.hidden_size) - - self.attn = BarkSelfAttention(config, is_causal=is_causal) - - self.mlp = BarkMLP(config) - - def forward( - self, - hidden_states, - past_key_values=None, - attention_mask=None, - head_mask=None, - use_cache=False, - output_attentions=False, - ): - intermediary_hidden_states = self.layernorm_1(hidden_states) - - attn_outputs = self.attn( - intermediary_hidden_states, - past_key_values=past_key_values, - attention_mask=attention_mask, - head_mask=head_mask, - use_cache=use_cache, - output_attentions=output_attentions, - ) - - attn_output = attn_outputs[0] # output_attn: output, present_key_values, (attn_weights) - outputs = attn_outputs[1:] - - intermediary_hidden_states = hidden_states + attn_output - intermediary_hidden_states = intermediary_hidden_states + self.mlp( - self.layernorm_2(intermediary_hidden_states) - ) - - if use_cache: - outputs = (intermediary_hidden_states,) + outputs - else: - outputs = (intermediary_hidden_states,) + outputs[1:] - - return outputs # hidden_states, ((present), attentions) - - -class BarkPreTrainedModel(PreTrainedModel): - """ - An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained - models. 
- """ - - config_class = BarkConfig - supports_gradient_checkpointing = False - - def _init_weights(self, module): - """Initialize the weights.""" - if isinstance(module, (nn.Linear,)): - # Slightly different from the TF version which uses truncated_normal for initialization - # cf https://github.com/pytorch/pytorch/pull/5617 - module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) - if module.bias is not None: - module.bias.data.zero_() - elif isinstance(module, nn.Embedding): - module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) - if module.padding_idx is not None: - module.weight.data[module.padding_idx].zero_() - elif isinstance(module, nn.LayerNorm): - module.bias.data.zero_() - module.weight.data.fill_(1.0) - - def __init__(self, *inputs, **kwargs): - super().__init__(*inputs, **kwargs) - - @property - def device(self) -> torch.device: - """ - `torch.device`: The device on which the module is (assuming that all the module parameters are on the same - device). - """ - - # if has _hf_hook, has been offloaded so the device has to be found in the hook - if not hasattr(self, "_hf_hook"): - return get_parameter_device(self) - for module in self.modules(): - if ( - hasattr(module, "_hf_hook") - and hasattr(module._hf_hook, "execution_device") - and module._hf_hook.execution_device is not None - ): - return torch.device(module._hf_hook.execution_device) - - return get_parameter_device(self) - - def _set_gradient_checkpointing(self, module, value=False): - if isinstance(module, BarkCausalModel) or isinstance(module, BarkFineModel) or isinstance(module, BarkModel): - module.gradient_checkpointing = value - - -BARK_MODEL_START_DOCSTRING = """ - This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the - library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads - etc.) - - This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. - Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage - and behavior. - - Parameters: - config ([`{config}`]): - Model configuration class with all the parameters of the model. Initializing with a config file does not - load the weights associated with the model, only the configuration. Check out the - [`~PreTrainedModel.from_pretrained`] method to load the model weights. -""" - - -BARK_START_DOCSTRING = r""" - This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the - library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads - etc.) - - This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. - Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage - and behavior. - - Parameters: - config ([`BarkConfig`]): - Model configuration class with all the parameters of the model. Initializing with a config file does not - load the weights associated with the model, only the configuration. Check out the - [`~PreTrainedModel.from_pretrained`] method to load the model weights. -""" - - -BARK_FINE_INPUTS_DOCSTRING = r""" - Args: - codebook_idx (`int`): - Index of the codebook that will be predicted. 
- input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length, number_of_codebooks)`): - Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide - it. Initially, indices of the first two codebooks are obtained from the `coarse` sub-model. The rest is - predicted recursively by attending the previously predicted channels. The model predicts on windows of - length 1024. - attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): - Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - - [What are attention masks?](../glossary#attention-mask) - position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): - Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, - config.max_position_embeddings - 1]`. - - [What are position IDs?](../glossary#position-ids) - head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): - Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: - - - 1 indicates the head is **not masked**, - - 0 indicates the head is **masked**. - labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): NOT IMPLEMENTED YET. - input_embeds (`torch.FloatTensor` of shape `(batch_size, input_sequence_length, hidden_size)`, *optional*): - Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. If - `past_key_values` is used, optionally only the last `input_embeds` have to be input (see - `past_key_values`). This is useful if you want more control over how to convert `input_ids` indices into - associated vectors than the model's internal embedding lookup matrix. - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned - tensors for more detail. - output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for - more detail. - return_dict (`bool`, *optional*): - Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. -""" - -BARK_CAUSAL_MODEL_INPUTS_DOCSTRING = r""" - Args: - input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): - Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide - it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and - [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) - past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache` is passed or when `config.use_cache=True`): - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape - `(batch_size, num_heads, sequence_length, embed_size_per_head)`. - - Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see - `past_key_values` input) to speed up sequential decoding. 
- - If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that - don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all - `input_ids` of shape `(batch_size, sequence_length)`. - attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): - Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - - [What are attention masks?](../glossary#attention-mask) - position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): - Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, - config.max_position_embeddings - 1]`. - - [What are position IDs?](../glossary#position-ids) - head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): - Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: - - - 1 indicates the head is **not masked**, - - 0 indicates the head is **masked**. - input_embeds (`torch.FloatTensor` of shape `(batch_size, input_sequence_length, hidden_size)`, *optional*): - Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. - Here, due to `Bark` particularities, if `past_key_values` is used, `input_embeds` will be ignored and you - have to use `input_ids`. If `past_key_values` is not used and `use_cache` is set to `True`, `input_embeds` - is used in priority instead of `input_ids`. - use_cache (`bool`, *optional*): - If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see - `past_key_values`). - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned - tensors for more detail. - output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for - more detail. - return_dict (`bool`, *optional*): - Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
-""" - - -# GPT2-like autoregressive model -class BarkCausalModel(BarkPreTrainedModel): - config_class = BarkSubModelConfig - - def __init__(self, config): - super().__init__(config) - self.config = config - - # initialize as an autoregressive GPT-like model - self.input_embeds_layer = nn.Embedding(config.input_vocab_size, config.hidden_size) - self.position_embeds_layer = nn.Embedding(config.block_size, config.hidden_size) - - self.drop = nn.Dropout(config.dropout) - - self.layers = nn.ModuleList([BarkBlock(config, is_causal=True) for _ in range(config.num_layers)]) - - self.layernorm_final = BarkLayerNorm(config.hidden_size, bias=config.bias) - - self.lm_head = nn.Linear(config.hidden_size, config.output_vocab_size, bias=False) - self.gradient_checkpointing = False - - # Initialize weights and apply final processing - self.post_init() - - def get_input_embeddings(self): - return self.input_embeds_layer - - def set_input_embeddings(self, new_embeddings): - self.input_embeds_layer = new_embeddings - - def prepare_inputs_for_generation(self, input_ids, past_key_values=None, **kwargs): - input_embeds = kwargs.get("input_embeds", None) - - attention_mask = kwargs.get("attention_mask", None) - position_ids = kwargs.get("position_ids", None) - - if past_key_values is not None: - # only last token for inputs_ids if past is defined in kwargs - seq_len = input_ids.shape[1] - input_ids = input_ids[:, [-1]] - - # input_embeds have already been used and is not required anymore - input_embeds = None - else: - if input_embeds is not None and kwargs.get("use_cache"): - seq_len = input_embeds.shape[1] - else: - seq_len = input_ids.shape[1] - - # ensure that attention_mask and position_ids shapes are aligned with the weird Bark hack of reducing - # sequence length on the first forward pass - if attention_mask is not None: - attention_mask = attention_mask[:, :seq_len] - if position_ids is not None: - position_ids = position_ids[:, :seq_len] - - if attention_mask is not None and position_ids is None: - # create position_ids on the fly for batch generation - position_ids = attention_mask.long().cumsum(-1) - 1 - position_ids.masked_fill_(attention_mask == 0, 1) - if past_key_values: - position_ids = position_ids[:, -1].unsqueeze(-1) - else: - position_ids = None - - if input_embeds is not None and kwargs.get("use_cache"): - return { - "input_ids": None, - "input_embeds": input_embeds, - "past_key_values": past_key_values, - "use_cache": kwargs.get("use_cache"), - "position_ids": position_ids, - "attention_mask": attention_mask, - } - return { - "input_ids": input_ids, - "past_key_values": past_key_values, - "use_cache": kwargs.get("use_cache"), - "position_ids": position_ids, - "attention_mask": attention_mask, - } - - @add_start_docstrings_to_model_forward(BARK_CAUSAL_MODEL_INPUTS_DOCSTRING) - def forward( - self, - input_ids: Optional[torch.Tensor] = None, - past_key_values: Optional[Tuple[torch.FloatTensor]] = None, - attention_mask: Optional[torch.Tensor] = None, - position_ids: Optional[torch.Tensor] = None, - head_mask: Optional[torch.Tensor] = None, - labels: Optional[torch.LongTensor] = None, - input_embeds: Optional[torch.Tensor] = None, - use_cache: Optional[bool] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithPast]: - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - 
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - use_cache = use_cache if use_cache is not None else self.config.use_cache - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - # Verify if input_embeds already exists - # then compute embeddings. - if input_ids is not None and input_embeds is not None: - raise ValueError("You cannot specify both input_ids and input_embeds at the same time") - elif input_embeds is not None and past_key_values is None: - # we want to return the input_embeds in priority so that it is in line with a weird hack - # of Bark which concatenate two bits of the input_embeds on the first forward pass of the semantic model - pass - elif input_ids is not None: - input_embeds = self.input_embeds_layer(input_ids) # token embeddings of shape (b, t, n_embd) - elif input_embeds is not None: - pass - else: - raise ValueError("You have to specify either input_ids or input_embeds") - - input_shape = input_embeds.size()[:-1] - batch_size = input_embeds.shape[0] - seq_length = input_shape[-1] - - device = input_ids.device if input_ids is not None else input_embeds.device - - if past_key_values is None: - past_length = 0 - past_key_values = tuple([None] * len(self.layers)) - else: - past_length = past_key_values[0][0].size(-2) - - if position_ids is None: - position_ids = torch.arange(past_length, seq_length + past_length, dtype=torch.long, device=device) - position_ids = position_ids.unsqueeze(0) # shape (1, seq_length) - - position_embeds = self.position_embeds_layer(position_ids) # position embeddings of shape (1, t, n_embd) - - # Attention mask. - if attention_mask is not None: - if batch_size <= 0: - raise ValueError("batch_size has to be defined and > 0") - attention_mask = attention_mask.view(batch_size, -1) - # We create a 3D attention mask from a 2D tensor mask. - # Sizes are [batch_size, 1, 1, to_seq_length] - # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] - # this attention mask is more simple than the triangular masking of causal attention - # used in OpenAI GPT, we just need to prepare the broadcast dimension here. - attention_mask = attention_mask[:, None, None, :] - - # Since attention_mask is 1.0 for positions we want to attend and 0.0 for - # masked positions, this operation will create a tensor which is 0.0 for - # positions we want to attend and the dtype's smallest value for masked positions. - # Since we are adding it to the raw scores before the softmax, this is - # effectively the same as removing these entirely. - attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility - attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min - - # Prepare head mask if needed - # 1.0 in head_mask indicate we keep the head - # attention_probs has shape bsz x num_heads x N x N - # head_mask has shape num_layers x batch x num_heads x N x N - head_mask = self.get_head_mask(head_mask, self.config.num_layers) - - hidden_states = self.drop(input_embeds + position_embeds) - output_shape = input_shape + (hidden_states.size(-1),) - - if self.gradient_checkpointing and self.training: - if use_cache: - logger.warning_once( - "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
- ) - use_cache = False - - present_key_values = () if use_cache else None - all_self_attentions = () if output_attentions else None - all_hidden_states = () if output_hidden_states else None - - for i, (block, past_layer_key_values) in enumerate(zip(self.layers, past_key_values)): - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states,) - - if self.gradient_checkpointing and self.training: - - def create_custom_forward(module): - def custom_forward(*inputs): - # None for past_key_value - return module(*inputs, use_cache, output_attentions) - - return custom_forward - - outputs = torch.utils.checkpoint.checkpoint( - create_custom_forward(block), - hidden_states, - None, - attention_mask, - head_mask[i], - ) - else: - outputs = block( - hidden_states, - past_key_values=past_layer_key_values, - attention_mask=attention_mask, - head_mask=head_mask[i], - use_cache=use_cache, - output_attentions=output_attentions, - ) - - hidden_states = outputs[0] - - if use_cache: - present_key_values = present_key_values + (outputs[1],) - - if output_attentions: - all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],) - - hidden_states = self.layernorm_final(hidden_states) - - hidden_states = hidden_states.view(output_shape) - - # Add last hidden state - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states,) - - logits = self.lm_head(hidden_states) - - loss = None - if labels is not None: - raise NotImplementedError( - "Training is not implemented yet for Bark - ensure you do not pass `labels` to the model." - ) - - if not return_dict: - return tuple( - v for v in [None, logits, present_key_values, all_hidden_states, all_self_attentions] if v is not None - ) - - return CausalLMOutputWithPast( - loss=loss, - logits=logits, - past_key_values=present_key_values, - hidden_states=all_hidden_states, - attentions=all_self_attentions, - ) - - @staticmethod - def _reorder_cache( - past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor - ) -> Tuple[Tuple[torch.Tensor]]: - """ - This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or - [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct - beam_idx at every generation step. - """ - # Necessary for beam_search - return tuple( - tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past) - for layer_past in past_key_values - ) - - -@add_start_docstrings( - """Bark semantic (or text) model. It shares the same architecture as the coarse model. - It is a GPT-2 like autoregressive model with a language modeling head on top.""", - BARK_MODEL_START_DOCSTRING.format(config="BarkSemanticConfig"), -) -class BarkSemanticModel(BarkCausalModel): - base_model_prefix = "semantic" - config_class = BarkSemanticConfig - - def generate( - self, - input_ids: torch.Tensor, - semantic_generation_config: BarkSemanticGenerationConfig = None, - history_prompt: Optional[Dict[str, torch.Tensor]] = None, - attention_mask: Optional[torch.Tensor] = None, - **kwargs, - ) -> torch.LongTensor: - """ - Generates text semantic tokens from an input prompt and an additional optional `Bark` speaker prompt. - - Args: - input_ids (`Optional[torch.Tensor]` of shape (batch_size, seq_len), *optional*): - Input ids, i.e tokenized input sentences. Will be truncated up to - semantic_generation_config.max_input_semantic_length tokens. 
Note that the output audios will be as - long as the longest generation among the batch. - semantic_generation_config (`BarkSemanticGenerationConfig`): - Generation config indicating how to generate the semantic tokens. - history_prompt (`Optional[Dict[str,torch.Tensor]]`, *optional*): - Optional `Bark` speaker prompt. - attention_mask (`Optional[torch.Tensor]`, *optional*): - Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - - [What are attention masks?](../glossary#attention-mask) - Returns: - torch.LongTensor: Output semantic tokens. - """ - if semantic_generation_config is None: - raise ValueError("`semantic_generation_config` has to be provided") - - batch_size = input_ids.shape[0] - - max_input_semantic_length = semantic_generation_config.max_input_semantic_length - - input_ids = input_ids + semantic_generation_config.text_encoding_offset - - if attention_mask is not None: - input_ids = input_ids.masked_fill((1 - attention_mask).bool(), semantic_generation_config.text_pad_token) - - if history_prompt is not None: - semantic_history = history_prompt["semantic_prompt"][-max_input_semantic_length:] - semantic_history = nn.functional.pad( - semantic_history, - (0, max_input_semantic_length - len(semantic_history)), - value=semantic_generation_config.semantic_pad_token, - mode="constant", - ) - else: - semantic_history = torch.tensor( - [semantic_generation_config.semantic_pad_token] * max_input_semantic_length, dtype=torch.int - ).to(self.device) - - semantic_history = torch.repeat_interleave(semantic_history[None], batch_size, dim=0) - - infer_array = torch.tensor( - [[semantic_generation_config.semantic_infer_token]] * batch_size, dtype=torch.int - ).to(self.device) - - input_embeds = torch.cat( - [ - self.input_embeds_layer(input_ids[:, :max_input_semantic_length]) - + self.input_embeds_layer(semantic_history[:, : max_input_semantic_length + 1]), - self.input_embeds_layer(infer_array), - ], - dim=1, - ) - - tokens_to_suppress = list( - range(semantic_generation_config.semantic_vocab_size, semantic_generation_config.semantic_pad_token) - ) - tokens_to_suppress.extend( - list(range(semantic_generation_config.semantic_pad_token + 1, self.config.output_vocab_size)) - ) - - suppress_tokens_logits_processor = SuppressTokensLogitsProcessor(tokens_to_suppress) - - # pass input_ids in order to stay consistent with the transformers generate method even though it is not used - # (except to get the input seq_len - that's why we keep the first 257 tokens) - semantic_output = super().generate( - torch.ones((batch_size, max_input_semantic_length + 1), dtype=torch.int).to(self.device), - input_embeds=input_embeds, - logits_processor=[suppress_tokens_logits_processor], - generation_config=semantic_generation_config, - **kwargs, - ) # size: 10048 - - # take the generated semantic tokens - semantic_output = semantic_output[:, max_input_semantic_length + 1 :] - - return semantic_output - - -@add_start_docstrings( - """Bark coarse acoustics model. - It shares the same architecture as the semantic (or text) model. 
It is a GPT-2 like autoregressive model with a - language modeling head on top.""", - BARK_MODEL_START_DOCSTRING.format(config="BarkCoarseConfig"), -) -class BarkCoarseModel(BarkCausalModel): - base_model_prefix = "coarse_acoustics" - config_class = BarkCoarseConfig - - def preprocess_histories( - self, - max_coarse_history: int, - semantic_to_coarse_ratio: int, - batch_size: int, - semantic_generation_config: int, - codebook_size: int, - history_prompt: Optional[Dict[str, torch.Tensor]] = None, - ): - """ - Preprocess the optional `Bark` speaker prompts before `self.generate`. - - Args: - max_coarse_history (`int`): - Maximum size of coarse tokens used. - semantic_to_coarse_ratio (`int`): - Ratio of semantic to coarse frequency - batch_size (`int`): - Batch size, i.e the number of samples. - semantic_generation_config (`BarkSemanticGenerationConfig`): - Generation config indicating how to generate the semantic tokens. - codebook_size (`int`): - Codebook channel size, i.e. the size of the output vocabulary per codebook channel. - history_prompt (`Optional[Dict[str,torch.Tensor]]`): - Optional `Bark` speaker prompt. - Returns: Returns: - `tuple(torch.FloatTensor)`: - - **x_semantic_history** (`torch.FloatTensor` -- Processed semantic speaker prompt. - - **x_coarse_history** (`torch.FloatTensor`) -- Processed coarse speaker prompt. - """ - if history_prompt is not None: - x_semantic_history = torch.repeat_interleave(history_prompt["semantic_prompt"][None], batch_size, dim=0) - # clone to avoid modifying history_prompt.coarse_prompt - x_coarse_history = history_prompt["coarse_prompt"].clone() - - # offset x_coarse_history - if codebook_size is not None: - for n in range(1, x_coarse_history.shape[0]): - # offset - x_coarse_history[n, :] += codebook_size * n - - # flatten x_coarse_history - x_coarse_history = torch.transpose(x_coarse_history, 0, 1).view(-1) - - x_coarse_history = x_coarse_history + semantic_generation_config.semantic_vocab_size - - x_coarse_history = torch.repeat_interleave(x_coarse_history[None], batch_size, dim=0) - # e.g: after SEMANTIC_VOCAB_SIZE (10000), 1024 tokens dedicated to first codebook, 1024 next tokens - # dedicated to second codebook. 
- - max_semantic_history = int(np.floor(max_coarse_history / semantic_to_coarse_ratio)) - # trim histories correctly - n_semantic_hist_provided = min( - [ - max_semantic_history, - x_semantic_history.shape[1] - x_semantic_history.shape[1] % 2, - int(np.floor(x_coarse_history.shape[1] / semantic_to_coarse_ratio)), - ] - ) - - n_coarse_hist_provided = int(round(n_semantic_hist_provided * semantic_to_coarse_ratio)) - - x_semantic_history = x_semantic_history[:, -n_semantic_hist_provided:].int() - x_coarse_history = x_coarse_history[:, -n_coarse_hist_provided:].int() - # bit of a hack for time alignment (sounds better) - from Bark original implementation - x_coarse_history = x_coarse_history[:, :-2] - - else: - # shape: (batch_size, 0) - x_semantic_history = torch.tensor([[]] * batch_size, dtype=torch.int).to(self.device) - x_coarse_history = torch.tensor([[]] * batch_size, dtype=torch.int).to(self.device) - - return x_semantic_history, x_coarse_history - - def generate( - self, - semantic_output: torch.Tensor, - semantic_generation_config: BarkSemanticGenerationConfig = None, - coarse_generation_config: BarkCoarseGenerationConfig = None, - codebook_size: int = 1024, - history_prompt: Optional[Dict[str, torch.Tensor]] = None, - **kwargs, - ) -> torch.LongTensor: - """ - Generates coarse acoustics tokens from input text semantic tokens and an additional optional `Bark` speaker - prompt. - - Args: - semantic_output (`torch.Tensor` of shape (batch_size, seq_len), *optional*): - Input text semantic ids, i.e the output of `BarkSemanticModel.generate`. - semantic_generation_config (`BarkSemanticGenerationConfig`): - Generation config indicating how to generate the semantic tokens. - coarse_generation_config (`BarkCoarseGenerationConfig`): - Generation config indicating how to generate the coarse tokens. - codebook_size (`int`, *optional*, defaults to 1024): - Codebook channel size, i.e. the size of the output vocabulary per codebook channel. - history_prompt (`Optional[Dict[str,torch.Tensor]]`, *optional*): - Optional `Bark` speaker prompt. - Returns: - torch.LongTensor: Output coarse acoustics tokens. - """ - - if semantic_generation_config is None: - raise ValueError("`semantic_generation_config` has to be provided") - - if coarse_generation_config is None: - raise ValueError("`coarse_generation_config` has to be provided") - - max_coarse_input_length = coarse_generation_config.max_coarse_input_length - max_coarse_history = coarse_generation_config.max_coarse_history - sliding_window_len = coarse_generation_config.sliding_window_len - - # replace semantic_pad_token (eos_tok and pad_tok here) with coarse_semantic_pad_token i.e the pad_token - # used in the next model - semantic_output.masked_fill_( - semantic_output == semantic_generation_config.semantic_pad_token, - coarse_generation_config.coarse_semantic_pad_token, - ) - - semantic_to_coarse_ratio = ( - coarse_generation_config.coarse_rate_hz - / semantic_generation_config.semantic_rate_hz - * coarse_generation_config.n_coarse_codebooks - ) - max_semantic_history = int(np.floor(max_coarse_history / semantic_to_coarse_ratio)) - - # beware, depends on the seq_len of the longest sequence of the batch. - # Also, the seq_len might be one token too long because of an added - # pad_token as compared to Bark original implementation. 
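# A minimal standalone sketch of this length arithmetic (the semantic_to_coarse_ratio computed
# above and the max_generated_len computed just below), assuming typical Bark coarse defaults
# (coarse_rate_hz=75, semantic_rate_hz=49.9, n_coarse_codebooks=2, max_coarse_history=630);
# the semantic length is a made-up example value, not read from any checkpoint.
import numpy as np

coarse_rate_hz, semantic_rate_hz, n_coarse_codebooks = 75.0, 49.9, 2
max_coarse_history = 630

semantic_to_coarse_ratio = coarse_rate_hz / semantic_rate_hz * n_coarse_codebooks    # ~3.006
max_semantic_history = int(np.floor(max_coarse_history / semantic_to_coarse_ratio))  # 209

semantic_len = 500  # hypothetical longest semantic sequence in the batch
max_generated_len = np.floor(semantic_len * semantic_to_coarse_ratio / n_coarse_codebooks)
max_generated_len = int(round(max_generated_len * n_coarse_codebooks))               # 1502 coarse tokens
print(semantic_to_coarse_ratio, max_semantic_history, max_generated_len)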
- max_generated_len = np.floor( - semantic_output.shape[1] * semantic_to_coarse_ratio / coarse_generation_config.n_coarse_codebooks - ) - max_generated_len = int(round(max_generated_len * coarse_generation_config.n_coarse_codebooks)) - - batch_size = semantic_output.shape[0] - - x_semantic_history, x_coarse = self.preprocess_histories( - history_prompt=history_prompt, - max_coarse_history=max_coarse_history, - semantic_to_coarse_ratio=semantic_to_coarse_ratio, - batch_size=batch_size, - semantic_generation_config=semantic_generation_config, - codebook_size=codebook_size, - ) - base_semantic_idx = x_semantic_history.shape[1] - - semantic_output = torch.hstack([x_semantic_history, semantic_output]) - - n_window_steps = int(np.ceil(max_generated_len / sliding_window_len)) - - total_generated_len = 0 - - len_coarse_history = x_coarse.shape[1] - - for _ in range(n_window_steps): - semantic_idx = base_semantic_idx + int(round(total_generated_len / semantic_to_coarse_ratio)) - - # pad from right side - input_coarse = semantic_output[:, np.max([0, semantic_idx - max_semantic_history]) :] - input_coarse = input_coarse[:, :max_coarse_input_length] - input_coarse = F.pad( - input_coarse, - (0, max_coarse_input_length - input_coarse.shape[-1]), - "constant", - coarse_generation_config.coarse_semantic_pad_token, - ) - - input_coarse = torch.hstack( - [ - input_coarse, - torch.tensor([[coarse_generation_config.coarse_infer_token]] * batch_size).to(self.device), - x_coarse[:, -max_coarse_history:], - ] - ) - - alternatingLogitsProcessor = AlternatingCodebooksLogitsProcessor( - input_coarse.shape[1], - semantic_generation_config.semantic_vocab_size, - codebook_size, - ) - - output_coarse = super().generate( - input_coarse, - logits_processor=[alternatingLogitsProcessor], - max_new_tokens=min(sliding_window_len, max_generated_len - total_generated_len), - generation_config=coarse_generation_config, - **kwargs, - ) - - input_coarse_len = input_coarse.shape[1] - - x_coarse = torch.hstack([x_coarse, output_coarse[:, input_coarse_len:]]) - total_generated_len = x_coarse.shape[1] - len_coarse_history - - del output_coarse - - coarse_output = x_coarse[:, len_coarse_history:] - - return coarse_output - - -@add_start_docstrings( - """Bark fine acoustics model. 
It is a non-causal GPT-like model with `config.n_codes_total` embedding layers and - language modeling heads, one for each codebook.""", - BARK_MODEL_START_DOCSTRING.format(config="BarkFineConfig"), -) -class BarkFineModel(BarkPreTrainedModel): - base_model_prefix = "fine_acoustics" - config_class = BarkFineConfig - main_input_name = "codebook_idx" - - def __init__(self, config): - # non-causal gpt-like model with one embedding layer and one lm_head for each codebook of Encodec - super().__init__(config) - self.config = config - - # initialize a modified non causal GPT-like model - # note that for there is one embedding layer and one lm_head for each codebook of Encodec - self.input_embeds_layers = nn.ModuleList( - [nn.Embedding(config.input_vocab_size, config.hidden_size) for _ in range(config.n_codes_total)] - ) - self.position_embeds_layer = nn.Embedding(config.block_size, config.hidden_size) - - self.drop = nn.Dropout(config.dropout) - - self.layers = nn.ModuleList([BarkBlock(config, is_causal=False) for _ in range(config.num_layers)]) - - self.layernorm_final = nn.LayerNorm(config.hidden_size) - - self.lm_heads = nn.ModuleList( - [ - nn.Linear(config.hidden_size, config.output_vocab_size, bias=False) - for _ in range(config.n_codes_given, config.n_codes_total) - ] - ) - self.gradient_checkpointing = False - self.n_codes_total = config.n_codes_total - - # Initialize weights and apply final processing - self.post_init() - - def get_input_embeddings(self): - # one embedding layers for each codebook - return self.input_embeds_layers - - def set_input_embeddings(self, new_embeddings): - # one embedding layers for each codebook - self.input_embeds_layers = new_embeddings - - def get_output_embeddings(self): - # one lm_head for each codebook - return self.lm_heads - - def set_output_embeddings(self, new_output_embeddings): - # one lm_head for each codebook - self.lm_heads = new_output_embeddings - - def _resize_token_embeddings(self, new_num_tokens, pad_to_multiple_of=None): - old_embeddings_list = self.get_input_embeddings() - new_embeddings_list = nn.ModuleList( - [ - self._get_resized_embeddings(old_embeddings, new_num_tokens, pad_to_multiple_of) - for old_embeddings in old_embeddings_list - ] - ) - self.set_input_embeddings(new_embeddings_list) - new_num_tokens = new_embeddings_list[0].weight.shape[0] - - # if word embeddings are not tied, make sure that lm head is resized as well - if self.get_output_embeddings() is not None and not self.config.tie_word_embeddings: - old_lm_head_list = self.get_output_embeddings() - new_lm_head_list = nn.ModuleList( - [self._get_resized_lm_head(old_lm_head, new_num_tokens) for old_lm_head in old_lm_head_list] - ) - self.set_output_embeddings(new_lm_head_list) - - return self.get_input_embeddings() - - def resize_token_embeddings( - self, new_num_tokens: Optional[int] = None, pad_to_multiple_of: Optional[int] = None - ) -> nn.Embedding: - """ - Resizes input token embeddings matrix of the model if `new_num_tokens != config.vocab_size`. - - Takes care of tying weights embeddings afterwards if the model class has a `tie_weights()` method. - - Arguments: - new_num_tokens (`int`, *optional*): - The number of new tokens in the embedding matrix. Increasing the size will add newly initialized - vectors at the end. Reducing the size will remove vectors from the end. If not provided or `None`, just - returns a pointer to the input tokens `torch.nn.Embedding` module of the model without doing anything. 
- pad_to_multiple_of (`int`, *optional*): - If set will pad the embedding matrix to a multiple of the provided value. - - This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability - `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128. For more - details about this, or help on choosing the correct value for resizing, refer to this guide: - https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html#requirements-tc - - Return: - `torch.nn.Embedding`: Pointer to the input tokens Embeddings Module of the model. - """ - model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of) - if new_num_tokens is None and pad_to_multiple_of is None: - return model_embeds - - # Update base model and current model config - self.config.output_vocab_size = model_embeds[0].weight.shape[0] - self.config.vocab_size = model_embeds[0].weight.shape[0] - self.output_vocab_size = model_embeds[0].weight.shape[0] - self.vocab_size = model_embeds[0].weight.shape[0] - - # Tie weights again if needed - self.tie_weights() - - return model_embeds - - def tie_weights(self): - """ - Tie the weights between the input embeddings list and the output embeddings list. - - If the `torchscript` flag is set in the configuration, can't handle parameter sharing so we are cloning the - weights instead. - """ - if getattr(self.config, "tie_word_embeddings", True): - self._tied_weights_keys = [] - output_embeddings = self.get_output_embeddings() - input_embeddings = self.get_input_embeddings() - - for i in range(self.config.n_codes_total - self.config.n_codes_given): - # self.input_embeds_layers[i + 1].weight = self.lm_heads[i].weight - self._tie_or_clone_weights(output_embeddings[i], input_embeddings[i + 1]) - self._tied_weights_keys.append(f"lm_heads.{i}.weight") - - for module in self.modules(): - if hasattr(module, "_tie_weights"): - module._tie_weights() - - @add_start_docstrings_to_model_forward(BARK_FINE_INPUTS_DOCSTRING) - def forward( - self, - codebook_idx: int, # an additionnal idx corresponding to the id of the codebook that will be predicted - input_ids: Optional[torch.Tensor] = None, - attention_mask: Optional[torch.Tensor] = None, - position_ids: Optional[torch.Tensor] = None, - head_mask: Optional[torch.Tensor] = None, - labels: Optional[torch.LongTensor] = None, - input_embeds: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[Tuple[torch.Tensor], MaskedLMOutput]: - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - if codebook_idx == 0: - raise ValueError("Cannot predict 0th codebook - 0th codebook should be predicted by the coarse model") - - if input_ids is not None and input_embeds is not None: - raise ValueError("You cannot specify both input_ids and input_embeds at the same time") - - if input_ids is None and input_embeds is None: - raise ValueError("You have to specify either input_ids or input_embeds") - - if input_ids is not None: - # the input_embeddings are the sum of the j previous codebooks embeddings before - # the current codebook_idx codebook - - # forward the GPT model 
itself - input_embeds = [ - input_embeds_layer(input_ids[:, :, i]).unsqueeze(-1) - for i, input_embeds_layer in enumerate(self.input_embeds_layers) - ] # token embeddings of shape (b, t, n_embd) - input_embeds = torch.cat(input_embeds, dim=-1) - input_embeds = input_embeds[:, :, :, : codebook_idx + 1].sum(dim=-1) - - input_shape = input_embeds.size()[:-1] - batch_size = input_embeds.shape[0] - seq_length = input_shape[1] - - device = input_ids.device if input_ids is not None else input_embeds.device - - if position_ids is None: - position_ids = torch.arange(0, seq_length, dtype=torch.long, device=device) - position_ids = position_ids.unsqueeze(0) # shape (1, seq_length) - - position_embeds = self.position_embeds_layer(position_ids) # position embeddings of shape (1, t, n_embd) - - # Attention mask. - if attention_mask is not None: - if batch_size <= 0: - raise ValueError("batch_size has to be defined and > 0") - attention_mask = attention_mask.view(batch_size, -1) - attention_mask = attention_mask[:, None, None, :] - attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility - attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min - - head_mask = self.get_head_mask(head_mask, self.config.num_layers) - - hidden_states = self.drop(input_embeds + position_embeds) - output_shape = input_shape + (hidden_states.size(-1),) - - all_self_attentions = () if output_attentions else None - all_hidden_states = () if output_hidden_states else None - - for i, block in enumerate(self.layers): - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states,) - - outputs = block( - hidden_states, - attention_mask=attention_mask, - head_mask=head_mask[i], - output_attentions=output_attentions, - ) - - hidden_states = outputs[0] - - if output_attentions: - all_self_attentions = all_self_attentions + (outputs[1],) - - hidden_states = self.layernorm_final(hidden_states) - hidden_states = hidden_states.view(output_shape) - - # Add last hidden state - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states,) - - logits = self.lm_heads[codebook_idx - self.config.n_codes_given](hidden_states) - - loss = None - if labels is not None: - raise NotImplementedError("Training is not implemented yet") - - if not return_dict: - return tuple(v for v in [None, logits, all_hidden_states, all_self_attentions] if v is not None) - - return MaskedLMOutput( - loss=loss, - logits=logits, - hidden_states=all_hidden_states, - attentions=all_self_attentions, - ) - - def generate( - self, - coarse_output: torch.Tensor, - semantic_generation_config: BarkSemanticGenerationConfig = None, - coarse_generation_config: BarkCoarseGenerationConfig = None, - fine_generation_config: BarkFineGenerationConfig = None, - codebook_size: int = 1024, - history_prompt: Optional[Dict[str, torch.Tensor]] = None, - **kwargs, - ) -> torch.LongTensor: - """ - Generates fine acoustics tokens from input coarse acoustics tokens and an additional optional `Bark` speaker - prompt. - - Args: - coarse_output (`torch.Tensor` of shape (batch_size, seq_len)): - Input coarse acoustics ids, i.e the output of `BarkCoarseModel.generate`. - semantic_generation_config (`BarkSemanticGenerationConfig`): - Generation config indicating how to generate the semantic tokens. - coarse_generation_config (`BarkCoarseGenerationConfig`): - Generation config indicating how to generate the coarse tokens. 
- fine_generation_config (`BarkFineGenerationConfig`): - Generation config indicating how to generate the fine tokens. - codebook_size (`int`, *optional*, defaults to 1024): - Codebook channel size, i.e. the size of the output vocabulary per codebook channel. - history_prompt (`Optional[Dict[str,torch.Tensor]]`, *optional*): - Optional `Bark` speaker prompt. - Returns: - torch.LongTensor: Output fine acoustics tokens. - """ - if semantic_generation_config is None: - raise ValueError("`semantic_generation_config` has to be provided") - - if coarse_generation_config is None: - raise ValueError("`coarse_generation_config` has to be provided") - - if fine_generation_config is None: - raise ValueError("`fine_generation_config` has to be provided") - - # since we don't really use GenerationConfig through the fine model (autoencoder) - # and since only temperature is used from the classic GenerationConfig parameters - # manually impose the kwargs priority over the generation config - temperature = kwargs.get("temperature", fine_generation_config.temperature) - - max_fine_history_length = fine_generation_config.max_fine_history_length - max_fine_input_length = fine_generation_config.max_fine_input_length - - # shape: (batch, n_coarse_codebooks * seq_len) - # new_shape: (batch, seq_len, n_coarse_codebooks) - coarse_output = coarse_output.view(coarse_output.shape[0], -1, coarse_generation_config.n_coarse_codebooks) - - # brings ids into the range [0, codebook_size -1] - coarse_output = torch.remainder(coarse_output - semantic_generation_config.semantic_vocab_size, codebook_size) - batch_size = coarse_output.shape[0] - - if history_prompt is not None: - x_fine_history = torch.repeat_interleave(history_prompt["fine_prompt"].T[None], batch_size, dim=0) - # transpose to get to shape (seq_len, n_fine_codebooks) - else: - x_fine_history = None - - n_coarse = coarse_generation_config.n_coarse_codebooks - - # pad the last 6th codebooks - fine_input = F.pad( - coarse_output, - (0, fine_generation_config.n_fine_codebooks - n_coarse), - "constant", - codebook_size, - ) - - # prepend history if available (max max_fine_history_length) - if x_fine_history is not None: - fine_input = torch.cat([x_fine_history[:, -max_fine_history_length:, :], fine_input], dim=1) - - # len of the fine_history that has been added to fine_input - n_history = x_fine_history[:, -max_fine_history_length:, :].shape[1] - else: - n_history = 0 - - n_remove_from_end = 0 - # need to pad if too short (since non-causal model) - if fine_input.shape[1] < max_fine_input_length: - n_remove_from_end = max_fine_input_length - fine_input.shape[1] - fine_input = F.pad(fine_input, (0, 0, 0, n_remove_from_end), mode="constant", value=codebook_size) - - # we can be lazy about fractional loop and just keep overwriting codebooks. - # seems that coarse_output.shape[1] - (max_fine_input_length - n_history) is equal to minus n_remove_from_end - # So if we needed to pad because too short, n_loops is always 1 (because n_remove_from_end > 0) - # If not, we loop over at least twice. 
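# A standalone sketch of the sliding-window bookkeeping that follows, assuming the usual Bark
# fine defaults (max_fine_input_length=1024, max_fine_history_length=512); seq_len and
# n_history are made-up example values, not read from any checkpoint.
import numpy as np

max_fine_input_length, max_fine_history_length = 1024, 512
seq_len, n_history = 1500, 256   # stands in for coarse_output.shape[1] and the prepended history length

n_loops = (seq_len - (max_fine_input_length - n_history)) / max_fine_history_length
n_loops = max(0, int(np.ceil(n_loops))) + 1           # -> 3 windows for this example

total_len = n_history + seq_len                       # length of fine_input along the time axis
for n_outer in range(n_loops):
    start_idx = min(n_outer * max_fine_history_length, total_len - max_fine_input_length)
    start_fill_idx = min(n_history + n_outer * max_fine_history_length,
                         total_len - max_fine_history_length)
    print(n_outer, start_idx, start_fill_idx - start_idx)  # window origin and relative fill offset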
- - n_loops = (coarse_output.shape[1] - (max_fine_input_length - n_history)) / max_fine_history_length - n_loops = int(np.ceil(n_loops)) - n_loops = max(0, n_loops) + 1 - - for n_outer in range(n_loops): - start_idx = min([n_outer * max_fine_history_length, fine_input.shape[1] - max_fine_input_length]) - - start_fill_idx = min( - [n_history + n_outer * max_fine_history_length, fine_input.shape[1] - max_fine_history_length] - ) - rel_start_fill_idx = start_fill_idx - start_idx - input_buffer = fine_input[:, start_idx : start_idx + max_fine_input_length, :] - for n_inner in range(n_coarse, fine_generation_config.n_fine_codebooks): - logits = self.forward(n_inner, input_buffer).logits - if temperature is None or temperature == 1.0: - relevant_logits = logits[:, rel_start_fill_idx:, :codebook_size] - codebook_preds = torch.argmax(relevant_logits, -1) - else: - relevant_logits = logits[:, :, :codebook_size] / temperature - # apply softmax - probs = F.softmax(relevant_logits, dim=-1)[:, rel_start_fill_idx:max_fine_input_length] - # reshape to 2D: (batch_size, seq_len, codebook_size) -> (batch_size*seq_len, codebook_size) - probs = probs.reshape((-1, codebook_size)) - # multinomial then reshape : (batch_size*seq_len)-> (batch_size,seq_len) - codebook_preds = torch.multinomial(probs, num_samples=1).view(batch_size, -1) - codebook_preds = codebook_preds.to(torch.int32) - input_buffer[:, rel_start_fill_idx:, n_inner] = codebook_preds - del logits, codebook_preds - - # transfer into fine_input - for n_inner in range(n_coarse, fine_generation_config.n_fine_codebooks): - fine_input[ - :, start_fill_idx : start_fill_idx + (max_fine_input_length - rel_start_fill_idx), n_inner - ] = input_buffer[:, rel_start_fill_idx:, n_inner] - del input_buffer - - fine_input = fine_input.transpose(1, 2)[:, :, n_history:] - if n_remove_from_end > 0: - fine_input = fine_input[:, :, :-n_remove_from_end] - - if fine_input.shape[-1] != coarse_output.shape[-2]: - raise ValueError("input and output should have the same seq_len") - - return fine_input - - -@add_start_docstrings( - """ - The full Bark model, a text-to-speech model composed of 4 sub-models: - - [`BarkSemanticModel`] (also referred to as the 'text' model): a causal auto-regressive transformer model that - takes - as input tokenized text, and predicts semantic text tokens that capture the meaning of the text. - - [`BarkCoarseModel`] (also refered to as the 'coarse acoustics' model), also a causal autoregressive transformer, - that takes into input the results of the last model. It aims at regressing the first two audio codebooks necessary - to `encodec`. - - [`BarkFineModel`] (the 'fine acoustics' model), this time a non-causal autoencoder transformer, which iteratively - predicts the last codebooks based on the sum of the previous codebooks embeddings. - - having predicted all the codebook channels from the [`EncodecModel`], Bark uses it to decode the output audio - array. - - It should be noted that each of the first three modules can support conditional speaker embeddings to condition the - output sound according to specific predefined voice. 
- """, - BARK_START_DOCSTRING, -) -class BarkModel(BarkPreTrainedModel): - config_class = BarkConfig - - def __init__(self, config): - super().__init__(config) - - self.semantic = BarkSemanticModel(config.semantic_config) - self.coarse_acoustics = BarkCoarseModel(config.coarse_acoustics_config) - self.fine_acoustics = BarkFineModel(config.fine_acoustics_config) - - self.codec_model = AutoModel.from_config(config.codec_config) - - self.config = config - - @property - def device(self) -> torch.device: - """ - `torch.device`: The device on which the module is (assuming that all the module parameters are on the same - device). - """ - # for bark_model, device must be verified on its sub-models - # if has _hf_hook, has been offloaded so the device has to be found in the hook - if not hasattr(self.semantic, "_hf_hook"): - return get_parameter_device(self) - for module in self.semantic.modules(): - if ( - hasattr(module, "_hf_hook") - and hasattr(module._hf_hook, "execution_device") - and module._hf_hook.execution_device is not None - ): - return torch.device(module._hf_hook.execution_device) - - def enable_cpu_offload(self, gpu_id: Optional[int] = 0): - r""" - Offloads all sub-models to CPU using accelerate, reducing memory usage with a low impact on performance. This - method moves one whole sub-model at a time to the GPU when it is used, and the sub-model remains in GPU until - the next sub-model runs. - - Args: - gpu_id (`int`, *optional*, defaults to 0): - GPU id on which the sub-models will be loaded and offloaded. - """ - if is_accelerate_available(): - from accelerate import cpu_offload_with_hook - else: - raise ImportError("`enable_model_cpu_offload` requires `accelerate`.") - - device = torch.device(f"cuda:{gpu_id}") - - if self.device.type != "cpu": - self.to("cpu") - torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) - - # this layer is used outside the first foward pass of semantic so need to be loaded before semantic - self.semantic.input_embeds_layer, _ = cpu_offload_with_hook(self.semantic.input_embeds_layer, device) - - hook = None - for cpu_offloaded_model in [ - self.semantic, - self.coarse_acoustics, - self.fine_acoustics, - ]: - _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook) - - self.fine_acoustics_hook = hook - - _, hook = cpu_offload_with_hook(self.codec_model, device, prev_module_hook=hook) - - # We'll offload the last model manually. - self.codec_model_hook = hook - - def codec_decode(self, fine_output): - """Turn quantized audio codes into audio array using encodec.""" - - fine_output = fine_output.transpose(0, 1) - emb = self.codec_model.quantizer.decode(fine_output) - out = self.codec_model.decoder(emb) - audio_arr = out.squeeze(1) # squeeze the codebook dimension - - return audio_arr - - @torch.no_grad() - def generate( - self, - input_ids: Optional[torch.Tensor] = None, - history_prompt: Optional[Dict[str, torch.Tensor]] = None, - **kwargs, - ) -> torch.LongTensor: - """ - Generates audio from an input prompt and an additional optional `Bark` speaker prompt. - - Args: - input_ids (`Optional[torch.Tensor]` of shape (batch_size, seq_len), *optional*): - Input ids. Will be truncated up to 256 tokens. Note that the output audios will be as long as the - longest generation among the batch. - history_prompt (`Optional[Dict[str,torch.Tensor]]`, *optional*): - Optional `Bark` speaker prompt. Note that for now, this model takes only one speaker prompt per batch. 
- kwargs (*optional*): Remaining dictionary of keyword arguments. Keyword arguments are of two types: - - - Without a prefix, they will be entered as `**kwargs` for the `generate` method of each sub-model. - - With a *semantic_*, *coarse_*, *fine_* prefix, they will be input for the `generate` method of the - semantic, coarse and fine respectively. It has the priority over the keywords without a prefix. - - This means you can, for example, specify a generation strategy for all sub-models except one. - Returns: - torch.LongTensor: Output generated audio. - - Example: - - ```python - >>> from transformers import AutoProcessor, BarkModel - - >>> processor = AutoProcessor.from_pretrained("suno/bark-small") - >>> model = BarkModel.from_pretrained("suno/bark-small") - - >>> # To add a voice preset, you can pass `voice_preset` to `BarkProcessor.__call__(...)` - >>> voice_preset = "v2/en_speaker_6" - - >>> inputs = processor("Hello, my dog is cute, I need him in my life", voice_preset=voice_preset) - - >>> audio_array = model.generate(**inputs, semantic_max_new_tokens=100) - >>> audio_array = audio_array.cpu().numpy().squeeze() - ``` - """ - # TODO (joao):workaround until nested generation config is compatible with PreTrained Model - # todo: dict - semantic_generation_config = BarkSemanticGenerationConfig(**self.generation_config.semantic_config) - coarse_generation_config = BarkCoarseGenerationConfig(**self.generation_config.coarse_acoustics_config) - fine_generation_config = BarkFineGenerationConfig(**self.generation_config.fine_acoustics_config) - - kwargs_semantic = { - # if "attention_mask" is set, it should not be passed to CoarseModel and FineModel - "attention_mask": kwargs.pop("attention_mask", None) - } - kwargs_coarse = {} - kwargs_fine = {} - for key, value in kwargs.items(): - if key.startswith("semantic_"): - key = key[len("semantic_") :] - kwargs_semantic[key] = value - elif key.startswith("coarse_"): - key = key[len("coarse_") :] - kwargs_coarse[key] = value - elif key.startswith("fine_"): - key = key[len("fine_") :] - kwargs_fine[key] = value - else: - # If the key is already in a specific config, then it's been set with a - # submodules specific value and we don't override - if key not in kwargs_semantic: - kwargs_semantic[key] = value - if key not in kwargs_coarse: - kwargs_coarse[key] = value - if key not in kwargs_fine: - kwargs_fine[key] = value - - # 1. Generate from the semantic model - semantic_output = self.semantic.generate( - input_ids, - history_prompt=history_prompt, - semantic_generation_config=semantic_generation_config, - **kwargs_semantic, - ) - - # 2. Generate from the coarse model - coarse_output = self.coarse_acoustics.generate( - semantic_output, - history_prompt=history_prompt, - semantic_generation_config=semantic_generation_config, - coarse_generation_config=coarse_generation_config, - codebook_size=self.generation_config.codebook_size, - **kwargs_coarse, - ) - - # 3. 
"generate" from the fine model - output = self.fine_acoustics.generate( - coarse_output, - history_prompt=history_prompt, - semantic_generation_config=semantic_generation_config, - coarse_generation_config=coarse_generation_config, - fine_generation_config=fine_generation_config, - codebook_size=self.generation_config.codebook_size, - **kwargs_fine, - ) - - if getattr(self, "fine_acoustics_hook", None) is not None: - # Manually offload fine_acoustics to CPU - # and load codec_model to GPU - # since bark doesn't use codec_model forward pass - self.fine_acoustics_hook.offload() - self.codec_model = self.codec_model.to(self.device) - - # 4. Decode the output and generate audio array - audio = self.codec_decode(output) - - if getattr(self, "codec_model_hook", None) is not None: - # Offload codec_model to CPU - self.codec_model_hook.offload() - - return audio diff --git a/spaces/yl12053/so-vits-4.1-Grass-Wonder/auto_slicer.py b/spaces/yl12053/so-vits-4.1-Grass-Wonder/auto_slicer.py deleted file mode 100644 index 090d913455f8153b7f39ee85aba068b3ba28230a..0000000000000000000000000000000000000000 --- a/spaces/yl12053/so-vits-4.1-Grass-Wonder/auto_slicer.py +++ /dev/null @@ -1,106 +0,0 @@ -import os -import numpy as np -import librosa -import soundfile as sf -from modules.slicer2 import Slicer - -class AutoSlicer: - def __init__(self): - self.slicer_params = { - "threshold": -40, - "min_length": 5000, - "min_interval": 300, - "hop_size": 10, - "max_sil_kept": 500, - } - self.original_min_interval = self.slicer_params["min_interval"] - - def auto_slice(self, filename, input_dir, output_dir, max_sec): - audio, sr = librosa.load(os.path.join(input_dir, filename), sr=None, mono=False) - slicer = Slicer(sr=sr, **self.slicer_params) - chunks = slicer.slice(audio) - files_to_delete = [] - for i, chunk in enumerate(chunks): - if len(chunk.shape) > 1: - chunk = chunk.T - output_filename = f"{os.path.splitext(filename)[0]}_{i}" - output_filename = "".join(c for c in output_filename if c.isascii() or c == "_") + ".wav" - output_filepath = os.path.join(output_dir, output_filename) - sf.write(output_filepath, chunk, sr) - #Check and re-slice audio that more than max_sec. 
- while True: - new_audio, sr = librosa.load(output_filepath, sr=None, mono=False) - if librosa.get_duration(y=new_audio, sr=sr) <= max_sec: - break - self.slicer_params["min_interval"] = self.slicer_params["min_interval"] // 2 - if self.slicer_params["min_interval"] >= self.slicer_params["hop_size"]: - new_chunks = Slicer(sr=sr, **self.slicer_params).slice(new_audio) - for j, new_chunk in enumerate(new_chunks): - if len(new_chunk.shape) > 1: - new_chunk = new_chunk.T - new_output_filename = f"{os.path.splitext(output_filename)[0]}_{j}.wav" - sf.write(os.path.join(output_dir, new_output_filename), new_chunk, sr) - files_to_delete.append(output_filepath) - else: - break - self.slicer_params["min_interval"] = self.original_min_interval - for file_path in files_to_delete: - if os.path.exists(file_path): - os.remove(file_path) - - def merge_short(self, output_dir, max_sec, min_sec): - short_files = [] - for filename in os.listdir(output_dir): - filepath = os.path.join(output_dir, filename) - if filename.endswith(".wav"): - audio, sr = librosa.load(filepath, sr=None, mono=False) - duration = librosa.get_duration(y=audio, sr=sr) - if duration < min_sec: - short_files.append((filepath, audio, duration)) - short_files.sort(key=lambda x: x[2], reverse=True) - merged_audio = [] - current_duration = 0 - for filepath, audio, duration in short_files: - if current_duration + duration <= max_sec: - merged_audio.append(audio) - current_duration += duration - os.remove(filepath) - else: - if merged_audio: - output_audio = np.concatenate(merged_audio, axis=-1) - if len(output_audio.shape) > 1: - output_audio = output_audio.T - output_filename = f"merged_{len(os.listdir(output_dir))}.wav" - sf.write(os.path.join(output_dir, output_filename), output_audio, sr) - merged_audio = [audio] - current_duration = duration - os.remove(filepath) - if merged_audio and current_duration >= min_sec: - output_audio = np.concatenate(merged_audio, axis=-1) - if len(output_audio.shape) > 1: - output_audio = output_audio.T - output_filename = f"merged_{len(os.listdir(output_dir))}.wav" - sf.write(os.path.join(output_dir, output_filename), output_audio, sr) - - def slice_count(self, input_dir, output_dir): - orig_duration = final_duration = 0 - for file in os.listdir(input_dir): - if file.endswith(".wav"): - _audio, _sr = librosa.load(os.path.join(input_dir, file), sr=None, mono=False) - orig_duration += librosa.get_duration(y=_audio, sr=_sr) - wav_files = [file for file in os.listdir(output_dir) if file.endswith(".wav")] - num_files = len(wav_files) - max_duration = -1 - min_duration = float("inf") - for file in wav_files: - file_path = os.path.join(output_dir, file) - audio, sr = librosa.load(file_path, sr=None, mono=False) - duration = librosa.get_duration(y=audio, sr=sr) - final_duration += float(duration) - if duration > max_duration: - max_duration = float(duration) - if duration < min_duration: - min_duration = float(duration) - return num_files, max_duration, min_duration, orig_duration, final_duration - - diff --git a/spaces/yl12053/so-vits-4.1-Grass-Wonder/vdecoder/hifiganwithsnake/models.py b/spaces/yl12053/so-vits-4.1-Grass-Wonder/vdecoder/hifiganwithsnake/models.py deleted file mode 100644 index 64f0e4dc985afd7993f78bb1b9743139990fa4d1..0000000000000000000000000000000000000000 --- a/spaces/yl12053/so-vits-4.1-Grass-Wonder/vdecoder/hifiganwithsnake/models.py +++ /dev/null @@ -1,518 +0,0 @@ -import os -import json -from .env import AttrDict -import numpy as np -import torch -import torch.nn.functional as F -import 
torch.nn as nn -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from .utils import init_weights, get_padding -from vdecoder.hifiganwithsnake.alias.act import SnakeAlias - -LRELU_SLOPE = 0.1 - - -def load_model(model_path, device='cuda'): - config_file = os.path.join(os.path.split(model_path)[0], 'config.json') - with open(config_file) as f: - data = f.read() - - global h - json_config = json.loads(data) - h = AttrDict(json_config) - - generator = Generator(h).to(device) - - cp_dict = torch.load(model_path) - generator.load_state_dict(cp_dict['generator']) - generator.eval() - generator.remove_weight_norm() - del cp_dict - return generator, h - - -class ResBlock1(torch.nn.Module): - def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.h = h - self.convs1 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]))) - ]) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))) - ]) - self.convs2.apply(init_weights) - - self.num_layers = len(self.convs1) + len(self.convs2) - self.activations = nn.ModuleList([ - SnakeAlias(channels) for _ in range(self.num_layers) - ]) - - def forward(self, x): - acts1, acts2 = self.activations[::2], self.activations[1::2] - for c1, c2, a1, a2 in zip(self.convs1, self.convs2, acts1, acts2): - xt = a1(x) - xt = c1(xt) - xt = a2(xt) - xt = c2(xt) - x = xt + x - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, h, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.h = h - self.convs = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))) - ]) - self.convs.apply(init_weights) - - self.num_layers = len(self.convs) - self.activations = nn.ModuleList([ - SnakeAlias(channels) for _ in range(self.num_layers) - ]) - - def forward(self, x): - for c,a in zip(self.convs, self.activations): - xt = a(x) - xt = c(xt) - x = xt + x - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -def padDiff(x): - return F.pad(F.pad(x, (0,0,-1,1), 'constant', 0) - x, (0,0,0,-1), 'constant', 0) - -class SineGen(torch.nn.Module): - """ Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - sine_amp = 0.1, noise_std = 0.003, - voiced_threshold = 0, - flag_for_pulse=False) - samp_rate: sampling rate in Hz - harmonic_num: number of harmonic overtones (default 0) - sine_amp: amplitude of sine-wavefrom (default 0.1) - 
noise_std: std of Gaussian noise (default 0.003) - voiced_thoreshold: F0 threshold for U/V classification (default 0) - flag_for_pulse: this SinGen is used inside PulseGen (default False) - Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) - """ - - def __init__(self, samp_rate, harmonic_num=0, - sine_amp=0.1, noise_std=0.003, - voiced_threshold=0, - flag_for_pulse=False): - super(SineGen, self).__init__() - self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - self.flag_for_pulse = flag_for_pulse - - def _f02uv(self, f0): - # generate uv signal - uv = (f0 > self.voiced_threshold).type(torch.float32) - return uv - - def _f02sine(self, f0_values): - """ f0_values: (batchsize, length, dim) - where dim indicates fundamental tone and overtones - """ - # convert to F0 in rad. The interger part n can be ignored - # because 2 * np.pi * n doesn't affect phase - rad_values = (f0_values / self.sampling_rate) % 1 - - # initial phase noise (no noise for fundamental component) - rand_ini = torch.rand(f0_values.shape[0], f0_values.shape[2], \ - device=f0_values.device) - rand_ini[:, 0] = 0 - rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - - # instantanouse phase sine[t] = sin(2*pi \sum_i=1 ^{t} rad) - if not self.flag_for_pulse: - # for normal case - - # To prevent torch.cumsum numerical overflow, - # it is necessary to add -1 whenever \sum_k=1^n rad_value_k > 1. - # Buffer tmp_over_one_idx indicates the time step to add -1. - # This will not change F0 of sine because (x-1) * 2*pi = x * 2*pi - tmp_over_one = torch.cumsum(rad_values, 1) % 1 - tmp_over_one_idx = (padDiff(tmp_over_one)) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 - - sines = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1) - * 2 * np.pi) - else: - # If necessary, make sure that the first time step of every - # voiced segments is sin(pi) or cos(0) - # This is used for pulse-train generation - - # identify the last time step in unvoiced segments - uv = self._f02uv(f0_values) - uv_1 = torch.roll(uv, shifts=-1, dims=1) - uv_1[:, -1, :] = 1 - u_loc = (uv < 1) * (uv_1 > 0) - - # get the instantanouse phase - tmp_cumsum = torch.cumsum(rad_values, dim=1) - # different batch needs to be processed differently - for idx in range(f0_values.shape[0]): - temp_sum = tmp_cumsum[idx, u_loc[idx, :, 0], :] - temp_sum[1:, :] = temp_sum[1:, :] - temp_sum[0:-1, :] - # stores the accumulation of i.phase within - # each voiced segments - tmp_cumsum[idx, :, :] = 0 - tmp_cumsum[idx, u_loc[idx, :, 0], :] = temp_sum - - # rad_values - tmp_cumsum: remove the accumulation of i.phase - # within the previous voiced segment. 
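                # The cumulative sum of (rad_values - tmp_cumsum) below therefore yields an
                # instantaneous phase that restarts at the onset of each voiced segment, so
                # every voiced segment begins at cos(0), as described in the class docstring.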
- i_phase = torch.cumsum(rad_values - tmp_cumsum, dim=1) - - # get the sines - sines = torch.cos(i_phase * 2 * np.pi) - return sines - - def forward(self, f0): - """ sine_tensor, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, length, dim) - output uv: tensor(batchsize=1, length, 1) - """ - with torch.no_grad(): - f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, - device=f0.device) - # fundamental component - fn = torch.multiply(f0, torch.FloatTensor([[range(1, self.harmonic_num + 2)]]).to(f0.device)) - - # generate sine waveforms - sine_waves = self._f02sine(fn) * self.sine_amp - - # generate uv signal - # uv = torch.ones(f0.shape) - # uv = uv * (f0 > self.voiced_threshold) - uv = self._f02uv(f0) - - # noise: for unvoiced should be similar to sine_amp - # std = self.sine_amp/3 -> max value ~ self.sine_amp - # . for voiced regions is self.noise_std - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - - # first: set the unvoiced part to 0 by uv - # then: additive noise - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - """ SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - uv (batchsize, length, 1) - """ - - def __init__(self, sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - - # to produce sine waveforms - self.l_sin_gen = SineGen(sampling_rate, harmonic_num, - sine_amp, add_noise_std, voiced_threshod) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x): - """ - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - """ - # source for harmonic branch - sine_wavs, uv, _ = self.l_sin_gen(x) - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - - # source for noise branch, in the same shape as uv - noise = torch.randn_like(uv) * self.sine_amp / 3 - return sine_merge, noise, uv - - -class Generator(torch.nn.Module): - def __init__(self, h): - super(Generator, self).__init__() - self.h = h - - self.num_kernels = len(h["resblock_kernel_sizes"]) - self.num_upsamples = len(h["upsample_rates"]) - self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(h["upsample_rates"])) - self.m_source = SourceModuleHnNSF( - sampling_rate=h["sampling_rate"], - harmonic_num=8) - self.noise_convs = nn.ModuleList() - self.conv_pre = weight_norm(Conv1d(h["inter_channels"], h["upsample_initial_channel"], 7, 1, padding=3)) - resblock = ResBlock1 if h["resblock"] == '1' else ResBlock2 - self.ups = nn.ModuleList() - for i, (u, k) in 
enumerate(zip(h["upsample_rates"], h["upsample_kernel_sizes"])): - c_cur = h["upsample_initial_channel"] // (2 ** (i + 1)) - self.ups.append(weight_norm( - ConvTranspose1d(h["upsample_initial_channel"] // (2 ** i), h["upsample_initial_channel"] // (2 ** (i + 1)), - k, u, padding=(k - u + 1) // 2))) - if i + 1 < len(h["upsample_rates"]): # - stride_f0 = np.prod(h["upsample_rates"][i + 1:]) - self.noise_convs.append(Conv1d( - 1, c_cur, kernel_size=stride_f0 * 2, stride=stride_f0, padding=(stride_f0+ 1) // 2)) - else: - self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - self.resblocks = nn.ModuleList() - self.snakes = nn.ModuleList() - for i in range(len(self.ups)): - ch = h["upsample_initial_channel"] // (2 ** (i + 1)) - self.snakes.append(SnakeAlias(h["upsample_initial_channel"] // (2 ** (i)))) - for j, (k, d) in enumerate(zip(h["resblock_kernel_sizes"], h["resblock_dilation_sizes"])): - self.resblocks.append(resblock(h, ch, k, d)) - - self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3)) - self.ups.apply(init_weights) - self.conv_post.apply(init_weights) - self.snake_post = SnakeAlias(ch) - self.cond = nn.Conv1d(h['gin_channels'], h['upsample_initial_channel'], 1) - - def forward(self, x, f0, g=None): - # print(1,x.shape,f0.shape,f0[:, None].shape) - f0 = self.f0_upsamp(f0[:, None]).transpose(1, 2) # bs,n,t - # print(2,f0.shape) - har_source, noi_source, uv = self.m_source(f0) - har_source = har_source.transpose(1, 2) - x = self.conv_pre(x) - x = x + self.cond(g) - # print(124,x.shape,har_source.shape) - for i in range(self.num_upsamples): - x = self.snakes[i](x) - # print(3,x.shape) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - # print(4,x_source.shape,har_source.shape,x.shape) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = self.snake_post(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - print('Removing weight norm...') - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - remove_weight_norm(self.conv_pre) - remove_weight_norm(self.conv_post) - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(2, 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class 
MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, periods=None): - super(MultiPeriodDiscriminator, self).__init__() - self.periods = periods if periods is not None else [2, 3, 5, 7, 11] - self.discriminators = nn.ModuleList() - for period in self.periods: - self.discriminators.append(DiscriminatorP(period)) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - fmap_rs.append(fmap_r) - y_d_gs.append(y_d_g) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 128, 15, 1, padding=7)), - norm_f(Conv1d(128, 128, 41, 2, groups=4, padding=20)), - norm_f(Conv1d(128, 256, 41, 2, groups=16, padding=20)), - norm_f(Conv1d(256, 512, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(512, 1024, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 1, groups=16, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiScaleDiscriminator(torch.nn.Module): - def __init__(self): - super(MultiScaleDiscriminator, self).__init__() - self.discriminators = nn.ModuleList([ - DiscriminatorS(use_spectral_norm=True), - DiscriminatorS(), - DiscriminatorS(), - ]) - self.meanpools = nn.ModuleList([ - AvgPool1d(4, 2, padding=2), - AvgPool1d(4, 2, padding=2) - ]) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - if i != 0: - y = self.meanpools[i - 1](y) - y_hat = self.meanpools[i - 1](y_hat) - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - fmap_rs.append(fmap_r) - y_d_gs.append(y_d_g) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -def feature_loss(fmap_r, fmap_g): - loss = 0 - for dr, dg in zip(fmap_r, fmap_g): - for rl, gl in zip(dr, dg): - loss += torch.mean(torch.abs(rl - gl)) - - return loss * 2 - - -def discriminator_loss(disc_real_outputs, disc_generated_outputs): - loss = 0 - r_losses = [] - g_losses = [] - for dr, dg in zip(disc_real_outputs, disc_generated_outputs): - r_loss = torch.mean((1 - dr) ** 2) - g_loss = torch.mean(dg ** 2) - loss += (r_loss + g_loss) - r_losses.append(r_loss.item()) - g_losses.append(g_loss.item()) - - return loss, r_losses, g_losses - - -def generator_loss(disc_outputs): - loss = 0 - gen_losses = [] - for dg in disc_outputs: - l = torch.mean((1 - dg) ** 2) - gen_losses.append(l) - loss += l - - return loss, gen_losses diff --git a/spaces/yl12053/so-vits-4.1-Kitasan-Black/vencoder/encoder.py b/spaces/yl12053/so-vits-4.1-Kitasan-Black/vencoder/encoder.py deleted file mode 100644 index 2cf5678533cf16f2e81248535d35e4c3c1c5799a..0000000000000000000000000000000000000000 --- a/spaces/yl12053/so-vits-4.1-Kitasan-Black/vencoder/encoder.py +++ /dev/null @@ -1,12 +0,0 @@ -class SpeechEncoder(object): - def __init__(self,vec_path = "pretrain/checkpoint_best_legacy_500.pt",device=None): - self.model = None #This is Model - 
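        # Abstract base: concrete encoders in vencoder/ presumably load the checkpoint
        # at vec_path here and set hidden_dim to the embedding size documented for
        # encoder() below; 768 is just the default placeholder.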
self.hidden_dim = 768 - pass - - def encoder(self,wav): - ''' - input: wav:[batchsize,signal_length] - output: embedding:[batchsize,hidden_dim,wav_frame] - ''' - pass \ No newline at end of file diff --git a/spaces/ysharma/llamas/app.py b/spaces/ysharma/llamas/app.py deleted file mode 100644 index 8fa95cc12a4c1afa47daa8d5fbd65ea2ec295284..0000000000000000000000000000000000000000 --- a/spaces/ysharma/llamas/app.py +++ /dev/null @@ -1,147 +0,0 @@ -import time - -from theme_dropdown import create_theme_dropdown # noqa: F401 - -import gradio as gr - -dropdown, js = create_theme_dropdown() - -with gr.Blocks(theme='ysharma/llamas') as demo: - with gr.Row().style(equal_height=True): - with gr.Column(scale=10): - gr.Markdown( - """ - # Theme preview: `llamas` - To use this theme, set `theme='ysharma/llamas'` in `gr.Blocks()` or `gr.Interface()`. - You can append an `@` and a semantic version expression, e.g. @>=1.0.0,<2.0.0 to pin to a given version - of this theme. - """ - ) - with gr.Column(scale=3): - with gr.Box(): - dropdown.render() - toggle_dark = gr.Button(value="Toggle Dark").style(full_width=True) - - dropdown.change(None, dropdown, None, _js=js) - toggle_dark.click( - None, - _js=""" - () => { - document.body.classList.toggle('dark'); - document.querySelector('gradio-app').style.backgroundColor = 'var(--color-background-primary)' - } - """, - ) - - name = gr.Textbox( - label="Name", - info="Full name, including middle name. No special characters.", - placeholder="John Doe", - value="John Doe", - interactive=True, - ) - - with gr.Row(): - slider1 = gr.Slider(label="Slider 1") - slider2 = gr.Slider(label="Slider 2") - gr.CheckboxGroup(["A", "B", "C"], label="Checkbox Group") - - with gr.Row(): - with gr.Column(variant="panel", scale=1): - gr.Markdown("## Panel 1") - radio = gr.Radio( - ["A", "B", "C"], - label="Radio", - info="Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. 
Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.", - ) - drop = gr.Dropdown(["Option 1", "Option 2", "Option 3"], show_label=False) - drop_2 = gr.Dropdown( - ["Option A", "Option B", "Option C"], - multiselect=True, - value=["Option A"], - label="Dropdown", - interactive=True, - ) - check = gr.Checkbox(label="Go") - with gr.Column(variant="panel", scale=2): - img = gr.Image( - "https://gradio.app/assets/img/header-image.jpg", label="Image" - ).style(height=320) - with gr.Row(): - go_btn = gr.Button("Go", label="Primary Button", variant="primary") - clear_btn = gr.Button( - "Clear", label="Secondary Button", variant="secondary" - ) - - def go(*args): - time.sleep(3) - return "https://gradio.app/assets/img/header-image.jpg" - - go_btn.click(go, [radio, drop, drop_2, check, name], img, api_name="go") - - def clear(): - time.sleep(0.2) - return None - - clear_btn.click(clear, None, img) - - with gr.Row(): - btn1 = gr.Button("Button 1").style(size="sm") - btn2 = gr.UploadButton().style(size="sm") - stop_btn = gr.Button("Stop", label="Stop Button", variant="stop").style( - size="sm" - ) - - with gr.Row(): - gr.Dataframe(value=[[1, 2, 3], [4, 5, 6], [7, 8, 9]], label="Dataframe") - gr.JSON( - value={"a": 1, "b": 2, "c": {"test": "a", "test2": [1, 2, 3]}}, label="JSON" - ) - gr.Label(value={"cat": 0.7, "dog": 0.2, "fish": 0.1}) - gr.File() - with gr.Row(): - gr.ColorPicker() - gr.Video("https://gradio-static-files.s3.us-west-2.amazonaws.com/world.mp4") - gr.Gallery( - [ - ( - "https://gradio-static-files.s3.us-west-2.amazonaws.com/lion.jpg", - "lion", - ), - ( - "https://gradio-static-files.s3.us-west-2.amazonaws.com/logo.png", - "logo", - ), - ( - "https://gradio-static-files.s3.us-west-2.amazonaws.com/tower.jpg", - "tower", - ), - ] - ).style(height="200px", grid=2) - - with gr.Row(): - with gr.Column(scale=2): - chatbot = gr.Chatbot([("Hello", "Hi")], label="Chatbot") - chat_btn = gr.Button("Add messages") - - def chat(history): - time.sleep(2) - yield [["How are you?", "I am good."]] - - chat_btn.click( - lambda history: history - + [["How are you?", "I am good."]] - + (time.sleep(2) or []), - chatbot, - chatbot, - ) - with gr.Column(scale=1): - with gr.Accordion("Advanced Settings"): - gr.Markdown("Hello") - gr.Number(label="Chatbot control 1") - gr.Number(label="Chatbot control 2") - gr.Number(label="Chatbot control 3") - - -if __name__ == "__main__": - demo.queue().launch() diff --git a/spaces/zaursamedov1/llama2-qlora-finetunined-NER/index.html b/spaces/zaursamedov1/llama2-qlora-finetunined-NER/index.html deleted file mode 100644 index 58275de3b1c343a98420342baa076b9baaafa157..0000000000000000000000000000000000000000 --- a/spaces/zaursamedov1/llama2-qlora-finetunined-NER/index.html +++ /dev/null @@ -1,19 +0,0 @@ - - - - - - My static Space - - - -
-    Welcome to your static Space!
-    You can modify this app directly by editing index.html in the Files and versions tab.
-    Also don't forget to check the Spaces documentation.
    - - diff --git a/spaces/zideliu/styledrop/README.md b/spaces/zideliu/styledrop/README.md deleted file mode 100644 index f33b69dda290cd73299e39fb022444368acecf4c..0000000000000000000000000000000000000000 --- a/spaces/zideliu/styledrop/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Styledrop -emoji: 🌍 -colorFrom: green -colorTo: indigo -sdk: docker -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/zwhe99/MAPS-mt/model/openai/translate.py b/spaces/zwhe99/MAPS-mt/model/openai/translate.py deleted file mode 100644 index 20511a62a784150c2f282272ae1df32dcc64cb6a..0000000000000000000000000000000000000000 --- a/spaces/zwhe99/MAPS-mt/model/openai/translate.py +++ /dev/null @@ -1,164 +0,0 @@ -import os -import re -import openai -import argparse -import tiktoken -from tqdm import tqdm -import backoff - -api_key = os.environ["api_key"] - -model2max_context = { - "text-davinci-003": 4097, -} - -class OutOfQuotaException(Exception): - "Raised when the key exceeded the current quota" - def __init__(self, key, cause=None): - super().__init__(f"No quota for key: {key}") - self.key = key - self.cause = cause - - def __str__(self): - if self.cause: - return f"{super().__str__()}. Caused by {self.cause}" - else: - return super().__str__() - -class AccessTerminatedException(Exception): - "Raised when the key has been terminated" - def __init__(self, key, cause=None): - super().__init__(f"Access terminated key: {key}") - self.key = key - self.cause = cause - - def __str__(self): - if self.cause: - return f"{super().__str__()}. Caused by {self.cause}" - else: - return super().__str__() - -def num_tokens_from_string(string: str, model_name: str) -> int: - """Returns the number of tokens in a text string.""" - encoding = tiktoken.encoding_for_model(model_name) - num_tokens = len(encoding.encode(string)) - return num_tokens - -def generate_batch(lst, batch_size): - """ Yields batch of specified size """ - for i in range(0, len(lst), batch_size): - yield lst[i : i + batch_size] - -def post_procress(s: str): - res = s.strip().replace("\n", " ") - if res == "": - res = " " - return res - -@backoff.on_exception(backoff.expo, (openai.error.OpenAIError, openai.error.RateLimitError, openai.error.APIError, openai.error.ServiceUnavailableError, openai.error.APIConnectionError), max_tries=5) -def translate_with_backoff(smp, model_name, max_tokens, api_key, temperature): - try: - response = openai.Completion.create( - model=model_name, - prompt=smp, - temperature=temperature, - max_tokens=max_tokens, - api_key=api_key, - ) - gen = response.choices[0].text - - gen = post_procress(gen) - return gen - - except openai.error.RateLimitError as e: - if "You exceeded your current quota, please check your plan and billing details" in e.user_message: - raise OutOfQuotaException(api_key) - elif "Your access was terminated due to violation of our policies" in e.user_message: - raise AccessTerminatedException(api_key) - else: - raise e - -@backoff.on_exception(backoff.expo, (openai.error.OpenAIError, openai.error.RateLimitError, openai.error.APIError, openai.error.ServiceUnavailableError, openai.error.APIConnectionError), max_tries=5) -def batch_translate_with_backoff(smp_lst, model_name, max_tokens, api_key, temperature): - try: - response = openai.Completion.create( - model=model_name, - prompt=smp_lst, - temperature=temperature, - max_tokens=max_tokens, - api_key=api_key, - ) - - gen_lst = [""] * len(smp_lst) - for choice 
in response.choices: - gen = choice.text - gen = post_procress(gen) # Assuming your post_procress function can handle a single text - gen_lst[choice.index] = gen - - return gen_lst - - except openai.error.RateLimitError as e: - if "You exceeded your current quota, please check your plan and billing details" in e.user_message: - raise OutOfQuotaException(api_key) - elif "Your access was terminated due to violation of our policies" in e.user_message: - raise AccessTerminatedException(api_key) - else: - raise e - -def parse_args(): - parser = argparse.ArgumentParser("", formatter_class=argparse.ArgumentDefaultsHelpFormatter) - - parser.add_argument("--model-name", type=str, required=True, - help="Model name") - parser.add_argument("-i", "--input", type=str, required=True, - help="Input file path") - parser.add_argument("-o", "--output", type=str, required=True, - help="Output file path") - parser.add_argument("--temperature", type=float, default=0, - help="Sampling temperature") - - return parser.parse_args() - -def main(): - args = parse_args() - model_name = args.model_name - in_file_path = args.input - out_file_path = args.output - temperature = args.temperature - - # get input samples - input_file_path = os.path.join(in_file_path) - with open(input_file_path, 'r') as in_file: - in_file_str = in_file.read() - samples = in_file_str.strip().split("\n\n\n") - total = len(samples) - - # create or check output file - num_done = 0 - output_file_path = os.path.join(out_file_path) - if os.path.exists(output_file_path): - with open(output_file_path, 'r') as out_file: - num_done = len(out_file.readlines()) - - # translate - pattern = re.compile(r'\d\d\d\d\n') - with tqdm(total=total) as pbar: - pbar.update(num_done) - - for to_be_translated_idx, to_be_translated_smp in enumerate(samples[num_done: ]): - assert len(pattern.findall(to_be_translated_smp)) >= 1 - to_be_translated_smp = to_be_translated_smp.replace(f"{to_be_translated_idx:04}\n", "", 1).strip() - len_prompt = num_tokens_from_string(to_be_translated_smp, model_name) - gen = translate_with_backoff( - to_be_translated_smp, - model_name=model_name, - max_tokens=model2max_context[model_name]-len_prompt, - api_key=api_key, - temperature=temperature - ) - with open(output_file_path, 'a') as fout: - fout.write(f"{gen}\n") - pbar.update(1) - -if __name__ == "__main__": - main() \ No newline at end of file
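As a rough sketch, the batched helper `batch_translate_with_backoff` and the `generate_batch` helper defined above could be combined as follows. This assumes the same legacy `openai<1.0` Completion API, that `translate.py` is importable as a module named `translate`, and an arbitrary batch size of 8; `translate_in_batches` and `prompts` are illustrative names, not part of the original script.

# Minimal illustrative sketch: batch the prompts and keep outputs in input order.
from translate import (  # assumes translate.py is on the import path as `translate`
    api_key,
    batch_translate_with_backoff,
    generate_batch,
    model2max_context,
    num_tokens_from_string,
)

def translate_in_batches(prompts, model_name="text-davinci-003", batch_size=8):
    """Translate a list of prompt strings, preserving their order."""
    results = []
    for batch in generate_batch(prompts, batch_size):
        # Budget completion tokens against the longest prompt in the batch.
        longest = max(num_tokens_from_string(p, model_name) for p in batch)
        results.extend(
            batch_translate_with_backoff(
                batch,
                model_name=model_name,
                max_tokens=model2max_context[model_name] - longest,
                api_key=api_key,
                temperature=0,
            )
        )
    return results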